diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml
index 7c576935a2756..b6f83a30871f0 100644
--- a/checkstyle/suppressions.xml
+++ b/checkstyle/suppressions.xml
@@ -56,7 +56,7 @@
+              files="(Microbenchmarks|SaslServerAuthenticator).java"/>

         List<InetSocketAddress> validatedAddresses = checkWithLookup(asList("example.com:10000"));
-        assertTrue(validatedAddresses.size() >= 1, "Unexpected addresses " + validatedAddresses);
+        assertFalse(validatedAddresses.isEmpty(), "Unexpected addresses " + validatedAddresses);
         List<String> validatedHostNames = validatedAddresses.stream().map(InetSocketAddress::getHostName)
                 .collect(Collectors.toList());
         List<String> expectedHostNames = asList("93.184.215.14", "2606:2800:21f:cb07:6820:80da:af6b:8b2c");
diff --git a/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java b/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java
index 2da4bbeba3a01..8f066c668c919 100644
--- a/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java
@@ -75,15 +75,16 @@ public class ClusterConnectionStatesTest {
     private final String nodeId2 = "2002";
     private final String nodeId3 = "3003";
     private final String hostTwoIps = "multiple.ip.address";
-    private ClusterConnectionStates connectionStates;

     // For testing nodes with a single IP address, use localhost and default DNS resolution
-    private DefaultHostResolver singleIPHostResolver = new DefaultHostResolver();
+    private final DefaultHostResolver singleIPHostResolver = new DefaultHostResolver();

     // For testing nodes with multiple IP addresses, mock DNS resolution to get consistent results
-    private AddressChangeHostResolver multipleIPHostResolver = new AddressChangeHostResolver(
+    private final AddressChangeHostResolver multipleIPHostResolver = new AddressChangeHostResolver(
             initialAddresses.toArray(new InetAddress[0]), newAddresses.toArray(new InetAddress[0]));

+    private ClusterConnectionStates connectionStates;
+
     @BeforeEach
     public void setup() {
         this.connectionStates = new ClusterConnectionStates(reconnectBackoffMs, reconnectBackoffMax,
diff --git a/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java
index 4bf53d9608e2a..a14b1b34b3984 100644
--- a/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java
@@ -52,6 +52,7 @@
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;

@@ -67,12 +68,7 @@ public class FetchSessionHandlerTest {
      * ordering for test purposes.
      */
    private static Set<TopicPartition> toSet(TopicPartition... arr) {
-        TreeSet<TopicPartition> set = new TreeSet<>(new Comparator<TopicPartition>() {
-            @Override
-            public int compare(TopicPartition o1, TopicPartition o2) {
-                return o1.toString().compareTo(o2.toString());
-            }
-        });
+        TreeSet<TopicPartition> set = new TreeSet<>(Comparator.comparing(TopicPartition::toString));
         set.addAll(Arrays.asList(arr));
         return set;
     }
@@ -317,12 +313,7 @@ public void testDoubleBuild() {
         builder.add(new TopicPartition("foo", 0),
                 new FetchRequest.PartitionData(Uuid.randomUuid(), 0, 100, 200, Optional.empty()));
         builder.build();
-        try {
-            builder.build();
-            fail("Expected calling build twice to fail.");
-        } catch (Throwable t) {
-            // expected
-        }
+        assertThrows(Throwable.class, builder::build, "Expected calling build twice to fail.");
     }

     @Test
diff --git a/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java b/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java
index 09022666e3497..78952286aebe2 100644
--- a/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java
@@ -34,9 +34,9 @@ public class InFlightRequestsTest {

+    private final String dest = "dest";
     private InFlightRequests inFlightRequests;
     private int correlationId;
-    private String dest = "dest";

     @BeforeEach
     public void setup() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java
index e6db9685eb52e..0b2733207ce42 100644
--- a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java
@@ -67,6 +67,7 @@
 import java.util.stream.Collectors;

 import static org.apache.kafka.test.TestUtils.assertOptional;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -80,9 +81,9 @@ public class MetadataTest {

-    private long refreshBackoffMs = 100;
-    private long refreshBackoffMaxMs = 1000;
-    private long metadataExpireMs = 1000;
+    private final long refreshBackoffMs = 100;
+    private final long refreshBackoffMaxMs = 1000;
+    private final long metadataExpireMs = 1000;
     private Metadata metadata = new Metadata(refreshBackoffMs, refreshBackoffMaxMs, metadataExpireMs, new LogContext(),
             new ClusterResourceListeners());
@@ -1210,8 +1211,7 @@ else if (partition.equals(internalPart))
         metadata.update(versionAndBuilder.requestVersion,
                 RequestTestUtils.metadataUpdateWith(clusterId, numNodes, errorCounts, topicPartitionCounts, tp -> null,
                         metadataSupplier, ApiKeys.METADATA.latestVersion(), topicIds), false, time.milliseconds());
-        List<Node> nodes = new ArrayList<>();
-        nodes.addAll(metadata.fetch().nodes());
+        List<Node> nodes = new ArrayList<>(metadata.fetch().nodes());
         Node controller = metadata.fetch().controller();
         assertEquals(numNodes, nodes.size());
         assertFalse(metadata.updateRequested());
@@ -1321,22 +1321,14 @@ public void testConcurrentUpdateAndFetchForSnapshotAndCluster() throws Interrupt
                     metadata.updateWithCurrentRequestVersion(newMetadataResponse, true, time.milliseconds());
                     atleastMetadataUpdatedOnceLatch.countDown();
                 } else { // Thread to read metadata snapshot, once its updated
-                    try {
-                        if (!atleastMetadataUpdatedOnceLatch.await(5, TimeUnit.MINUTES)) {
-                            assertFalse(true, "Test had to wait more than 5 minutes, something went wrong.");
-                        }
-                    } catch (InterruptedException e) {
-                        throw new RuntimeException(e);
-                    }
+                    assertTrue(assertDoesNotThrow(() -> atleastMetadataUpdatedOnceLatch.await(5, TimeUnit.MINUTES)));
                     newSnapshot.set(metadata.fetchMetadataSnapshot());
                     newCluster.set(metadata.fetch());
                 }
                 allThreadsDoneLatch.countDown();
             });
         }
-        if (!allThreadsDoneLatch.await(5, TimeUnit.MINUTES)) {
-            assertFalse(true, "Test had to wait more than 5 minutes, something went wrong.");
-        }
+        assertTrue(allThreadsDoneLatch.await(5, TimeUnit.MINUTES));

         // Validate new snapshot is upto-date. And has higher partition counts, nodes & leader epoch than earlier.
         {
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/TopicCollectionTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/TopicCollectionTest.java
index ce373380e28e6..a8ee16670e2f9 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/TopicCollectionTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/TopicCollectionTest.java
@@ -34,11 +34,11 @@ public void testTopicCollection() {

         List<Uuid> topicIds = Arrays.asList(Uuid.randomUuid(), Uuid.randomUuid(), Uuid.randomUuid());
         List<String> topicNames = Arrays.asList("foo", "bar");

-        TopicCollection idCollection = TopicCollection.ofTopicIds(topicIds);
-        TopicCollection nameCollection = TopicCollection.ofTopicNames(topicNames);
+        TopicIdCollection idCollection = TopicCollection.ofTopicIds(topicIds);
+        TopicNameCollection nameCollection = TopicCollection.ofTopicNames(topicNames);

-        assertTrue(((TopicIdCollection) idCollection).topicIds().containsAll(topicIds));
-        assertTrue(((TopicNameCollection) nameCollection).topicNames().containsAll(topicNames));
+        assertTrue(idCollection.topicIds().containsAll(topicIds));
+        assertTrue(nameCollection.topicNames().containsAll(topicNames));
     }
 }
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java
index fef8a55074131..9be96c2c60cdc 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java
@@ -122,7 +122,7 @@ private OffsetDeleteResponse buildGroupErrorResponse(Errors error) {
     }

     private OffsetDeleteResponse buildPartitionErrorResponse(Errors error) {
-        OffsetDeleteResponse response = new OffsetDeleteResponse(
+        return new OffsetDeleteResponse(
             new OffsetDeleteResponseData()
                 .setThrottleTimeMs(0)
                 .setTopics(new OffsetDeleteResponseTopicCollection(singletonList(
@@ -135,7 +135,6 @@ private OffsetDeleteResponse buildPartitionErrorResponse(Errors error) {
                     ).iterator()))
                 ).iterator()))
         );
-        return response;
     }

     private AdminApiHandler.ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleWithGroupError(
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandlerTest.java
index 1d1b152afba05..efdde52770fb9 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandlerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandlerTest.java
@@ -75,13 +75,12 @@ public void testFailedHandleResponse() {
     }

     private DeleteGroupsResponse buildResponse(Errors error) {
-        DeleteGroupsResponse response = new DeleteGroupsResponse(
+        return new DeleteGroupsResponse(
             new DeleteGroupsResponseData()
                 .setResults(new DeletableGroupResultCollection(singletonList(
                     new DeletableGroupResult()
                         .setErrorCode(error.code())
                         .setGroupId(groupId1)).iterator())));
-        return response;
     }

     private AdminApiHandler.ApiResult handleWithError(
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java
index 7179e13a4fc78..e974ee0f9f541 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java
@@ -296,7 +296,7 @@ private ConsumerGroupDescribeResponse buildConsumerGroupDescribeResponse(Errors
     }

     private DescribeGroupsResponse buildResponse(Errors error, String protocolType) {
-        DescribeGroupsResponse response = new DescribeGroupsResponse(
+        return new DescribeGroupsResponse(
             new DescribeGroupsResponseData()
                 .setGroups(singletonList(
                     new DescribedGroup()
@@ -314,7 +314,6 @@ private DescribeGroupsResponse buildResponse(Errors error, String protocolType)
                             .setMemberAssignment(ConsumerProtocol.serializeAssignment(
                                 new Assignment(new ArrayList<>(tps))).array())
                         )))));
-        return response;
     }

     private AdminApiHandler.ApiResult handleClassicGroupWithError(
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/FenceProducersHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/FenceProducersHandlerTest.java
index 8b20f84c9889f..34ed2e6772c2f 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/FenceProducersHandlerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/FenceProducersHandlerTest.java
@@ -125,8 +125,6 @@ private ApiResult handleResponseError(
         String transactionalId,
         Errors error
     ) {
-        int brokerId = 1;
-
         CoordinatorKey key = CoordinatorKey.byTransactionalId(transactionalId);
         Set<CoordinatorKey> keys = mkSet(key);
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/RemoveMembersFromConsumerGroupHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/RemoveMembersFromConsumerGroupHandlerTest.java
index 1ad66bf7822e1..d443af968312b 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/RemoveMembersFromConsumerGroupHandlerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/RemoveMembersFromConsumerGroupHandlerTest.java
@@ -90,7 +90,7 @@ public void testFailedHandleResponseInMemberLevel() {
     }

     private LeaveGroupResponse buildResponse(Errors error) {
-        LeaveGroupResponse response = new LeaveGroupResponse(
+        return new LeaveGroupResponse(
             new LeaveGroupResponseData()
                 .setErrorCode(error.code())
                 .setMembers(singletonList(
@@ -98,11 +98,10 @@ private LeaveGroupResponse buildResponse(Errors error) {
                         .setErrorCode(Errors.NONE.code())
                         .setMemberId("m1")
                         .setGroupInstanceId("m1-gii"))));
-        return response;
     }

     private LeaveGroupResponse buildResponseWithMemberError(Errors error) {
-        LeaveGroupResponse response = new LeaveGroupResponse(
+        return new LeaveGroupResponse(
             new LeaveGroupResponseData()
                 .setErrorCode(Errors.NONE.code())
                 .setMembers(singletonList(
@@ -110,7 +109,6 @@ private LeaveGroupResponse buildResponseWithMemberError(Errors error) {
                         .setErrorCode(error.code())
                         .setMemberId("m1")
                         .setGroupInstanceId("m1-gii"))));
-        return response;
     }

     private AdminApiHandler.ApiResult<CoordinatorKey, Map<MemberIdentity, Errors>> handleWithGroupError(
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerGroupMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerGroupMetadataTest.java
index b32b49cf337b3..a34e1dd88a4f8 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerGroupMetadataTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerGroupMetadataTest.java
@@ -28,7 +28,7 @@ public class ConsumerGroupMetadataTest {

-    private String groupId = "group";
+    private final String groupId = "group";

     @Test
     public void testAssignmentConstructor() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
index 8cce3fb847eab..aa35f4e38090f 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
@@ -192,12 +192,12 @@ public class KafkaConsumerTest {
     private final String memberId = "memberId";
     private final String leaderId = "leaderId";
     private final Optional<String> groupInstanceId = Optional.of("mock-instance");
-    private Map<String, Uuid> topicIds = Stream.of(
+    private final Map<String, Uuid> topicIds = Stream.of(
             new AbstractMap.SimpleEntry<>(topic, topicId),
             new AbstractMap.SimpleEntry<>(topic2, topicId2),
             new AbstractMap.SimpleEntry<>(topic3, topicId3))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
-    private Map<Uuid, String> topicNames = Stream.of(
+    private final Map<Uuid, String> topicNames = Stream.of(
            new AbstractMap.SimpleEntry<>(topicId, topic),
            new AbstractMap.SimpleEntry<>(topicId2, topic2),
            new AbstractMap.SimpleEntry<>(topicId3, topic3))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
@@ -1840,63 +1840,33 @@ public void testOperationsBySubscribingConsumerWithDefaultGroupId(GroupProtocol
             // OK, expected
         }

-        try (KafkaConsumer consumer = newConsumer(groupProtocol, (String) null)) {
-            consumer.subscribe(Collections.singleton(topic));
-            fail("Expected an InvalidGroupIdException");
-        } catch (InvalidGroupIdException e) {
-            // OK, expected
+        try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) {
+            assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(Collections.singleton(topic)));
         }

-        try (KafkaConsumer consumer = newConsumer(groupProtocol, (String) null)) {
-            consumer.committed(Collections.singleton(tp0)).get(tp0);
-            fail("Expected an InvalidGroupIdException");
-        } catch (InvalidGroupIdException e) {
-            // OK, expected
+        try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) {
+            assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0));
         }

-        try (KafkaConsumer consumer = newConsumer(groupProtocol, (String) null)) {
-            consumer.commitAsync();
-            fail("Expected an InvalidGroupIdException");
-        } catch (InvalidGroupIdException e) {
-            // OK, expected
+        try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) {
+            assertThrows(InvalidGroupIdException.class, () -> consumer.commitAsync());
         }

-        try (KafkaConsumer consumer = newConsumer(groupProtocol, (String) null)) {
-            consumer.commitSync();
-            fail("Expected an InvalidGroupIdException");
-        } catch (InvalidGroupIdException e) {
-            // OK, expected
+        try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) {
+            assertThrows(InvalidGroupIdException.class, () -> consumer.commitSync());
         }
     }

     @ParameterizedTest
     @EnumSource(GroupProtocol.class)
     public void testOperationsByAssigningConsumerWithDefaultGroupId(GroupProtocol groupProtocol) {
-        KafkaConsumer consumer = newConsumer(groupProtocol, null);
-        consumer.assign(singleton(tp0));
+        try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) {
+            consumer.assign(singleton(tp0));

-        try {
-            consumer.committed(Collections.singleton(tp0)).get(tp0);
-            fail("Expected an InvalidGroupIdException");
-        } catch (InvalidGroupIdException e) {
-            // OK, expected
-        }
-
-        try {
-            consumer.commitAsync();
-            fail("Expected an InvalidGroupIdException");
-        } catch (InvalidGroupIdException e) {
-            // OK, expected
+            assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0));
+            assertThrows(InvalidGroupIdException.class, () -> consumer.commitAsync());
+            assertThrows(InvalidGroupIdException.class, () -> consumer.commitSync());
         }
-
-        try {
-            consumer.commitSync();
-            fail("Expected an InvalidGroupIdException");
-        } catch (InvalidGroupIdException e) {
-            // OK, expected
-        }
-
-        consumer.close();
     }

     @ParameterizedTest
@@ -2055,12 +2025,7 @@ private void consumerCloseTest(GroupProtocol groupProtocol,
             }
             if (i < nonCloseRequests) {
                 // the close request should not complete until non-close requests (commit requests) have completed.
-                try {
-                    future.get(100, TimeUnit.MILLISECONDS);
-                    fail("Close completed without waiting for response");
-                } catch (TimeoutException e) {
-                    // Expected exception
-                }
+                assertThrows(TimeoutException.class, () -> future.get(100, TimeUnit.MILLISECONDS));
             }
         }

@@ -2288,23 +2253,15 @@ public void testRebalanceException(GroupProtocol groupProtocol) {
         client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator);

         // assign throws
-        try {
-            consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
-            fail("Should throw exception");
-        } catch (Throwable e) {
-            assertEquals(partitionAssigned + singleTopicPartition, e.getCause().getMessage());
-        }
+        KafkaException exc = assertThrows(KafkaException.class, () -> consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)));
+        assertEquals(partitionAssigned + singleTopicPartition, exc.getCause().getMessage());

         // the assignment is still updated regardless of the exception
         assertEquals(singleton(tp0), subscription.assignedPartitions());

         // close's revoke throws
-        try {
-            consumer.close(Duration.ofMillis(0));
-            fail("Should throw exception");
-        } catch (Throwable e) {
-            assertEquals(partitionRevoked + singleTopicPartition, e.getCause().getCause().getMessage());
-        }
+        exc = assertThrows(KafkaException.class, () -> consumer.close(Duration.ofMillis(0)));
+        assertEquals(partitionRevoked + singleTopicPartition, exc.getCause().getCause().getMessage());

         consumer.close(Duration.ofMillis(0));
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/RoundRobinAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/RoundRobinAssignorTest.java
index 97cd2862e234d..686bf9d95584c 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/RoundRobinAssignorTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/RoundRobinAssignorTest.java
@@ -36,12 +36,11 @@ public class RoundRobinAssignorTest {

-    private RoundRobinAssignor assignor = new RoundRobinAssignor();
-    private String topic = "topic";
-    private String consumerId = "consumer";
-
-    private String topic1 = "topic1";
-    private String topic2 = "topic2";
+    private final RoundRobinAssignor assignor = new RoundRobinAssignor();
+    private final String topic = "topic";
+    private final String consumerId = "consumer";
+    private final String topic1 = "topic1";
+    private final String topic2 = "topic2";

     @Test
     public void testOneConsumerNoTopic() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java
index 1f13aea21fd42..fc6d2c85ba561 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java
@@ -1018,7 +1018,7 @@ public void testHeartbeatInstanceFencedResponseWithOldGeneration() throws Interr
     }

     @Test
-    public void testHeartbeatRequestWithFencedInstanceIdException() throws InterruptedException {
+    public void testHeartbeatRequestWithFencedInstanceIdException() {
         setupCoordinator();
         mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));

@@ -1418,11 +1418,7 @@ public void testWakeupAfterJoinGroupReceivedExternalCompletion() throws Exceptio
         mockClient.prepareResponse(syncGroupResponse(Errors.NONE));
         AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();

-        try {
-            coordinator.ensureActiveGroup();
-            fail("Should have woken up from ensureActiveGroup()");
-        } catch (WakeupException e) {
-        }
+        assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()");

         assertEquals(1, coordinator.onJoinPrepareInvokes);
         assertEquals(0, coordinator.onJoinCompleteInvokes);
@@ -1458,11 +1454,7 @@ public boolean matches(AbstractRequest body) {
         }, syncGroupResponse(Errors.NONE));
         AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();

-        try {
-            coordinator.ensureActiveGroup();
-            fail("Should have woken up from ensureActiveGroup()");
-        } catch (WakeupException e) {
-        }
+        assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()");

         assertEquals(1, coordinator.onJoinPrepareInvokes);
         assertEquals(0, coordinator.onJoinCompleteInvokes);
@@ -1526,11 +1518,7 @@ public void testWakeupAfterSyncGroupReceivedExternalCompletion() throws Exceptio
         }, syncGroupResponse(Errors.NONE));
         AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();

-        try {
-            coordinator.ensureActiveGroup();
-            fail("Should have woken up from ensureActiveGroup()");
-        } catch (WakeupException e) {
-        }
+        assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()");

         assertEquals(1, coordinator.onJoinPrepareInvokes);
         assertEquals(0, coordinator.onJoinCompleteInvokes);
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java
index 404e221541f4e..d60d7aac6e9e7 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java
@@ -142,12 +142,7 @@
 @SuppressWarnings("unchecked")
 public class AsyncKafkaConsumerTest {

-    private long retryBackoffMs = 100L;
-    private int defaultApiTimeoutMs = 1000;
-    private boolean autoCommitEnabled = true;
-
     private AsyncKafkaConsumer consumer = null;
-    private Time time = new MockTime(0);

     private final FetchCollector fetchCollector = mock(FetchCollector.class);
     private final ApplicationEventHandler applicationEventHandler = mock(ApplicationEventHandler.class);
@@ -208,6 +203,9 @@ private AsyncKafkaConsumer newConsumer(
         List assignors,
         String groupId,
         String clientId) {
+        long retryBackoffMs = 100L;
+        int defaultApiTimeoutMs = 1000;
+        boolean autoCommitEnabled = true;
         return new AsyncKafkaConsumer<>(
             new LogContext(),
             clientId,
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java
index 069db53e57d47..b1db0297a120b 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java
@@ -307,7 +307,7 @@ public void testCommitSyncFailsWithExpectedException(Errors commitError,
             new OffsetAndMetadata(0));

         // Send sync offset commit that fails and verify it propagates the expected exception.
-        Long expirationTimeMs = time.milliseconds() + retryBackoffMs;
+        long expirationTimeMs = time.milliseconds() + retryBackoffMs;
         CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs);
         completeOffsetCommitRequestWithError(commitRequestManager, commitError);
         assertFutureThrows(commitResult, expectedException);
@@ -748,7 +748,7 @@ public void testOffsetCommitSyncTimeoutNotReturnedOnPollAndFails() {
             new OffsetAndMetadata(0));

         // Send sync offset commit request that fails with retriable error.
-        Long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2;
+        long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2;
         CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs);
         completeOffsetCommitRequestWithError(commitRequestManager, Errors.REQUEST_TIMED_OUT);
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java
index a07ea42caa556..a2ec3125aac93 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java
@@ -61,11 +61,11 @@ public class ConsumerNetworkClientTest {

-    private String topicName = "test";
-    private MockTime time = new MockTime(1);
-    private Cluster cluster = TestUtils.singletonCluster(topicName, 1);
-    private Node node = cluster.nodes().get(0);
-    private Metadata metadata = new Metadata(100, 100, 50000, new LogContext(),
+    private final String topicName = "test";
+    private final MockTime time = new MockTime(1);
+    private final Cluster cluster = TestUtils.singletonCluster(topicName, 1);
+    private final Node node = cluster.nodes().get(0);
+    private final Metadata metadata = new Metadata(100, 100, 50000, new LogContext(),
             new ClusterResourceListeners());
     private MockClient client = new MockClient(time, metadata);
     private ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(new LogContext(),
@@ -206,11 +206,7 @@ public void blockOnlyForRetryBackoffIfNoInflightRequests() {
     public void wakeup() {
         RequestFuture future = consumerClient.send(node, heartbeat());
         consumerClient.wakeup();
-        try {
-            consumerClient.poll(time.timer(0));
-            fail();
-        } catch (WakeupException e) {
-        }
+        assertThrows(WakeupException.class, () -> consumerClient.poll(time.timer(0)));

         client.respond(heartbeatResponse(Errors.NONE));
         consumerClient.poll(future);
@@ -263,12 +259,8 @@ public void testTopicAuthorizationExceptionPropagatedFromMetadata() {
     public void testMetadataFailurePropagated() {
         KafkaException metadataException = new KafkaException();
         metadata.fatalError(metadataException);
-        try {
-            consumerClient.poll(time.timer(Duration.ZERO));
-            fail("Expected poll to throw exception");
-        } catch (Exception e) {
-            assertEquals(metadataException, e);
-        }
+        Exception exc = assertThrows(Exception.class, () -> consumerClient.poll(time.timer(Duration.ZERO)));
+        assertEquals(metadataException, exc);
     }

     @Test
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
index 091009064deef..0e68b5df95051 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
@@ -76,7 +76,6 @@
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;

-@SuppressWarnings("ClassDataAbstractionCoupling")
 public class ConsumerNetworkThreadTest {

     private ConsumerTestBuilder.ConsumerNetworkThreadTestBuilder testBuilder;
@@ -88,11 +87,8 @@ public class ConsumerNetworkThreadTest {
     private OffsetsRequestManager offsetsRequestManager;
     private CommitRequestManager commitRequestManager;
     private CoordinatorRequestManager coordinatorRequestManager;
-    private HeartbeatRequestManager heartbeatRequestManager;
-    private MembershipManager memberhipsManager;
     private ConsumerNetworkThread consumerNetworkThread;
     private MockClient client;
-    private SubscriptionState subscriptions;

     @BeforeEach
     public void setup() {
@@ -106,10 +102,7 @@ public void setup() {
         commitRequestManager = testBuilder.commitRequestManager.orElseThrow(IllegalStateException::new);
         offsetsRequestManager = testBuilder.offsetsRequestManager;
         coordinatorRequestManager = testBuilder.coordinatorRequestManager.orElseThrow(IllegalStateException::new);
-        heartbeatRequestManager = testBuilder.heartbeatRequestManager.orElseThrow(IllegalStateException::new);
-        memberhipsManager = testBuilder.membershipManager.orElseThrow(IllegalStateException::new);
         consumerNetworkThread = testBuilder.consumerNetworkThread;
-        subscriptions = testBuilder.subscriptions;
         consumerNetworkThread.initializeResources();
     }
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java
index b01e11f452262..b72699cb4739d 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java
@@ -150,34 +150,35 @@ public class FetchRequestManagerTest {

     private static final double EPSILON = 0.0001;

-    private String topicName = "test";
-    private String groupId = "test-group";
-    private Uuid topicId = Uuid.randomUuid();
-    private Map<String, Uuid> topicIds = new HashMap<String, Uuid>() {
+    private final String topicName = "test";
+    private final String groupId = "test-group";
+    private final Uuid topicId = Uuid.randomUuid();
+    private final Map<String, Uuid> topicIds = new HashMap<String, Uuid>() {
         {
             put(topicName, topicId);
         }
     };
-    private Map<Uuid, String> topicNames = singletonMap(topicId, topicName);
+    private final Map<Uuid, String> topicNames = singletonMap(topicId, topicName);
     private final String metricGroup = "consumer" + groupId + "-fetch-manager-metrics";
-    private TopicPartition tp0 = new TopicPartition(topicName, 0);
-    private TopicPartition tp1 = new TopicPartition(topicName, 1);
-    private TopicPartition tp2 = new TopicPartition(topicName, 2);
-    private TopicPartition tp3 = new TopicPartition(topicName, 3);
-    private TopicIdPartition tidp0 = new TopicIdPartition(topicId, tp0);
-    private TopicIdPartition tidp1 = new TopicIdPartition(topicId, tp1);
-    private TopicIdPartition tidp2 = new TopicIdPartition(topicId, tp2);
-    private TopicIdPartition tidp3 = new TopicIdPartition(topicId, tp3);
-    private int validLeaderEpoch = 0;
-    private MetadataResponse initialUpdateResponse =
+    private final TopicPartition tp0 = new TopicPartition(topicName, 0);
+    private final TopicPartition tp1 = new TopicPartition(topicName, 1);
+    private final TopicPartition tp2 = new TopicPartition(topicName, 2);
+    private final TopicPartition tp3 = new TopicPartition(topicName, 3);
+    private final TopicIdPartition tidp0 = new TopicIdPartition(topicId, tp0);
+    private final TopicIdPartition tidp1 = new TopicIdPartition(topicId, tp1);
+    private final TopicIdPartition tidp2 = new TopicIdPartition(topicId, tp2);
+    private final TopicIdPartition tidp3 = new TopicIdPartition(topicId, tp3);
+    private final int validLeaderEpoch = 0;
+    private final MetadataResponse initialUpdateResponse =
         RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 4), topicIds);
-    private int minBytes = 1;
-    private int maxBytes = Integer.MAX_VALUE;
-    private int maxWaitMs = 0;
-    private int fetchSize = 1000;
-    private long retryBackoffMs = 100;
-    private long requestTimeoutMs = 30000;
+    private final int minBytes = 1;
+    private final int maxBytes = Integer.MAX_VALUE;
+    private final int maxWaitMs = 0;
+    private final int fetchSize = 1000;
+    private final long retryBackoffMs = 100;
+    private final long requestTimeoutMs = 30000;
+    private final ApiVersions apiVersions = new ApiVersions();
     private MockTime time = new MockTime(1);
     private SubscriptionState subscriptions;
     private ConsumerMetadata metadata;
@@ -185,11 +186,9 @@ public class FetchRequestManagerTest {
     private FetchMetricsManager metricsManager;
     private MockClient client;
     private Metrics metrics;
-    private ApiVersions apiVersions = new ApiVersions();
     private TestableFetchRequestManager fetcher;
     private TestableNetworkClientDelegate networkClientDelegate;
     private OffsetFetcher offsetFetcher;
-
     private MemoryRecords records;
     private MemoryRecords nextRecords;
     private MemoryRecords emptyRecords;
@@ -761,29 +760,29 @@ public void testLeaderEpochInConsumerRecord() {
         assignFromUser(singleton(tp0));
         subscriptions.seek(tp0, 0);

-        Integer partitionLeaderEpoch = 1;
+        int partitionLeaderEpoch = 1;
         ByteBuffer buffer = ByteBuffer.allocate(1024);
         MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                 TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(), partitionLeaderEpoch);
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
         builder.close();

         partitionLeaderEpoch += 7;
         builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                 TimestampType.CREATE_TIME, 2L, System.currentTimeMillis(), partitionLeaderEpoch);
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
         builder.close();

         partitionLeaderEpoch += 5;
         builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                 TimestampType.CREATE_TIME, 3L, System.currentTimeMillis(), partitionLeaderEpoch);
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
         builder.close();

         buffer.flip();
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
index 44d21f7c8d15f..2eeb9accc7d35 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
@@ -152,34 +152,35 @@ public class FetcherTest {

     private static final double EPSILON = 0.0001;

-    private String topicName = "test";
-    private String groupId = "test-group";
-    private Uuid topicId = Uuid.randomUuid();
-    private Map<String, Uuid> topicIds = new HashMap<String, Uuid>() {
+    private final String topicName = "test";
+    private final String groupId = "test-group";
+    private final Uuid topicId = Uuid.randomUuid();
+    private final Map<String, Uuid> topicIds = new HashMap<String, Uuid>() {
         {
             put(topicName, topicId);
         }
     };
-    private Map<Uuid, String> topicNames = singletonMap(topicId, topicName);
+    private final Map<Uuid, String> topicNames = singletonMap(topicId, topicName);
     private final String metricGroup = "consumer" + groupId + "-fetch-manager-metrics";
-    private TopicPartition tp0 = new TopicPartition(topicName, 0);
-    private TopicPartition tp1 = new TopicPartition(topicName, 1);
-    private TopicPartition tp2 = new TopicPartition(topicName, 2);
-    private TopicPartition tp3 = new TopicPartition(topicName, 3);
-    private TopicIdPartition tidp0 = new TopicIdPartition(topicId, tp0);
-    private TopicIdPartition tidp1 = new TopicIdPartition(topicId, tp1);
-    private TopicIdPartition tidp2 = new TopicIdPartition(topicId, tp2);
-    private TopicIdPartition tidp3 = new TopicIdPartition(topicId, tp3);
-    private int validLeaderEpoch = 0;
-    private MetadataResponse initialUpdateResponse =
+    private final TopicPartition tp0 = new TopicPartition(topicName, 0);
+    private final TopicPartition tp1 = new TopicPartition(topicName, 1);
+    private final TopicPartition tp2 = new TopicPartition(topicName, 2);
+    private final TopicPartition tp3 = new TopicPartition(topicName, 3);
+    private final TopicIdPartition tidp0 = new TopicIdPartition(topicId, tp0);
+    private final TopicIdPartition tidp1 = new TopicIdPartition(topicId, tp1);
+    private final TopicIdPartition tidp2 = new TopicIdPartition(topicId, tp2);
+    private final TopicIdPartition tidp3 = new TopicIdPartition(topicId, tp3);
+    private final int validLeaderEpoch = 0;
+    private final MetadataResponse initialUpdateResponse =
         RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 4), topicIds);
-    private int minBytes = 1;
-    private int maxBytes = Integer.MAX_VALUE;
-    private int maxWaitMs = 0;
+    private final int minBytes = 1;
+    private final int maxBytes = Integer.MAX_VALUE;
+    private final int maxWaitMs = 0;
+    private final long retryBackoffMs = 100;
+    private final long requestTimeoutMs = 30000;
+    private final ApiVersions apiVersions = new ApiVersions();
     private int fetchSize = 1000;
-    private long retryBackoffMs = 100;
-    private long requestTimeoutMs = 30000;
     private MockTime time = new MockTime(1);
     private SubscriptionState subscriptions;
     private ConsumerMetadata metadata;
@@ -187,11 +188,9 @@ public class FetcherTest {
     private FetchMetricsManager metricsManager;
     private MockClient client;
     private Metrics metrics;
-    private ApiVersions apiVersions = new ApiVersions();
     private ConsumerNetworkClient consumerClient;
     private Fetcher fetcher;
     private OffsetFetcher offsetFetcher;
-
     private MemoryRecords records;
     private MemoryRecords nextRecords;
     private MemoryRecords moreRecords;
@@ -710,29 +709,29 @@ public void testLeaderEpochInConsumerRecord() {
         assignFromUser(singleton(tp0));
         subscriptions.seek(tp0, 0);

-        Integer partitionLeaderEpoch = 1;
+        int partitionLeaderEpoch = 1;
         ByteBuffer buffer = ByteBuffer.allocate(1024);
         MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                 TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(), partitionLeaderEpoch);
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
         builder.close();

         partitionLeaderEpoch += 7;
         builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                 TimestampType.CREATE_TIME, 2L, System.currentTimeMillis(), partitionLeaderEpoch);
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
         builder.close();

         partitionLeaderEpoch += 5;
         builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                 TimestampType.CREATE_TIME, 3L, System.currentTimeMillis(), partitionLeaderEpoch);
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
-        builder.append(0L, "key".getBytes(), partitionLeaderEpoch.toString().getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
+        builder.append(0L, "key".getBytes(), Integer.toString(partitionLeaderEpoch).getBytes());
         builder.close();

         buffer.flip();
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java
index 4dc1319f2dd55..49102da976603 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java
@@ -29,17 +29,17 @@
 import static org.junit.jupiter.api.Assertions.assertTrue;

 public class HeartbeatTest {
-    private int sessionTimeoutMs = 300;
-    private int heartbeatIntervalMs = 100;
-    private int maxPollIntervalMs = 900;
-    private long retryBackoffMs = 10L;
-    private long retryBackoffMaxMs = 100L;
-    private MockTime time = new MockTime();
+    private final int sessionTimeoutMs = 300;
+    private final int heartbeatIntervalMs = 100;
+    private final int maxPollIntervalMs = 900;
+    private final MockTime time = new MockTime();
     private Heartbeat heartbeat;

     @BeforeEach
     public void setUp() {
+        long retryBackoffMs = 10L;
+        long retryBackoffMaxMs = 100L;
         GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs,
                                                                         maxPollIntervalMs,
                                                                         heartbeatIntervalMs,
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java
index 4fdcf917d6c35..70f33bfdf451e 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java
@@ -152,14 +152,13 @@ public NetworkClientDelegate newNetworkClientDelegate() {

     public NetworkClientDelegate.UnsentRequest newUnsentFindCoordinatorRequest() {
         Objects.requireNonNull(GROUP_ID);
-        NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest(
+        return new NetworkClientDelegate.UnsentRequest(
             new FindCoordinatorRequest.Builder(new FindCoordinatorRequestData()
                 .setKey(GROUP_ID)
                 .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id())
             ),
             Optional.empty()
         );
-        return req;
     }

     public void prepareFindCoordinatorResponse(Errors error) {
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java
index 596b549dd5524..2029d608f032a 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java
@@ -1153,7 +1153,7 @@ public void testOffsetValidationRequestGrouping() {
                 .collect(Collectors.toSet());

             assertTrue(expectedPartitions.stream().noneMatch(allRequestedPartitions::contains));
-            assertTrue(expectedPartitions.size() > 0);
+            assertFalse(expectedPartitions.isEmpty());
             allRequestedPartitions.addAll(expectedPartitions);

             OffsetForLeaderEpochResponseData data = new OffsetForLeaderEpochResponseData();
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java
index 57c61babd11cc..8a10fb73755c1 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java
@@ -44,13 +44,9 @@
 public class OffsetForLeaderEpochClientTest {

+    private final TopicPartition tp0 = new TopicPartition("topic", 0);
     private ConsumerNetworkClient consumerClient;
-    private SubscriptionState subscriptions;
-    private Metadata metadata;
     private MockClient client;
-    private Time time;
-
-    private TopicPartition tp0 = new TopicPartition("topic", 0);

     @Test
     public void testEmptyResponse() {
@@ -156,9 +152,9 @@ private OffsetsForLeaderEpochClient newOffsetClient() {

     private void buildDependencies(OffsetResetStrategy offsetResetStrategy) {
         LogContext logContext = new LogContext();
-        time = new MockTime(1);
-        subscriptions = new SubscriptionState(logContext, offsetResetStrategy);
-        metadata = new ConsumerMetadata(0, 0, Long.MAX_VALUE, false, false,
+        Time time = new MockTime(1);
+        SubscriptionState subscriptions = new SubscriptionState(logContext, offsetResetStrategy);
+        Metadata metadata = new ConsumerMetadata(0, 0, Long.MAX_VALUE, false, false,
                 subscriptions, logContext, new ClusterResourceListeners());
         client = new MockClient(time, metadata);
         consumerClient = new ConsumerNetworkClient(logContext, client, metadata, time,
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java
index b23660e5469c8..fdcb0cdc39953 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java
@@ -47,16 +47,9 @@
 public class ApplicationEventProcessorTest {

     private final Time time = new MockTime(1);
+    private final BlockingQueue applicationEventQueue = mock(BlockingQueue.class);
+    private final ConsumerMetadata metadata = mock(ConsumerMetadata.class);
     private ApplicationEventProcessor processor;
-    private BlockingQueue applicationEventQueue = mock(BlockingQueue.class);
-    private RequestManagers requestManagers;
-    private ConsumerMetadata metadata = mock(ConsumerMetadata.class);
-    private NetworkClientDelegate networkClientDelegate = mock(NetworkClientDelegate.class);
-    private OffsetsRequestManager offsetRequestManager;
-    private OffsetsRequestManager offsetsRequestManager;
-    private TopicMetadataRequestManager topicMetadataRequestManager;
-    private FetchRequestManager fetchRequestManager;
-    private CoordinatorRequestManager coordinatorRequestManager;
     private CommitRequestManager commitRequestManager;
     private HeartbeatRequestManager heartbeatRequestManager;
     private MembershipManager membershipManager;
@@ -65,15 +58,14 @@
     @SuppressWarnings("unchecked")
     public void setup() {
         LogContext logContext = new LogContext();
-        offsetRequestManager = mock(OffsetsRequestManager.class);
-        offsetsRequestManager = mock(OffsetsRequestManager.class);
-        topicMetadataRequestManager = mock(TopicMetadataRequestManager.class);
-        fetchRequestManager = mock(FetchRequestManager.class);
-        coordinatorRequestManager = mock(CoordinatorRequestManager.class);
+        OffsetsRequestManager offsetsRequestManager = mock(OffsetsRequestManager.class);
+        TopicMetadataRequestManager topicMetadataRequestManager = mock(TopicMetadataRequestManager.class);
+        FetchRequestManager fetchRequestManager = mock(FetchRequestManager.class);
+        CoordinatorRequestManager coordinatorRequestManager = mock(CoordinatorRequestManager.class);
         commitRequestManager = mock(CommitRequestManager.class);
         heartbeatRequestManager = mock(HeartbeatRequestManager.class);
         membershipManager = mock(MembershipManager.class);
-        requestManagers = new RequestManagers(
+        RequestManagers requestManagers = new RequestManagers(
             logContext,
             offsetsRequestManager,
             topicMetadataRequestManager,
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/HeartbeatMetricsManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/HeartbeatMetricsManagerTest.java
index 1761bc2fef07c..6a01daa9b8237 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/HeartbeatMetricsManagerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/HeartbeatMetricsManagerTest.java
@@ -28,8 +28,8 @@
 import static org.junit.jupiter.api.Assertions.assertNotNull;

 public class HeartbeatMetricsManagerTest {
-    private Time time = new MockTime();
-    private Metrics metrics = new Metrics(time);
+    private final Time time = new MockTime();
+    private final Metrics metrics = new Metrics(time);

     @Test
     public void testHeartbeatMetrics() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/OffsetCommitMetricsManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/OffsetCommitMetricsManagerTest.java
index fa89456019a84..de5a29445be0e 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/OffsetCommitMetricsManagerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/OffsetCommitMetricsManagerTest.java
@@ -25,8 +25,8 @@
 import static org.junit.jupiter.api.Assertions.assertNotNull;

 public class OffsetCommitMetricsManagerTest {
-    private Time time = new MockTime();
-    private Metrics metrics = new Metrics(time);
+    private final Time time = new MockTime();
+    private final Metrics metrics = new Metrics(time);

     @Test
     public void testOffsetCommitMetrics() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/RebalanceCallbackMetricsManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/RebalanceCallbackMetricsManagerTest.java
index c0b5e6eae4824..45dd1c42a75f5 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/RebalanceCallbackMetricsManagerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/RebalanceCallbackMetricsManagerTest.java
@@ -25,8 +25,8 @@
 import static org.junit.jupiter.api.Assertions.assertNotNull;

 public class RebalanceCallbackMetricsManagerTest {
-    private Time time = new MockTime();
-    private Metrics metrics = new Metrics(time);
+    private final Time time = new MockTime();
+    private final Metrics metrics = new Metrics(time);

     @Test
     public void testRebalanceCallbackMetrics() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
index 781aaaaf31405..7d4aa5e3a85d6 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
@@ -551,11 +551,10 @@ public void testConstructorWithNotStringKey() {
         Properties props = new Properties();
         props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
         props.put(1, "not string key");
-        try (KafkaProducer<String, String> ff = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
-            fail("Constructor should throw exception");
-        } catch (ConfigException e) {
-            assertTrue(e.getMessage().contains("not string key"), "Unexpected exception message: " + e.getMessage());
-        }
+        ConfigException ce = assertThrows(
+            ConfigException.class,
+            () -> new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()));
+        assertTrue(ce.getMessage().contains("not string key"), "Unexpected exception message: " + ce.getMessage());
     }

     @Test
@@ -2085,7 +2084,6 @@ public void testCallbackAndInterceptorHandleError() {

         assertNotNull(recordMetadata.topic(), "Topic name should be valid even on send failure");
         assertEquals(invalidTopicName, recordMetadata.topic());
-        assertNotNull(recordMetadata.partition(), "Partition should be valid even on send failure");
         assertFalse(recordMetadata.hasOffset());
         assertEquals(ProduceResponse.INVALID_OFFSET, recordMetadata.offset());
@@ -2391,15 +2389,15 @@ private static class KafkaProducerTestContext {
         private final TestInfo testInfo;
         private final Map configs;
         private final Serializer serializer;
+        private final Partitioner partitioner = mock(Partitioner.class);
+        private final KafkaThread ioThread = mock(KafkaThread.class);
+        private final List<ProducerInterceptor<T, T>> interceptors = new ArrayList<>();
         private ProducerMetadata metadata = mock(ProducerMetadata.class);
         private RecordAccumulator accumulator = mock(RecordAccumulator.class);
         private Sender sender = mock(Sender.class);
         private TransactionManager transactionManager = mock(TransactionManager.class);
-        private Partitioner partitioner = mock(Partitioner.class);
-        private KafkaThread ioThread = mock(KafkaThread.class);
         private Time time = new MockTime();
-        private Metrics metrics = new Metrics(time);
-        private List<ProducerInterceptor<T, T>> interceptors = new ArrayList<>();
+        private final Metrics metrics = new Metrics(time);

         public KafkaProducerTestContext(
             TestInfo testInfo,
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java
index cd15a3ea81254..31d417a56dc03 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java
@@ -40,7 +40,7 @@ public class ProducerInterceptorsTest {
     private int onSendCount = 0;

     private class AppendProducerInterceptor implements ProducerInterceptor {
-        private String appendStr = "";
+        private final String appendStr;
         private boolean throwExceptionOnSend = false;
         private boolean throwExceptionOnAck = false;
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java
index fa5618e1b2644..dd77c0493b860 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java
@@ -45,12 +45,12 @@ public class ProducerMetadataTest {

     private static final long METADATA_IDLE_MS = 60 * 1000;
-    private long refreshBackoffMs = 100;
-    private long refreshBackoffMaxMs = 1000;
-    private long metadataExpireMs = 1000;
-    private ProducerMetadata metadata = new ProducerMetadata(refreshBackoffMs, refreshBackoffMaxMs, metadataExpireMs, METADATA_IDLE_MS,
+    private final long refreshBackoffMs = 100;
+    private final long refreshBackoffMaxMs = 1000;
+    private final long metadataExpireMs = 1000;
+    private final ProducerMetadata metadata = new ProducerMetadata(refreshBackoffMs, refreshBackoffMaxMs, metadataExpireMs, METADATA_IDLE_MS,
             new LogContext(), new ClusterResourceListeners(), Time.SYSTEM);
-    private AtomicReference backgroundError = new AtomicReference<>();
+    private final AtomicReference backgroundError = new AtomicReference<>();

     @AfterEach
     public void tearDown() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java
index 9ed0e70380969..32d3e532b6ee7 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java
@@ -72,6 +72,7 @@
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;

 import static java.util.Arrays.asList;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -83,23 +84,23 @@
 public class RecordAccumulatorTest {

-    private String topic = "test";
-    private int partition1 = 0;
-    private int partition2 = 1;
-    private int partition3 = 2;
-    private Node node1 = new Node(0, "localhost", 1111);
-    private Node node2 = new Node(1, "localhost", 1112);
+    private final String topic = "test";
+    private final int partition1 = 0;
+    private final int partition2 = 1;
+    private final int partition3 = 2;
+    private final Node node1 = new Node(0, "localhost", 1111);
+    private final Node node2 = new Node(1, "localhost", 1112);

-    private TopicPartition tp1 = new TopicPartition(topic, partition1);
-    private TopicPartition tp2 = new TopicPartition(topic, partition2);
-    private TopicPartition tp3 = new TopicPartition(topic, partition3);
+    private final TopicPartition tp1 = new TopicPartition(topic, partition1);
+    private final TopicPartition tp2 = new TopicPartition(topic, partition2);
+    private final TopicPartition tp3 = new TopicPartition(topic, partition3);

-    private PartitionMetadata partMetadata1 = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.empty(), null, null, null);
-    private PartitionMetadata partMetadata2 = new PartitionMetadata(Errors.NONE, tp2, Optional.of(node1.id()), Optional.empty(), null, null, null);
-    private PartitionMetadata partMetadata3 = new PartitionMetadata(Errors.NONE, tp3, Optional.of(node2.id()), Optional.empty(), null, null, null);
-    private List<PartitionMetadata> partMetadatas = new ArrayList<>(Arrays.asList(partMetadata1, partMetadata2, partMetadata3));
+    private final PartitionMetadata partMetadata1 = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.empty(), null, null, null);
+    private final PartitionMetadata partMetadata2 = new PartitionMetadata(Errors.NONE, tp2, Optional.of(node1.id()), Optional.empty(), null, null, null);
+    private final PartitionMetadata partMetadata3 = new PartitionMetadata(Errors.NONE, tp3, Optional.of(node2.id()), Optional.empty(), null, null, null);
+    private final List<PartitionMetadata> partMetadatas = new ArrayList<>(Arrays.asList(partMetadata1, partMetadata2, partMetadata3));

-    private Map<Integer, Node> nodes = Arrays.asList(node1, node2).stream().collect(Collectors.toMap(Node::id, Function.identity()));
+    private final Map<Integer, Node> nodes = Stream.of(node1, node2).collect(Collectors.toMap(Node::id, Function.identity()));
     private MetadataSnapshot metadataCache = new MetadataSnapshot(null,
         nodes,
         partMetadatas,
@@ -109,14 +110,14 @@ public class RecordAccumulatorTest {
         null,
         Collections.emptyMap());

-    private Cluster cluster = metadataCache.cluster();
+    private final Cluster cluster = metadataCache.cluster();

-    private MockTime time = new MockTime();
-    private byte[] key = "key".getBytes();
-    private byte[] value = "value".getBytes();
-    private int msgSize = DefaultRecord.sizeInBytes(0, 0, key.length, value.length, Record.EMPTY_HEADERS);
+    private final MockTime time = new MockTime();
+    private final byte[] key = "key".getBytes();
+    private final byte[] value = "value".getBytes();
+    private final int msgSize = DefaultRecord.sizeInBytes(0, 0, key.length, value.length, Record.EMPTY_HEADERS);

-    private Metrics metrics = new Metrics(time);
+    private final Metrics metrics = new Metrics(time);
     private final long maxBlockTimeMs = 1000;
     private final LogContext logContext = new LogContext();

@@ -790,7 +791,7 @@ private void doExpireBatchSingle(int deliveryTimeoutMs) throws InterruptedExcept
         int lingerMs = 300;
         List<Boolean> muteStates = Arrays.asList(false, true);
         Set<Node> readyNodes;
-        List<ProducerBatch> expiredBatches = new ArrayList<>();
+        List<ProducerBatch> expiredBatches;
         // test case assumes that the records do not fill the batch completely
         int batchSize = 1025;

         RecordAccumulator accum = createTestRecordAccumulator(deliveryTimeoutMs,
@@ -953,7 +954,7 @@ public void testMutedPartitions() throws InterruptedException {
         // Test ready without muted partition
         accum.unmutePartition(tp1);
         result = accum.ready(metadataCache, time.milliseconds());
-        assertTrue(result.readyNodes.size() > 0, "The batch should be ready");
+        assertFalse(result.readyNodes.isEmpty(), "The batch should be ready");

         // Test drain with muted partition
         accum.mutePartition(tp1);
@@ -963,7 +964,7 @@ public void testMutedPartitions() throws InterruptedException {
         // Test drain without muted partition.
         accum.unmutePartition(tp1);
         drained = accum.drain(metadataCache, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
-        assertTrue(drained.get(node1.id()).size() > 0, "The batch should have been drained.");
+        assertFalse(drained.get(node1.id()).isEmpty(), "The batch should have been drained.");
     }

     @Test
@@ -1057,7 +1058,7 @@ public void testSplitAndReenqueue() throws ExecutionException, InterruptedExcept
         time.sleep(121L);
         // Drain the batch.
         RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, time.milliseconds());
-        assertTrue(result.readyNodes.size() > 0, "The batch should be ready");
+        assertFalse(result.readyNodes.isEmpty(), "The batch should be ready");
         Map<Integer, List<ProducerBatch>> drained = accum.drain(metadataCache, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
         assertEquals(1, drained.size(), "Only node1 should be drained");
         assertEquals(1, drained.get(node1.id()).size(), "Only one batch should be drained");
@@ -1682,7 +1683,7 @@ private byte[] bytesWithPoorCompression(Random random, int size) {
         return value;
     }

-    private class BatchDrainedResult {
+    private static class BatchDrainedResult {
         final int numSplit;
         final int numBatches;
         BatchDrainedResult(int numSplit, int numBatches) {
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java
index eb01d1d5841d7..31afa87d60587 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java
@@ -149,16 +149,15 @@ public class SenderTest {
     private static final int DELIVERY_TIMEOUT_MS = 1500;
     private static final long TOPIC_IDLE_MS = 60 * 1000;

-    private TopicPartition tp0 = new TopicPartition("test", 0);
-    private TopicPartition tp1 = new TopicPartition("test", 1);
-
-    private TopicPartition tp2 = new TopicPartition("test", 2);
+    private final TopicPartition tp0 = new TopicPartition("test", 0);
+    private final TopicPartition tp1 = new TopicPartition("test", 1);
+    private final TopicPartition tp2 = new TopicPartition("test", 2);
     private MockTime time = new MockTime();
-    private int batchSize = 16 * 1024;
-    private ProducerMetadata metadata = new ProducerMetadata(0, 0, Long.MAX_VALUE, TOPIC_IDLE_MS,
+    private final int batchSize = 16 * 1024;
+    private final ProducerMetadata metadata = new ProducerMetadata(0, 0, Long.MAX_VALUE, TOPIC_IDLE_MS,
             new LogContext(), new ClusterResourceListeners(), time);
+    private final ApiVersions apiVersions = new ApiVersions();
     private MockClient client = new MockClient(time, metadata);
-    private ApiVersions apiVersions = new ApiVersions();
     private Metrics metrics = null;
     private RecordAccumulator accumulator = null;
     private Sender sender = null;
@@ -517,7 +516,7 @@ public void onCompletion(RecordMetadata metadata, Exception exception) {
         sender.runOnce();  // We should try to flush the batch, but we expire it instead without sending anything.
         assertEquals(messagesPerBatch, expiryCallbackCount.get(), "Callbacks not invoked for expiry");
         assertNull(unexpectedException.get(), "Unexpected exception");
-        // Make sure that the reconds were appended back to the batch.
+        // Make sure that the records were appended back to the batch.
         assertNotNull(accumulator.getDeque(tp1));
         assertEquals(1, accumulator.getDeque(tp1).size());
         assertEquals(messagesPerBatch, accumulator.getDeque(tp1).peekFirst().recordCount);
@@ -2309,7 +2308,7 @@ public void testSequenceNumberIncrement() throws InterruptedException {
         sender.runOnce(); // receive response
         assertTrue(responseFuture.isDone());
         assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
-        assertEquals(1L, (long) transactionManager.sequenceNumber(tp0));
+        assertEquals(1L, transactionManager.sequenceNumber(tp0));
     }

     @Test
@@ -2472,7 +2471,7 @@ private void testSplitBatchAndSend(TransactionManager txnManager,
             assertEquals(OptionalInt.of(0), txnManager.lastAckedSequence(tp), "The last ack'd sequence number should be 0");
             assertFalse(f2.isDone(), "The future shouldn't have been done.");
             assertEquals(0L, f1.get().offset(), "Offset of the first message should be 0");
-            sender.runOnce(); // send the seconcd produce request
+            sender.runOnce(); // send the second produce request
             id = client.requests().peek().destination();
             assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
             node = new Node(Integer.parseInt(id), "localhost", 0);
@@ -2956,7 +2955,7 @@ public void testForceShutdownWithIncompleteTransaction() {
     }

     @Test
-    public void testTransactionAbortedExceptionOnAbortWithoutError() throws InterruptedException, ExecutionException {
+    public void testTransactionAbortedExceptionOnAbortWithoutError() throws InterruptedException {
         ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
         TransactionManager txnManager = new TransactionManager(logContext, "testTransactionAbortedExceptionOnAbortWithoutError", 60000, 100, apiVersions);

@@ -3151,7 +3150,7 @@ public void testInvalidTxnStateIsAnAbortableError() throws Exception {
     }

     @Test
-    public void testTransactionAbortablenExceptionIsAnAbortableError() throws Exception {
+    public void testTransactionAbortableExceptionIsAnAbortableError() throws Exception {
         ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
         apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3));
         TransactionManager txnManager = new TransactionManager(logContext, "textTransactionAbortableException", 60000, 100, apiVersions);
@@ -3232,7 +3231,7 @@ public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exceptio
         assertTrue(client.hasInFlightRequests());
         client.respond(produceResponse(tp0, -1, Errors.NOT_LEADER_OR_FOLLOWER, 0));
         sender.runOnce(); // receive produce response, batch scheduled for retry
-        assertTrue(!futureIsProduced.isDone(), "Produce request is yet not done.");
assertFalse(futureIsProduced.isDone(), "Produce request should not be done."); // TEST that as new-leader(with epochA) is discovered, the batch is retried immediately i.e. skips any backoff period. // Update leader epoch for tp0 @@ -3253,13 +3252,13 @@ public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exceptio assertTrue(client.hasInFlightRequests()); client.respond(produceResponse(tp0, -1, Errors.NOT_LEADER_OR_FOLLOWER, 0)); sender.runOnce(); // receive produce response, schedule batch for retry. - assertTrue(!futureIsProduced.isDone(), "Produce request is yet not done."); + assertFalse(futureIsProduced.isDone(), "Produce request should not be done."); // TEST that a subsequent retry to the same leader(epochA) waits the backoff period. sender.runOnce(); //send produce request // No batches in-flight assertEquals(0, sender.inFlightBatches(tp0).size()); - assertTrue(!client.hasInFlightRequests()); + assertFalse(client.hasInFlightRequests()); // TEST that after waiting for longer than backoff period, batch is retried again. time.sleep(2 * retryBackoffMaxMs); @@ -3279,7 +3278,7 @@ public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exceptio // This test is expected to run fast. If timeout, the sender is not able to close properly. @Timeout(5) @Test - public void testSenderShouldCloseWhenTransactionManagerInErrorState() throws Exception { + public void testSenderShouldCloseWhenTransactionManagerInErrorState() { metrics.close(); Map clientTags = Collections.singletonMap("client-id", "clientA"); metrics = new Metrics(new MetricConfig().tags(clientTags)); @@ -3356,8 +3355,8 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorButNoNewLead responses.put(tp2, new OffsetAndError(100, Errors.NONE)); client.respond(produceResponse(responses)); sender.runOnce(); // receive produce response, batch scheduled for retry - assertTrue(!futureIsProducedTp0.isDone(), "Produce request to tp0 should be unfinished."); - assertTrue(!futureIsProducedTp1.isDone(), "Produce request to tp1 should be unfinished."); + assertFalse(futureIsProducedTp0.isDone(), "Produce request to tp0 should be unfinished."); + assertFalse(futureIsProducedTp1.isDone(), "Produce request to tp1 should be unfinished."); assertTrue(futureIsProducedTp2.isDone(), "Produce request to tp2 should be done."); // Validate metadata is unchanged as new leader info wasn't received. @@ -3452,8 +3451,8 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorAndNewLeader client.respond(produceResponse(responses, partitionLeaderInfo, newNodes)); sender.runOnce(); // receive produce response, batch scheduled for retry - assertTrue(!futureIsProducedTp0.isDone(), "Produce request to tp0 should be unfinished."); - assertTrue(!futureIsProducedTp1.isDone(), "Produce request to tp1 should be unfinished."); + assertFalse(futureIsProducedTp0.isDone(), "Produce request to tp0 should be unfinished."); + assertFalse(futureIsProducedTp1.isDone(), "Produce request to tp1 should be unfinished."); assertTrue(futureIsProducedTp2.isDone(), "Produce request to tp2 should be done."); // Validate metadata is unchanged as new leader info wasn't received.
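Reviewer note: JUnit 5 ships direct negative and type assertions, so negated conditions no longer need to be routed through assertTrue. A minimal, self-contained sketch of the idioms applied throughout this patch (class and variable names here are illustrative, not taken from the Kafka sources):

    import static org.junit.jupiter.api.Assertions.assertFalse;
    import static org.junit.jupiter.api.Assertions.assertInstanceOf;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import java.util.Collections;
    import java.util.List;

    class AssertionIdiomsSketch {
        void idioms(List<String> results, Throwable lastError) {
            // assertFalse(x) instead of assertTrue(!x): same check, clearer intent and failure output.
            assertFalse(results.isEmpty(), "expected at least one result");
            // assertInstanceOf (JUnit 5.8+) instead of assertTrue(x instanceof T); it also returns the cast value.
            IllegalStateException cause = assertInstanceOf(IllegalStateException.class, lastError);
            assertFalse(cause.toString().isEmpty());
            // assertThrows instead of the try { ...; fail(); } catch (Expected e) { } pattern.
            assertThrows(UnsupportedOperationException.class,
                () -> Collections.<String>emptyList().add("x"));
        }
    }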
@@ -3492,9 +3491,9 @@ private void verifyErrorMessage(ProduceResponse response, String expectedMessage assertEquals(expectedMessage, e1.getCause().getMessage()); } - class AssertEndTxnRequestMatcher implements MockClient.RequestMatcher { + private static class AssertEndTxnRequestMatcher implements MockClient.RequestMatcher { - private TransactionResult requiredResult; + private final TransactionResult requiredResult; private boolean matched = false; AssertEndTxnRequestMatcher(TransactionResult requiredResult) { @@ -3513,7 +3512,7 @@ public boolean matches(AbstractRequest body) { } } - private class MatchingBufferPool extends BufferPool { + private static class MatchingBufferPool extends BufferPool { IdentityHashMap allocatedBuffers; MatchingBufferPool(long totalSize, int batchSize, Metrics metrics, Time time, String metricGrpName) { diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java index 51299ad337ea1..3b539b8803f86 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.producer.internals; import org.apache.kafka.clients.ApiVersions; -import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.MetadataSnapshot; import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.NodeApiVersions; @@ -25,10 +24,8 @@ import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; -import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; @@ -103,7 +100,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; -import org.mockito.Mockito; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; @@ -3541,7 +3537,7 @@ public void testTransactionAbortableExceptionInAddPartitions() { prepareAddPartitionsToTxn(tp, Errors.TRANSACTION_ABORTABLE); runUntil(transactionManager::hasError); - assertTrue(transactionManager.lastError() instanceof TransactionAbortableException); + assertInstanceOf(TransactionAbortableException.class, transactionManager.lastError()); assertAbortableError(TransactionAbortableException.class); } @@ -3559,11 +3555,11 @@ public void testTransactionAbortableExceptionInFindCoordinator() { prepareFindCoordinatorResponse(Errors.TRANSACTION_ABORTABLE, false, CoordinatorType.GROUP, consumerGroupId); runUntil(transactionManager::hasError); - assertTrue(transactionManager.lastError() instanceof TransactionAbortableException); + assertInstanceOf(TransactionAbortableException.class, transactionManager.lastError()); runUntil(sendOffsetsResult::isCompleted); assertFalse(sendOffsetsResult.isSuccessful()); - assertTrue(sendOffsetsResult.error() instanceof TransactionAbortableException); + assertInstanceOf(TransactionAbortableException.class, sendOffsetsResult.error()); 
assertAbortableError(TransactionAbortableException.class); } @@ -3605,10 +3601,10 @@ public void testTransactionAbortableExceptionInAddOffsetsToTxn() { prepareAddOffsetsToTxnResponse(Errors.TRANSACTION_ABORTABLE, consumerGroupId, producerId, epoch); runUntil(transactionManager::hasError); - assertTrue(transactionManager.lastError() instanceof TransactionAbortableException); + assertInstanceOf(TransactionAbortableException.class, transactionManager.lastError()); assertTrue(sendOffsetsResult.isCompleted()); assertFalse(sendOffsetsResult.isSuccessful()); - assertTrue(sendOffsetsResult.error() instanceof TransactionAbortableException); + assertInstanceOf(TransactionAbortableException.class, sendOffsetsResult.error()); assertAbortableError(TransactionAbortableException.class); } @@ -3628,10 +3624,10 @@ public void testTransactionAbortableExceptionInTxnOffsetCommit() { prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, singletonMap(tp, Errors.TRANSACTION_ABORTABLE)); runUntil(transactionManager::hasError); - assertTrue(transactionManager.lastError() instanceof TransactionAbortableException); + assertInstanceOf(TransactionAbortableException.class, transactionManager.lastError()); assertTrue(sendOffsetsResult.isCompleted()); assertFalse(sendOffsetsResult.isSuccessful()); - assertTrue(sendOffsetsResult.error() instanceof TransactionAbortableException); + assertInstanceOf(TransactionAbortableException.class, sendOffsetsResult.error()); assertAbortableError(TransactionAbortableException.class); } @@ -3969,16 +3965,4 @@ private void runUntil(Supplier condition) { ProducerTestUtils.runUntil(sender, condition); } - private Metadata setupMetadata(Cluster cluster) { - Metadata metadataMock = Mockito.mock(Metadata.class); - Mockito.when(metadataMock.fetch()).thenReturn(cluster); - for (String topic: cluster.topics()) { - for (PartitionInfo partInfo: cluster.partitionsForTopic(topic)) { - TopicPartition tp = new TopicPartition(partInfo.topic(), partInfo.partition()); - Mockito.when(metadataMock.currentLeader(tp)).thenReturn(new Metadata.LeaderAndEpoch(Optional.of(partInfo.leader()), Optional.of(999 /* dummy value */))); - } - } - return metadataMock; - } - } diff --git a/clients/src/test/java/org/apache/kafka/common/TopicPartitionTest.java b/clients/src/test/java/org/apache/kafka/common/TopicPartitionTest.java index ede6918500afe..d130f1c6a40ea 100644 --- a/clients/src/test/java/org/apache/kafka/common/TopicPartitionTest.java +++ b/clients/src/test/java/org/apache/kafka/common/TopicPartitionTest.java @@ -30,9 +30,8 @@ * That is, older code won't necessarily be able to deserialize data serialized with newer code. 
*/ public class TopicPartitionTest { - private String topicName = "mytopic"; - private String fileName = "serializedData/topicPartitionSerializedfile"; - private int partNum = 5; + private final String topicName = "mytopic"; + private final int partNum = 5; private void checkValues(TopicPartition deSerTP) { //assert deserialized values are same as original @@ -56,7 +55,7 @@ public void testSerializationRoundtrip() throws IOException, ClassNotFoundExcept public void testTopiPartitionSerializationCompatibility() throws IOException, ClassNotFoundException { // assert serialized TopicPartition object in file (serializedData/topicPartitionSerializedfile) is // deserializable into TopicPartition and is compatible - Object deserializedObject = Serializer.deserialize(fileName); + Object deserializedObject = Serializer.deserialize("serializedData/topicPartitionSerializedfile"); assertInstanceOf(TopicPartition.class, deserializedObject); checkValues((TopicPartition) deserializedObject); } diff --git a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java index 890cbb52a66b1..b85dd3556e007 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java @@ -450,7 +450,7 @@ public void testBaseConfigDefDependents() { private static class IntegerRecommender implements ConfigDef.Recommender { - private boolean hasParent; + private final boolean hasParent; public IntegerRecommender(boolean hasParent) { this.hasParent = hasParent; @@ -684,7 +684,7 @@ public Class loadClass(String name, boolean resolve) throws ClassNotFoundExce } } - private class NestedClass { + private static class NestedClass { } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java b/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java index 0791eca8e42ce..30f0d1ce3ff6b 100644 --- a/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java +++ b/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java @@ -219,13 +219,7 @@ public void shouldThrowNpeWhenAddingCollectionWithNullHeader() { } private int getCount(Headers headers) { - int count = 0; - Iterator
headerIterator = headers.iterator(); - while (headerIterator.hasNext()) { - headerIterator.next(); - count++; - } - return count; + return headers.toArray().length; } static void assertHeader(String key, String value, Header actual) { diff --git a/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java b/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java index 478bfa0668d3a..755d2fbda8195 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java @@ -1181,7 +1181,7 @@ public void testUnknownTaggedFields() { } @Test - public void testLongTaggedString() throws Exception { + public void testLongTaggedString() { char[] chars = new char[1024]; Arrays.fill(chars, 'a'); String longString = new String(chars); diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java index 19e3b6a7a3711..f514281abf314 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java @@ -68,10 +68,10 @@ public class MetricsTest { private static final Logger log = LoggerFactory.getLogger(MetricsTest.class); - private static final double EPS = 0.000001; - private MockTime time = new MockTime(); - private MetricConfig config = new MetricConfig(); + + private final MockTime time = new MockTime(); + private final MetricConfig config = new MetricConfig(); private Metrics metrics; private ExecutorService executorService; @@ -850,7 +850,7 @@ synchronized void processMetrics() { alive.set(false); } - private class ConcurrentMetricOperation implements Runnable { + private static class ConcurrentMetricOperation implements Runnable { private final AtomicBoolean alive; private final String opName; private final Runnable op; @@ -884,7 +884,7 @@ enum StatType { PERCENTILES(9), METER(10); - int id; + final int id; StatType(int id) { this.id = id; } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/internals/IntGaugeSuiteTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/internals/IntGaugeSuiteTest.java index 0e6a91373e7bb..f94aceed202b9 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/internals/IntGaugeSuiteTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/internals/IntGaugeSuiteTest.java @@ -36,12 +36,11 @@ public class IntGaugeSuiteTest { private static IntGaugeSuite createIntGaugeSuite() { MetricConfig config = new MetricConfig(); Metrics metrics = new Metrics(config); - IntGaugeSuite suite = new IntGaugeSuite<>(log, + return new IntGaugeSuite<>(log, "mySuite", metrics, name -> new MetricName(name, "group", "myMetric", Collections.emptyMap()), 3); - return suite; } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/network/CertStores.java b/clients/src/test/java/org/apache/kafka/common/network/CertStores.java index 9fced6b6f4b73..1b013d933e279 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/CertStores.java +++ b/clients/src/test/java/org/apache/kafka/common/network/CertStores.java @@ -56,10 +56,6 @@ public CertStores(boolean server, String commonName, String sanHostName) throws this(server, commonName, new TestSslUtils.CertificateBuilder().sanDnsNames(sanHostName)); } - public CertStores(boolean server, String commonName, InetAddress hostAddress) throws Exception { - this(server, commonName, new 
TestSslUtils.CertificateBuilder().sanIpAddress(hostAddress)); - } - private CertStores(boolean server, String commonName, TestSslUtils.CertificateBuilder certBuilder) throws Exception { this(server, commonName, "RSA", certBuilder, false); } @@ -110,8 +106,8 @@ public Map trustStoreProps() { public static class Builder { private final boolean isServer; + private final List sanDns; private String cn; - private List sanDns; private InetAddress sanIp; private String keyAlgorithm; private boolean usePem; diff --git a/clients/src/test/java/org/apache/kafka/common/network/SaslChannelBuilderTest.java b/clients/src/test/java/org/apache/kafka/common/network/SaslChannelBuilderTest.java index 35ea36367f4f1..2863b4906aa77 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SaslChannelBuilderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SaslChannelBuilderTest.java @@ -182,7 +182,7 @@ private Supplier defaultApiVersionsSupplier() { } private SaslChannelBuilder createChannelBuilder(SecurityProtocol securityProtocol, String saslMechanism) { - Class loginModule = null; + Class loginModule; switch (saslMechanism) { case "PLAIN": loginModule = PlainLoginModule.class; @@ -233,7 +233,7 @@ public boolean abort() throws LoginException { } @Override - public boolean logout() throws LoginException { + public boolean logout() { return true; } } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index fb889f895339a..25a240c2ede18 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -264,7 +264,7 @@ public void testNormalOperation() throws Exception { } else { waitForCondition(() -> cipherMetrics(metrics).size() == 1, "Waiting for cipher metrics to be created."); - assertEquals(Integer.valueOf(5), cipherMetrics(metrics).get(0).metricValue()); + assertEquals(5, cipherMetrics(metrics).get(0).metricValue()); } } @@ -639,7 +639,7 @@ public void testGracefulClose() throws Exception { // Poll until one or more receives complete and then close the server-side connection waitForCondition(() -> { selector.poll(1000); - return selector.completedReceives().size() > 0; + return !selector.completedReceives().isEmpty(); }, 5000, "Receive not completed"); server.closeConnections(); while (selector.disconnected().isEmpty()) { diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslSelectorTest.java index 4f617961a731b..3f7660d5fbdbd 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslSelectorTest.java @@ -128,14 +128,14 @@ public void testConnectionWithCustomKeyManager() throws Exception { TestUtils.waitForCondition(() -> cipherMetrics(metrics).size() == 1, "Waiting for cipher metrics to be created."); - assertEquals(Integer.valueOf(1), cipherMetrics(metrics).get(0).metricValue()); + assertEquals(1, cipherMetrics(metrics).get(0).metricValue()); assertNotNull(selector.channel(node).channelMetadataRegistry().cipherInformation()); selector.close(node); super.verifySelectorEmpty(selector); assertEquals(1, cipherMetrics(metrics).size()); - assertEquals(Integer.valueOf(0), cipherMetrics(metrics).get(0).metricValue()); + assertEquals(0, cipherMetrics(metrics).get(0).metricValue()); 
Security.removeProvider(testProviderCreator.getProvider().getName()); selector.close(); @@ -349,8 +349,7 @@ protected SslTransportLayer buildTransportLayer(SslFactory sslFactory, String id ChannelMetadataRegistry metadataRegistry) { SocketChannel socketChannel = (SocketChannel) key.channel(); SSLEngine sslEngine = sslFactory.createSslEngine(socketChannel.socket()); - TestSslTransportLayer transportLayer = new TestSslTransportLayer(id, key, sslEngine, metadataRegistry); - return transportLayer; + return new TestSslTransportLayer(id, key, sslEngine, metadataRegistry); } /* diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java index b6033747df3ab..aedd89ee83954 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java @@ -93,7 +93,7 @@ public class SslTransportLayerTest { private static final int BUFFER_SIZE = 4 * 1024; - private static Time time = Time.SYSTEM; + private static final Time TIME = Time.SYSTEM; private static class Args { private final String tlsProtocol; @@ -272,7 +272,7 @@ protected TestSslTransportLayer newTransportLayer(String id, SelectionKey key, S }; serverChannelBuilder.configure(args.sslServerConfigs); server = new NioEchoServer(ListenerName.forSecurityProtocol(SecurityProtocol.SSL), SecurityProtocol.SSL, - new TestSecurityConfig(args.sslServerConfigs), "localhost", serverChannelBuilder, null, time); + new TestSecurityConfig(args.sslServerConfigs), "localhost", serverChannelBuilder, null, TIME); server.start(); createSelector(args.sslClientConfigs); @@ -694,7 +694,7 @@ public void testSelectorPollReadSize(Args args) throws Exception { } catch (IOException e) { return false; } - return selector.completedSends().size() > 0; + return !selector.completedSends().isEmpty(); }, "Timed out waiting for message to be sent"); // Wait for echo server to send the message back @@ -880,7 +880,7 @@ private void testIOExceptionsDuringHandshake(Args args, channelBuilder.flushFailureAction = flushFailureAction; channelBuilder.failureIndex = i; channelBuilder.configure(args.sslClientConfigs); - this.selector = new Selector(10000, new Metrics(), time, "MetricGroup", channelBuilder, new LogContext()); + this.selector = new Selector(10000, new Metrics(), TIME, "MetricGroup", channelBuilder, new LogContext()); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); @@ -926,7 +926,7 @@ public void testPeerNotifiedOfHandshakeFailure(Args args) throws Exception { serverChannelBuilder.flushDelayCount = i; server = new NioEchoServer(ListenerName.forSecurityProtocol(SecurityProtocol.SSL), SecurityProtocol.SSL, new TestSecurityConfig(args.sslServerConfigs), - "localhost", serverChannelBuilder, null, time); + "localhost", serverChannelBuilder, null, TIME); server.start(); createSelector(args.sslClientConfigs); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); @@ -967,7 +967,7 @@ private void testClose(Args args, SecurityProtocol securityProtocol, ChannelBuil String node = "0"; server = createEchoServer(args, securityProtocol); clientChannelBuilder.configure(args.sslClientConfigs); - this.selector = new Selector(5000, new Metrics(), time, "MetricGroup", clientChannelBuilder, new LogContext()); + this.selector = new Selector(5000, new Metrics(), TIME, 
"MetricGroup", clientChannelBuilder, new LogContext()); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); @@ -1013,10 +1013,10 @@ public void testInterBrokerSslConfigValidation(Args args) throws Exception { TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs); ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, - true, securityProtocol, config, null, null, time, new LogContext(), + true, securityProtocol, config, null, null, TIME, new LogContext(), defaultApiVersionsSupplier()); server = new NioEchoServer(listenerName, securityProtocol, config, - "localhost", serverChannelBuilder, null, time); + "localhost", serverChannelBuilder, null, TIME); server.start(); this.selector = createSelector(args.sslClientConfigs, null, null, null); @@ -1038,7 +1038,7 @@ public void testInterBrokerSslConfigValidationFailure(Args args) { ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); assertThrows(KafkaException.class, () -> ChannelBuilders.serverChannelBuilder( listenerName, true, securityProtocol, config, - null, null, time, new LogContext(), defaultApiVersionsSupplier())); + null, null, TIME, new LogContext(), defaultApiVersionsSupplier())); } /** @@ -1052,10 +1052,10 @@ public void testServerKeystoreDynamicUpdate(Args args) throws Exception { TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs); ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, - false, securityProtocol, config, null, null, time, new LogContext(), + false, securityProtocol, config, null, null, TIME, new LogContext(), defaultApiVersionsSupplier()); server = new NioEchoServer(listenerName, securityProtocol, config, - "localhost", serverChannelBuilder, null, time); + "localhost", serverChannelBuilder, null, TIME); server.start(); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); @@ -1113,10 +1113,10 @@ public void testServerKeystoreDynamicUpdateWithNewSubjectAltName(Args args) thro TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs); ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, - false, securityProtocol, config, null, null, time, new LogContext(), + false, securityProtocol, config, null, null, TIME, new LogContext(), defaultApiVersionsSupplier()); server = new NioEchoServer(listenerName, securityProtocol, config, - "localhost", serverChannelBuilder, null, time); + "localhost", serverChannelBuilder, null, TIME); server.start(); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); @@ -1179,10 +1179,10 @@ public void testServerTruststoreDynamicUpdate(Args args) throws Exception { TestSecurityConfig config = new TestSecurityConfig(args.sslServerConfigs); ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); ChannelBuilder serverChannelBuilder = ChannelBuilders.serverChannelBuilder(listenerName, - false, securityProtocol, config, null, null, time, new LogContext(), + false, securityProtocol, config, null, null, TIME, new LogContext(), defaultApiVersionsSupplier()); server = new NioEchoServer(listenerName, securityProtocol, config, - "localhost", 
serverChannelBuilder, null, time); + "localhost", serverChannelBuilder, null, TIME); server.start(); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); @@ -1291,12 +1291,12 @@ private Selector createSelector(Map sslClientConfigs, final Inte if (this.selector != null) { this.selector.close(); } - this.selector = new Selector(100 * 5000, new Metrics(), time, "MetricGroup", channelBuilder, new LogContext()); + this.selector = new Selector(100 * 5000, new Metrics(), TIME, "MetricGroup", channelBuilder, new LogContext()); return selector; } private NioEchoServer createEchoServer(Args args, ListenerName listenerName, SecurityProtocol securityProtocol) throws Exception { - return NetworkTestUtils.createEchoServer(listenerName, securityProtocol, new TestSecurityConfig(args.sslServerConfigs), null, time); + return NetworkTestUtils.createEchoServer(listenerName, securityProtocol, new TestSecurityConfig(args.sslServerConfigs), null, TIME); } private NioEchoServer createEchoServer(Args args, SecurityProtocol securityProtocol) throws Exception { @@ -1307,7 +1307,7 @@ private Selector createSelector(Args args) { LogContext logContext = new LogContext(); ChannelBuilder channelBuilder = new SslChannelBuilder(Mode.CLIENT, null, false, logContext); channelBuilder.configure(args.sslClientConfigs); - selector = new Selector(5000, new Metrics(), time, "MetricGroup", channelBuilder, logContext); + selector = new Selector(5000, new Metrics(), TIME, "MetricGroup", channelBuilder, logContext); return selector; } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslVersionsTransportLayerTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslVersionsTransportLayerTest.java index 5e7073ae61837..105f25b51d726 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslVersionsTransportLayerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslVersionsTransportLayerTest.java @@ -132,7 +132,7 @@ public void testTlsDefaults(List serverProtocols, List clientPro * > supported_versions: Lists which versions of TLS the client supports. In particular, if the client * > requests TLS 1.3, then the client version field has the value TLSv1.2 and this extension * > contains the value TLSv1.3; if the client requests TLS 1.2, then the client version field has the - * > value TLSv1.2 and this extension either doesn’t exist or contains the value TLSv1.2 but not the value TLSv1.3. + * > value TLSv1.2 and this extension either doesn't exist or contains the value TLSv1.2 but not the value TLSv1.3. *

* * This means that a TLSv1.3 client can fall back to TLSv1.2 but a TLSv1.2 client can't change protocol to TLSv1.3. diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java index 05640f26b7388..4795798908ff8 100755 --- a/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java @@ -101,7 +101,7 @@ public void testBinaryNode() throws IOException { JsonNode textNode = mapper.readTree(writer.toString()); - assertTrue(textNode.isTextual(), String.format("Expected a JSON string but was: %s", textNode.toString())); + assertTrue(textNode.isTextual(), String.format("Expected a JSON string but was: %s", textNode)); byte[] actual = MessageUtil.jsonNodeToBinary(textNode, "Test base64 JSON string"); assertArrayEquals(expected, actual); } diff --git a/clients/src/test/java/org/apache/kafka/common/record/CompressionRatioEstimatorTest.java b/clients/src/test/java/org/apache/kafka/common/record/CompressionRatioEstimatorTest.java index 7ba51dbdd0459..879530934fba8 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/CompressionRatioEstimatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/CompressionRatioEstimatorTest.java @@ -26,8 +26,8 @@ public class CompressionRatioEstimatorTest { @Test public void testUpdateEstimation() { class EstimationsObservedRatios { - float currentEstimation; - float observedRatio; + final float currentEstimation; + final float observedRatio; EstimationsObservedRatios(float currentEstimation, float observedRatio) { this.currentEstimation = currentEstimation; this.observedRatio = observedRatio; diff --git a/clients/src/test/java/org/apache/kafka/common/record/DefaultRecordBatchTest.java b/clients/src/test/java/org/apache/kafka/common/record/DefaultRecordBatchTest.java index f9d3ff3d57fd8..9475b29fe6bcd 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/DefaultRecordBatchTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/DefaultRecordBatchTest.java @@ -187,7 +187,7 @@ public void buildDefaultRecordBatchWithSequenceWrapAround() { public void testSizeInBytes() { Header[] headers = new Header[] { new RecordHeader("foo", "value".getBytes()), - new RecordHeader("bar", (byte[]) null) + new RecordHeader("bar", null) }; long timestamp = System.currentTimeMillis(); diff --git a/clients/src/test/java/org/apache/kafka/common/record/DefaultRecordTest.java b/clients/src/test/java/org/apache/kafka/common/record/DefaultRecordTest.java index 20502868a5b66..f171bb9c81e09 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/DefaultRecordTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/DefaultRecordTest.java @@ -39,7 +39,7 @@ public class DefaultRecordTest { public void testBasicSerde() throws IOException { Header[] headers = new Header[] { new RecordHeader("foo", "value".getBytes()), - new RecordHeader("bar", (byte[]) null), + new RecordHeader("bar", null), new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes()) }; diff --git a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java index 74b7e2ff137e7..5c09e4dfc13ef 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java @@ -65,7 +65,7
@@ public class FileRecordsTest { - private byte[][] values = new byte[][] { + private final byte[][] values = new byte[][] { "abcd".getBytes(), "efgh".getBytes(), "ijkl".getBytes() diff --git a/clients/src/test/java/org/apache/kafka/common/record/UnalignedFileRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/UnalignedFileRecordsTest.java index 9a05a22ca5dcc..859b5a03dedd1 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/UnalignedFileRecordsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/UnalignedFileRecordsTest.java @@ -31,7 +31,7 @@ public class UnalignedFileRecordsTest { - private byte[][] values = new byte[][] { + private final byte[][] values = new byte[][] { "foo".getBytes(), "bar".getBytes() }; diff --git a/clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequestTest.java index 92bb8741be09d..cac73f7244066 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequestTest.java @@ -39,13 +39,13 @@ import static org.junit.jupiter.api.Assertions.assertEquals; public class AddPartitionsToTxnRequestTest { + private static final int PRODUCER_ID = 10; + private static final short PRODUCER_EPOCH = 1; + private static final int THROTTLE_TIME_MS = 10; + private static final TopicPartition TP_0 = new TopicPartition("topic", 0); + private static final TopicPartition TP_1 = new TopicPartition("topic", 1); private final String transactionalId1 = "transaction1"; private final String transactionalId2 = "transaction2"; - private static int producerId = 10; - private static short producerEpoch = 1; - private static int throttleTimeMs = 10; - private static TopicPartition tp0 = new TopicPartition("topic", 0); - private static TopicPartition tp1 = new TopicPartition("topic", 1); @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.ADD_PARTITIONS_TO_TXN) @@ -55,15 +55,15 @@ public void testConstructor(short version) { if (version < 4) { List partitions = new ArrayList<>(); - partitions.add(tp0); - partitions.add(tp1); + partitions.add(TP_0); + partitions.add(TP_1); - AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId1, producerId, producerEpoch, partitions); + AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId1, PRODUCER_ID, PRODUCER_EPOCH, partitions); request = builder.build(version); assertEquals(transactionalId1, request.data().v3AndBelowTransactionalId()); - assertEquals(producerId, request.data().v3AndBelowProducerId()); - assertEquals(producerEpoch, request.data().v3AndBelowProducerEpoch()); + assertEquals(PRODUCER_ID, request.data().v3AndBelowProducerId()); + assertEquals(PRODUCER_EPOCH, request.data().v3AndBelowProducerEpoch()); assertEquals(partitions, AddPartitionsToTxnRequest.getPartitions(request.data().v3AndBelowTopics())); } else { AddPartitionsToTxnTransactionCollection transactions = createTwoTransactionCollection(); @@ -77,9 +77,9 @@ public void testConstructor(short version) { assertEquals(transactions.find(transactionalId1), reqTxn1); assertEquals(transactions.find(transactionalId2), reqTxn2); } - AddPartitionsToTxnResponse response = request.getErrorResponse(throttleTimeMs, Errors.UNKNOWN_TOPIC_OR_PARTITION.exception()); + AddPartitionsToTxnResponse response = 
request.getErrorResponse(THROTTLE_TIME_MS, Errors.UNKNOWN_TOPIC_OR_PARTITION.exception()); - assertEquals(throttleTimeMs, response.throttleTimeMs()); + assertEquals(THROTTLE_TIME_MS, response.throttleTimeMs()); if (version >= 4) { assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), response.data().errorCode()); @@ -98,8 +98,8 @@ public void testBatchedRequests() { AddPartitionsToTxnRequest request = builder.build(ApiKeys.ADD_PARTITIONS_TO_TXN.latestVersion()); Map> expectedMap = new HashMap<>(); - expectedMap.put(transactionalId1, Collections.singletonList(tp0)); - expectedMap.put(transactionalId2, Collections.singletonList(tp1)); + expectedMap.put(transactionalId1, Collections.singletonList(TP_0)); + expectedMap.put(transactionalId2, Collections.singletonList(TP_1)); assertEquals(expectedMap, request.partitionsByTransaction()); @@ -110,50 +110,50 @@ public void testBatchedRequests() { AddPartitionsToTxnResponse response = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData() .setResultsByTransaction(results) - .setThrottleTimeMs(throttleTimeMs)); + .setThrottleTimeMs(THROTTLE_TIME_MS)); - assertEquals(Collections.singletonMap(tp0, Errors.UNKNOWN_TOPIC_OR_PARTITION), errorsForTransaction(response.getTransactionTopicResults(transactionalId1))); - assertEquals(Collections.singletonMap(tp1, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED), errorsForTransaction(response.getTransactionTopicResults(transactionalId2))); + assertEquals(Collections.singletonMap(TP_0, Errors.UNKNOWN_TOPIC_OR_PARTITION), errorsForTransaction(response.getTransactionTopicResults(transactionalId1))); + assertEquals(Collections.singletonMap(TP_1, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED), errorsForTransaction(response.getTransactionTopicResults(transactionalId2))); } @Test public void testNormalizeRequest() { List partitions = new ArrayList<>(); - partitions.add(tp0); - partitions.add(tp1); + partitions.add(TP_0); + partitions.add(TP_1); - AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId1, producerId, producerEpoch, partitions); + AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId1, PRODUCER_ID, PRODUCER_EPOCH, partitions); AddPartitionsToTxnRequest request = builder.build((short) 3); AddPartitionsToTxnRequest singleton = request.normalizeRequest(); assertEquals(partitions, singleton.partitionsByTransaction().get(transactionalId1)); AddPartitionsToTxnTransaction transaction = singleton.data().transactions().find(transactionalId1); - assertEquals(producerId, transaction.producerId()); - assertEquals(producerEpoch, transaction.producerEpoch()); + assertEquals(PRODUCER_ID, transaction.producerId()); + assertEquals(PRODUCER_EPOCH, transaction.producerEpoch()); } private AddPartitionsToTxnTransactionCollection createTwoTransactionCollection() { AddPartitionsToTxnTopicCollection topics0 = new AddPartitionsToTxnTopicCollection(); topics0.add(new AddPartitionsToTxnTopic() - .setName(tp0.topic()) - .setPartitions(Collections.singletonList(tp0.partition()))); + .setName(TP_0.topic()) + .setPartitions(Collections.singletonList(TP_0.partition()))); AddPartitionsToTxnTopicCollection topics1 = new AddPartitionsToTxnTopicCollection(); topics1.add(new AddPartitionsToTxnTopic() - .setName(tp1.topic()) - .setPartitions(Collections.singletonList(tp1.partition()))); + .setName(TP_1.topic()) + .setPartitions(Collections.singletonList(TP_1.partition()))); AddPartitionsToTxnTransactionCollection transactions = new 
AddPartitionsToTxnTransactionCollection(); transactions.add(new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId1) - .setProducerId(producerId) - .setProducerEpoch(producerEpoch) + .setProducerId(PRODUCER_ID) + .setProducerEpoch(PRODUCER_EPOCH) .setVerifyOnly(true) .setTopics(topics0)); transactions.add(new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId2) - .setProducerId(producerId + 1) - .setProducerEpoch((short) (producerEpoch + 1)) + .setProducerId(PRODUCER_ID + 1) + .setProducerEpoch((short) (PRODUCER_EPOCH + 1)) .setVerifyOnly(false) .setTopics(topics1)); return transactions; diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java index e88a5b94aabfc..b2901dabcf551 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java @@ -29,9 +29,9 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.stream.Collectors; -import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -110,7 +110,7 @@ private static void assertRequestEquals(final DeleteAclsRequest original, final } private static DeleteAclsRequestData requestData(AclBindingFilter... acls) { - return new DeleteAclsRequestData().setFilters(asList(acls).stream() + return new DeleteAclsRequestData().setFilters(Arrays.stream(acls) .map(DeleteAclsRequest::deleteAclsFilter) .collect(Collectors.toList())); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteGroupsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteGroupsResponseTest.java index ff352f1ab87b3..7cf7ad8b3371c 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteGroupsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteGroupsResponseTest.java @@ -34,47 +34,44 @@ public class DeleteGroupsResponseTest { private static final String GROUP_ID_1 = "groupId1"; private static final String GROUP_ID_2 = "groupId2"; private static final int THROTTLE_TIME_MS = 10; - private static DeleteGroupsResponse deleteGroupsResponse; - - static { - deleteGroupsResponse = new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults( - new DeletableGroupResultCollection(Arrays.asList( - new DeletableGroupResult() - .setGroupId(GROUP_ID_1) - .setErrorCode(Errors.NONE.code()), - new DeletableGroupResult() - .setGroupId(GROUP_ID_2) - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code())).iterator() - ) + private static final DeleteGroupsResponse DELETE_GROUPS_RESPONSE = new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults( + new DeletableGroupResultCollection(Arrays.asList( + new DeletableGroupResult() + .setGroupId(GROUP_ID_1) + .setErrorCode(Errors.NONE.code()), + new DeletableGroupResult() + .setGroupId(GROUP_ID_2) + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code())).iterator() ) - .setThrottleTimeMs(THROTTLE_TIME_MS)); - } + ) + .setThrottleTimeMs(THROTTLE_TIME_MS)); + @Test public void testGetErrorWithExistingGroupIds() { - assertEquals(Errors.NONE, deleteGroupsResponse.get(GROUP_ID_1)); - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, deleteGroupsResponse.get(GROUP_ID_2)); + assertEquals(Errors.NONE, 
DELETE_GROUPS_RESPONSE.get(GROUP_ID_1)); + assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, DELETE_GROUPS_RESPONSE.get(GROUP_ID_2)); Map expectedErrors = new HashMap<>(); expectedErrors.put(GROUP_ID_1, Errors.NONE); expectedErrors.put(GROUP_ID_2, Errors.GROUP_AUTHORIZATION_FAILED); - assertEquals(expectedErrors, deleteGroupsResponse.errors()); + assertEquals(expectedErrors, DELETE_GROUPS_RESPONSE.errors()); Map expectedErrorCounts = new HashMap<>(); expectedErrorCounts.put(Errors.NONE, 1); expectedErrorCounts.put(Errors.GROUP_AUTHORIZATION_FAILED, 1); - assertEquals(expectedErrorCounts, deleteGroupsResponse.errorCounts()); + assertEquals(expectedErrorCounts, DELETE_GROUPS_RESPONSE.errorCounts()); } @Test public void testGetErrorWithInvalidGroupId() { - assertThrows(IllegalArgumentException.class, () -> deleteGroupsResponse.get("invalid-group-id")); + assertThrows(IllegalArgumentException.class, () -> DELETE_GROUPS_RESPONSE.get("invalid-group-id")); } @Test public void testGetThrottleTimeMs() { - assertEquals(THROTTLE_TIME_MS, deleteGroupsResponse.throttleTimeMs()); + assertEquals(THROTTLE_TIME_MS, DELETE_GROUPS_RESPONSE.throttleTimeMs()); } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java index 00ce57a6002e2..9ca0e74d08405 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java @@ -101,7 +101,7 @@ public void shouldRoundTripAnyV1() { private static void assertRequestEquals(final DescribeAclsRequest original, final DescribeAclsRequest actual) { final AclBindingFilter originalFilter = original.filter(); - final AclBindingFilter acttualFilter = actual.filter(); - assertEquals(originalFilter, acttualFilter); + final AclBindingFilter actualFilter = actual.filter(); + assertEquals(originalFilter, actualFilter); } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupRequestTest.java index 1694ef5fdf938..e9aa88e9ec521 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupRequestTest.java @@ -38,9 +38,6 @@ public class LeaveGroupRequestTest { private final String groupId = "group_id"; private final String memberIdOne = "member_1"; - private final String instanceIdOne = "instance_1"; - private final String memberIdTwo = "member_2"; - private final String instanceIdTwo = "instance_2"; private final int throttleTimeMs = 10; @@ -51,10 +48,10 @@ public class LeaveGroupRequestTest { public void setUp() { members = Arrays.asList(new MemberIdentity() .setMemberId(memberIdOne) - .setGroupInstanceId(instanceIdOne), + .setGroupInstanceId("instance_1"), new MemberIdentity() - .setMemberId(memberIdTwo) - .setGroupInstanceId(instanceIdTwo)); + .setMemberId("member_2") + .setGroupInstanceId("instance_2")); builder = new LeaveGroupRequest.Builder( groupId, members diff --git a/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java index 4bb5c72281952..0f6a2dec820d8 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java +++ 
b/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java @@ -42,11 +42,6 @@ public class LeaveGroupResponseTest { - private final String memberIdOne = "member_1"; - private final String instanceIdOne = "instance_1"; - private final String memberIdTwo = "member_2"; - private final String instanceIdTwo = "instance_2"; - private final int throttleTimeMs = 10; private List memberResponses; @@ -54,12 +49,12 @@ public class LeaveGroupResponseTest { @BeforeEach public void setUp() { memberResponses = Arrays.asList(new MemberResponse() - .setMemberId(memberIdOne) - .setGroupInstanceId(instanceIdOne) + .setMemberId("member_1") + .setGroupInstanceId("instance_1") .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()), new MemberResponse() - .setMemberId(memberIdTwo) - .setGroupInstanceId(instanceIdTwo) + .setMemberId("member_2") + .setGroupInstanceId("instance_2") .setErrorCode(Errors.FENCED_INSTANCE_ID.code()) ); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java index 7da2271d97c60..4ce705766f8b1 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java @@ -55,11 +55,10 @@ public class OffsetCommitRequestTest { protected static int throttleTimeMs = 10; private static OffsetCommitRequestData data; - private static List topics; @BeforeEach public void setUp() { - topics = Arrays.asList( + List topics = Arrays.asList( new OffsetCommitRequestTopic() .setName(topicOne) .setPartitions(Collections.singletonList( diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java index 6ce7dd93fb904..5077b08836df3 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java @@ -52,7 +52,7 @@ public class OffsetFetchRequestTest { private final String group3 = "group3"; private final String group4 = "group4"; private final String group5 = "group5"; - private List groups = Arrays.asList(group1, group2, group3, group4, group5); + private final List groups = Arrays.asList(group1, group2, group3, group4, group5); private final List listOfVersionsNonBatchOffsetFetch = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitRequestTest.java index 6ebe7b09391d6..d05f3e8781388 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitRequestTest.java @@ -38,11 +38,7 @@ public class TxnOffsetCommitRequestTest extends OffsetCommitRequestTest { - private static String transactionalId = "transactionalId"; - private static int producerId = 10; - private static short producerEpoch = 1; - private static int generationId = 5; - private static Map offsets = new HashMap<>(); + private static final Map OFFSETS = new HashMap<>(); private static TxnOffsetCommitRequest.Builder builder; private static TxnOffsetCommitRequest.Builder builderWithGroupMetadata; @@ -50,32 +46,36 @@ public class TxnOffsetCommitRequestTest extends OffsetCommitRequestTest { 
@Override public void setUp() { super.setUp(); - offsets.clear(); - offsets.put(new TopicPartition(topicOne, partitionOne), + OFFSETS.clear(); + OFFSETS.put(new TopicPartition(topicOne, partitionOne), new CommittedOffset( offset, metadata, Optional.of((int) leaderEpoch))); - offsets.put(new TopicPartition(topicTwo, partitionTwo), + OFFSETS.put(new TopicPartition(topicTwo, partitionTwo), new CommittedOffset( offset, metadata, Optional.of((int) leaderEpoch))); + String transactionalId = "transactionalId"; + int producerId = 10; + short producerEpoch = 1; builder = new TxnOffsetCommitRequest.Builder( transactionalId, groupId, producerId, producerEpoch, - offsets + OFFSETS ); + int generationId = 5; builderWithGroupMetadata = new TxnOffsetCommitRequest.Builder( transactionalId, groupId, producerId, producerEpoch, - offsets, + OFFSETS, memberId, generationId, Optional.of(groupInstanceId) @@ -118,7 +118,7 @@ public void testConstructor() { } else { request = builderWithGroupMetadata.build(version); } - assertEquals(offsets, request.offsets()); + assertEquals(OFFSETS, request.offsets()); assertEquals(expectedTopics, TxnOffsetCommitRequest.getTopics(request.offsets())); TxnOffsetCommitResponse response = diff --git a/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersRequestTest.java index 13e8c8cd94035..1a4890965bb8d 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersRequestTest.java @@ -29,11 +29,11 @@ public class WriteTxnMarkersRequestTest { - private static long producerId = 10L; - private static short producerEpoch = 2; - private static int coordinatorEpoch = 1; - private static TransactionResult result = TransactionResult.COMMIT; - private static TopicPartition topicPartition = new TopicPartition("topic", 73); + private static final long PRODUCER_ID = 10L; + private static final short PRODUCER_EPOCH = 2; + private static final int COORDINATOR_EPOCH = 1; + private static final TransactionResult RESULT = TransactionResult.COMMIT; + private static final TopicPartition TOPIC_PARTITION = new TopicPartition("topic", 73); protected static int throttleTimeMs = 10; @@ -43,8 +43,8 @@ public class WriteTxnMarkersRequestTest { public void setUp() { markers = Collections.singletonList( new WriteTxnMarkersRequest.TxnMarkerEntry( - producerId, producerEpoch, coordinatorEpoch, - result, Collections.singletonList(topicPartition)) + PRODUCER_ID, PRODUCER_EPOCH, COORDINATOR_EPOCH, + RESULT, Collections.singletonList(TOPIC_PARTITION)) ); } @@ -55,11 +55,11 @@ public void testConstructor() { WriteTxnMarkersRequest request = builder.build(version); assertEquals(1, request.markers().size()); WriteTxnMarkersRequest.TxnMarkerEntry marker = request.markers().get(0); - assertEquals(producerId, marker.producerId()); - assertEquals(producerEpoch, marker.producerEpoch()); - assertEquals(coordinatorEpoch, marker.coordinatorEpoch()); - assertEquals(result, marker.transactionResult()); - assertEquals(Collections.singletonList(topicPartition), marker.partitions()); + assertEquals(PRODUCER_ID, marker.producerId()); + assertEquals(PRODUCER_EPOCH, marker.producerEpoch()); + assertEquals(COORDINATOR_EPOCH, marker.coordinatorEpoch()); + assertEquals(RESULT, marker.transactionResult()); + assertEquals(Collections.singletonList(TOPIC_PARTITION), marker.partitions()); } } @@ -72,7 +72,7 @@ public void 
testGetErrorResponse() { request.getErrorResponse(throttleTimeMs, Errors.UNKNOWN_PRODUCER_ID.exception()); assertEquals(Collections.singletonMap( - topicPartition, Errors.UNKNOWN_PRODUCER_ID), errorResponse.errorsByProducerId().get(producerId)); + TOPIC_PARTITION, Errors.UNKNOWN_PRODUCER_ID), errorResponse.errorsByProducerId().get(PRODUCER_ID)); assertEquals(Collections.singletonMap(Errors.UNKNOWN_PRODUCER_ID, 1), errorResponse.errorCounts()); // Write txn marker has no throttle time defined in response. assertEquals(0, errorResponse.throttleTimeMs()); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersResponseTest.java index 2a07412d0d9df..9b146ef318fe7 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersResponseTest.java @@ -29,22 +29,22 @@ public class WriteTxnMarkersResponseTest { - private static long producerIdOne = 1L; - private static long producerIdTwo = 2L; + private static final long PRODUCER_ID_ONE = 1L; + private static final long PRODUCER_ID_TWO = 2L; - private static TopicPartition tp1 = new TopicPartition("topic", 1); - private static TopicPartition tp2 = new TopicPartition("topic", 2); + private static final TopicPartition TP_1 = new TopicPartition("topic", 1); + private static final TopicPartition TP_2 = new TopicPartition("topic", 2); - private static Errors pidOneError = Errors.UNKNOWN_PRODUCER_ID; - private static Errors pidTwoError = Errors.INVALID_PRODUCER_EPOCH; + private static final Errors PID_ONE_ERROR = Errors.UNKNOWN_PRODUCER_ID; + private static final Errors PID_TWO_ERROR = Errors.INVALID_PRODUCER_EPOCH; private static Map> errorMap; @BeforeEach public void setUp() { errorMap = new HashMap<>(); - errorMap.put(producerIdOne, Collections.singletonMap(tp1, pidOneError)); - errorMap.put(producerIdTwo, Collections.singletonMap(tp2, pidTwoError)); + errorMap.put(PRODUCER_ID_ONE, Collections.singletonMap(TP_1, PID_ONE_ERROR)); + errorMap.put(PRODUCER_ID_TWO, Collections.singletonMap(TP_2, PID_TWO_ERROR)); } @Test @@ -54,7 +54,7 @@ public void testConstructor() { expectedErrorCounts.put(Errors.INVALID_PRODUCER_EPOCH, 1); WriteTxnMarkersResponse response = new WriteTxnMarkersResponse(errorMap); assertEquals(expectedErrorCounts, response.errorCounts()); - assertEquals(Collections.singletonMap(tp1, pidOneError), response.errorsByProducerId().get(producerIdOne)); - assertEquals(Collections.singletonMap(tp2, pidTwoError), response.errorsByProducerId().get(producerIdTwo)); + assertEquals(Collections.singletonMap(TP_1, PID_ONE_ERROR), response.errorsByProducerId().get(PRODUCER_ID_ONE)); + assertEquals(Collections.singletonMap(TP_2, PID_TWO_ERROR), response.errorsByProducerId().get(PRODUCER_ID_TWO)); } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java index 0a0466f6ff1e6..d705c75ab3fe9 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java @@ -52,13 +52,12 @@ import static org.junit.jupiter.api.Assertions.assertThrows; public class ClientAuthenticationFailureTest { - private static 
MockTime time = new MockTime(50); + private static final MockTime TIME = new MockTime(50); private NioEchoServer server; private Map saslServerConfigs; private Map saslClientConfigs; private final String topic = "test"; - private TestJaasConfig testJaasConfig; @BeforeEach public void setup() throws Exception { @@ -72,7 +71,7 @@ public void setup() throws Exception { saslClientConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT"); saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); - testJaasConfig = TestJaasConfig.createConfiguration("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig testJaasConfig = TestJaasConfig.createConfiguration("PLAIN", Arrays.asList("PLAIN")); testJaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "anotherpassword"); server = createEchoServer(securityProtocol); } @@ -140,6 +139,6 @@ private NioEchoServer createEchoServer(SecurityProtocol securityProtocol) throws private NioEchoServer createEchoServer(ListenerName listenerName, SecurityProtocol securityProtocol) throws Exception { return NetworkTestUtils.createEchoServer(listenerName, securityProtocol, - new TestSecurityConfig(saslServerConfigs), new CredentialCache(), time); + new TestSecurityConfig(saslServerConfigs), new CredentialCache(), TIME); } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java index f80170063ecce..477c6283bb498 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java @@ -54,9 +54,6 @@ public abstract class SaslAuthenticatorFailureDelayTest { private final MockTime time = new MockTime(1); private NioEchoServer server; private Selector selector; - private ChannelBuilder channelBuilder; - private CertStores serverCertStores; - private CertStores clientCertStores; private Map saslClientConfigs; private Map saslServerConfigs; private CredentialCache credentialCache; @@ -70,8 +67,8 @@ public SaslAuthenticatorFailureDelayTest(int failedAuthenticationDelayMs) { @BeforeEach public void setup() throws Exception { LoginManager.closeAll(); - serverCertStores = new CertStores(true, "localhost"); - clientCertStores = new CertStores(false, "localhost"); + CertStores serverCertStores = new CertStores(true, "localhost"); + CertStores clientCertStores = new CertStores(false, "localhost"); saslServerConfigs = serverCertStores.getTrustingConfig(clientCertStores); saslClientConfigs = clientCertStores.getTrustingConfig(serverCertStores); credentialCache = new CredentialCache(); @@ -203,7 +200,7 @@ private void createSelector(SecurityProtocol securityProtocol, Map saslClientConfigs; private Map saslServerConfigs; private CredentialCache credentialCache; @@ -169,8 +167,8 @@ public class SaslAuthenticatorTest { public void setup() throws Exception { LoginManager.closeAll(); time = Time.SYSTEM; - serverCertStores = new CertStores(true, "localhost"); - clientCertStores = new CertStores(false, "localhost"); + CertStores serverCertStores = new CertStores(true, "localhost"); + CertStores clientCertStores = new CertStores(false, "localhost"); saslServerConfigs = serverCertStores.getTrustingConfig(clientCertStores); saslClientConfigs = clientCertStores.getTrustingConfig(serverCertStores); credentialCache = new CredentialCache(); 
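The authenticator-test hunks above all apply one cleanup pattern: fixtures that never change after construction become final (static final with an UPPER_CASE name when shared, like TIME), and fields that only setUp() ever touches (testJaasConfig, the two CertStores) are demoted to locals. A minimal sketch of that pattern, using a hypothetical ExampleClientTest rather than any class from this patch:

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;

class ExampleClientTest {
    // Shared, immutable fixture state: static final, UPPER_CASE.
    private static final long TIMEOUT_MS = 50L;

    // State the test methods actually read stays an instance field,
    // reassigned fresh for every test.
    private StringBuilder session;

    @BeforeEach
    void setUp() {
        // Needed only while wiring the fixture, so a local, not a field.
        String user = "alice";
        session = new StringBuilder(user);
    }

    @Test
    void buildsSessionName() {
        session.append('@').append(TIMEOUT_MS);
        assertEquals("alice@50", session.toString());
    }
}

The payoff is the same as in the patch: the remaining fields document exactly which state the tests depend on, and nothing mutable leaks between test methods by accident.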
@@ -661,10 +659,9 @@ public void testTokenReauthenticationOverSaslScram() throws Exception { @Override public TokenInformation token(String tokenId) { TokenInformation baseTokenInfo = super.token(tokenId); - long thisLifetimeMs = System.currentTimeMillis() + tokenLifetime.apply(++callNum).longValue(); - TokenInformation retvalTokenInfo = new TokenInformation(baseTokenInfo.tokenId(), baseTokenInfo.owner(), + long thisLifetimeMs = System.currentTimeMillis() + tokenLifetime.apply(++callNum); + return new TokenInformation(baseTokenInfo.tokenId(), baseTokenInfo.owner(), baseTokenInfo.renewers(), baseTokenInfo.issueTimestamp(), thisLifetimeMs, thisLifetimeMs); - return retvalTokenInfo; } }; server = createEchoServer(ListenerName.forSecurityProtocol(securityProtocol), securityProtocol, tokenCache); @@ -1092,7 +1089,7 @@ public void testClientAuthenticateCallbackHandler() throws Exception { public void testServerAuthenticateCallbackHandler() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); - jaasConfig.createOrUpdateEntry(TestJaasConfig.LOGIN_CONTEXT_SERVER, PlainLoginModule.class.getName(), new HashMap()); + jaasConfig.createOrUpdateEntry(TestJaasConfig.LOGIN_CONTEXT_SERVER, PlainLoginModule.class.getName(), new HashMap<>()); String callbackPrefix = ListenerName.forSecurityProtocol(securityProtocol).saslMechanismConfigPrefix("PLAIN"); saslServerConfigs.put(callbackPrefix + BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS, TestServerCallbackHandler.class.getName()); @@ -2199,7 +2196,7 @@ private void checkClientConnection(String node) throws Exception { NetworkTestUtils.checkClientConnection(selector, node, 100, 10); } - private void closeClientConnectionIfNecessary() throws Exception { + private void closeClientConnectionIfNecessary() { if (selector != null) { selector.close(); selector = null; @@ -2232,8 +2229,7 @@ private ChannelState createAndCheckClientConnectionFailure(SecurityProtocol secu throws Exception { try { createClientConnection(securityProtocol, node); - ChannelState finalState = NetworkTestUtils.waitForChannelClose(selector, node, ChannelState.State.AUTHENTICATION_FAILED); - return finalState; + return NetworkTestUtils.waitForChannelClose(selector, node, ChannelState.State.AUTHENTICATION_FAILED); } finally { closeClientConnectionIfNecessary(); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/TestJaasConfig.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/TestJaasConfig.java index f7ad140cb075e..9f119dfdaa78c 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/TestJaasConfig.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/TestJaasConfig.java @@ -39,7 +39,7 @@ public class TestJaasConfig extends Configuration { static final String USERNAME = "myuser"; static final String PASSWORD = "mypassword"; - private Map entryMap = new HashMap<>(); + private final Map entryMap = new HashMap<>(); public static TestJaasConfig createConfiguration(String clientMechanism, List serverMechanisms) { TestJaasConfig config = new TestJaasConfig(); diff --git a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java index 2e063968f2e0a..a6e8f9714dc27 100644 --- 
a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java @@ -24,7 +24,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; public class KerberosNameTest { @@ -132,10 +132,8 @@ public void testInvalidRules() { } private void testInvalidRule(List rules) { - try { - KerberosShortNamer.fromUnparsedRules("REALM.COM", rules); - fail("should have thrown IllegalArgumentException"); - } catch (IllegalArgumentException e) { - } + assertThrows( + IllegalArgumentException.class, + () -> KerberosShortNamer.fromUnparsedRules("REALM.COM", rules)); } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java index 9c785c4eae392..101530a05c5f0 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java @@ -17,7 +17,7 @@ package org.apache.kafka.common.security.kerberos; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import org.junit.jupiter.api.Test; @@ -34,16 +34,15 @@ public void testReplaceParameters() throws BadFormatString { assertEquals(KerberosRule.replaceParameters("hello $0", new String[]{"no recursion $1"}), "hello no recursion $1"); // negative test cases - try { - KerberosRule.replaceParameters("$0", new String[]{}); - fail("An out-of-bounds parameter number should trigger an exception!"); - } catch (BadFormatString bfs) { - } - try { - KerberosRule.replaceParameters("hello $a", new String[]{"does not matter"}); - fail("A malformed parameter name should trigger an exception!"); - } catch (BadFormatString bfs) { - } + assertThrows( + BadFormatString.class, + () -> KerberosRule.replaceParameters("$0", new String[]{}), + "An out-of-bounds parameter number should trigger an exception!"); + + assertThrows( + BadFormatString.class, + () -> KerberosRule.replaceParameters("hello $a", new String[]{"does not matter"}), + "A malformed parameter name should trigger an exception!"); } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java index 50ed3fd23c19b..b1aaa0b3d44cc 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java @@ -51,8 +51,8 @@ public class OAuthBearerSaslClientTest { private final String errorMessage = "Error as expected!"; public class ExtensionsCallbackHandler implements AuthenticateCallbackHandler { + private final boolean toThrow; private boolean configured = false; - private boolean toThrow; ExtensionsCallbackHandler(boolean toThrow) { this.toThrow = toThrow; diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java 
b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java index b2cd0e88914e5..7ece041e68e1f 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java @@ -202,11 +202,6 @@ public void illegalToken() throws Exception { private byte[] clientInitialResponse(String authorizationId) throws OAuthBearerConfigException, IOException, UnsupportedCallbackException { - return clientInitialResponse(authorizationId, false); - } - - private byte[] clientInitialResponse(String authorizationId, boolean illegalToken) - throws OAuthBearerConfigException, IOException, UnsupportedCallbackException { return clientInitialResponse(authorizationId, false, Collections.emptyMap()); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/expiring/ExpiringCredentialRefreshingLoginTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/expiring/ExpiringCredentialRefreshingLoginTest.java index cd946fa0eb069..32895e624b753 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/expiring/ExpiringCredentialRefreshingLoginTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/expiring/ExpiringCredentialRefreshingLoginTest.java @@ -347,7 +347,7 @@ public void testRefresh() throws Exception { for (int i = 0; i < numExpectedRefreshes; ++i) { KafkaFutureImpl waiter = waiters.get(i); assertTrue(waiter.isDone()); - assertEquals((i + 1) * 1000 * 60 * refreshEveryMinutes, waiter.get().longValue() - startMs); + assertEquals((i + 1) * 1000 * 60 * refreshEveryMinutes, waiter.get() - startMs); } assertFalse(waiters.get(numExpectedRefreshes).isDone()); @@ -438,7 +438,7 @@ public void testRefreshWithExpirationSmallerThanConfiguredBuffers() throws Excep for (int i = 0; i < numExpectedRefreshes; ++i) { KafkaFutureImpl waiter = waiters.get(i); assertTrue(waiter.isDone()); - assertEquals((i + 1) * 1000 * 60 * refreshEveryMinutes, waiter.get().longValue() - startMs); + assertEquals((i + 1) * 1000 * 60 * refreshEveryMinutes, waiter.get() - startMs); } assertFalse(waiters.get(numExpectedRefreshes).isDone()); @@ -522,7 +522,7 @@ public long getCreateMs() { for (int i = 0; i < numExpectedRefreshes; ++i) { KafkaFutureImpl waiter = waiters.get(i); assertTrue(waiter.isDone()); - assertEquals((i + 1) * 1000 * 60 * refreshEveryMinutes, waiter.get().longValue() - startMs); + assertEquals((i + 1) * 1000 * 60 * refreshEveryMinutes, waiter.get() - startMs); } assertFalse(waiters.get(numExpectedRefreshes).isDone()); @@ -603,7 +603,7 @@ public void testRefreshWithMinPeriodIntrusion() throws Exception { KafkaFutureImpl waiter = waiters.get(i); assertTrue(waiter.isDone()); assertEquals((i + 1) * 1000 * (60 * refreshEveryMinutes + bufferIntrusionSeconds), - waiter.get().longValue() - startMs); + waiter.get() - startMs); } assertFalse(waiters.get(numExpectedRefreshes).isDone()); @@ -683,7 +683,7 @@ public void testRefreshWithPreExpirationBufferIntrusion() throws Exception { KafkaFutureImpl waiter = waiters.get(i); assertTrue(waiter.isDone()); assertEquals((i + 1) * 1000 * (60 * refreshEveryMinutes - bufferIntrusionSeconds), - waiter.get().longValue() - startMs); + waiter.get() - startMs); } assertFalse(waiters.get(numExpectedRefreshes).isDone()); diff --git 
a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksTest.java index cc19c74e66f70..7e60313bb3043 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksTest.java @@ -236,7 +236,7 @@ public String getBody() { * A mock ScheduledExecutorService just for the test. Note that this is not a generally reusable mock as it does not * implement some interfaces like scheduleWithFixedDelay, etc. And it does not return ScheduledFuture correctly. */ - private class MockExecutorService implements MockTime.Listener { + private static class MockExecutorService implements MockTime.Listener { private final MockTime time; private final TreeMap>>> waiters = new TreeMap<>(); diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJwsTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJwsTest.java index af259c64ecaee..aa105685c2f9e 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJwsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJwsTest.java @@ -71,8 +71,8 @@ public void validCompactSerialization() { assertEquals(4, jws.claims().size()); assertEquals(subject, jws.claims().get("sub")); assertEquals(subject, jws.principalName()); - assertEquals(issuedAt, Number.class.cast(jws.claims().get("iat")).longValue()); - assertEquals(expirationTime, Number.class.cast(jws.claims().get("exp")).longValue()); + assertEquals(issuedAt, ((Number) jws.claims().get("iat")).longValue()); + assertEquals(expirationTime, ((Number) jws.claims().get("exp")).longValue()); assertEquals(expirationTime * 1000, jws.lifetimeMs()); assertEquals(scope, jws.claims().get("scope")); assertEquals(new HashSet<>(scope), jws.scope()); @@ -110,8 +110,8 @@ private static String compactSerialization(String subject, Long issuedAt, Long e String headerJson = "{\"alg\":\"" + algorithm + "\"}"; String encodedHeader = encoder.encodeToString(headerJson.getBytes(StandardCharsets.UTF_8)); String subjectJson = subject != null ? "\"sub\":\"" + subject + "\"" : null; - String issuedAtJson = issuedAt != null ? "\"iat\":" + issuedAt.longValue() : null; - String expirationTimeJson = expirationTime != null ? "\"exp\":" + expirationTime.longValue() : null; + String issuedAtJson = issuedAt != null ? "\"iat\":" + issuedAt : null; + String expirationTimeJson = expirationTime != null ? "\"exp\":" + expirationTime : null; String scopeJson = scope != null ? 
scopeJson(scope) : null; String claimsJson = claimsJson(subjectJson, issuedAtJson, expirationTimeJson, scopeJson); String encodedClaims = encoder.encodeToString(claimsJson.getBytes(StandardCharsets.UTF_8)); diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java index 443a1de4b51c2..e29b7c069c984 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java @@ -105,8 +105,8 @@ public void validOptionsWithExplicitOptionValues() options.put("unsecuredLoginListClaim_" + "emptyList1", ""); options.put("unsecuredLoginListClaim_" + "emptyList2", ","); options.put("unsecuredLoginNumberClaim_" + "number", "1"); - long lifetmeSeconds = 10000; - options.put("unsecuredLoginLifetimeSeconds", String.valueOf(lifetmeSeconds)); + long lifetimeSeconds = 10000; + options.put("unsecuredLoginLifetimeSeconds", String.valueOf(lifetimeSeconds)); options.put("unsecuredLoginPrincipalClaimName", principalClaimName); if (scopeClaimNameOptionValue != null) options.put("unsecuredLoginScopeClaimName", scopeClaimNameOptionValue); @@ -120,7 +120,7 @@ public void validOptionsWithExplicitOptionValues() OAuthBearerUnsecuredJws jws = (OAuthBearerUnsecuredJws) callback.token(); assertNotNull(jws, "create token failed"); long startMs = mockTime.milliseconds(); - confirmCorrectValues(jws, user, startMs, lifetmeSeconds * 1000); + confirmCorrectValues(jws, user, startMs, lifetimeSeconds * 1000); Map claims = jws.claims(); assertEquals(new HashSet<>(Arrays.asList(actualScopeClaimName, principalClaimName, "iat", "exp", "number", "list", "emptyList1", "emptyList2")), claims.keySet()); diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java index 88241b72a97a2..ef8997a7bc7a9 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java @@ -44,7 +44,7 @@ public void validateClaimForExistenceAndType() throws OAuthBearerIllegalTokenExc appendCommaJsonText(sb, "sub", "principalName"); if (useErrorValue) appendCommaJsonText(sb, claimName, 1); - else if (exists != null && exists.booleanValue()) + else if (exists) appendCommaJsonText(sb, claimName, claimName); sb.append("}"); String compactSerialization = HEADER_COMPACT_SERIALIZATION + Base64.getUrlEncoder().withoutPadding() @@ -52,7 +52,7 @@ else if (exists != null && exists.booleanValue()) OAuthBearerUnsecuredJws testJwt = new OAuthBearerUnsecuredJws(compactSerialization, "sub", "scope"); OAuthBearerValidationResult result = OAuthBearerValidationUtils .validateClaimForExistenceAndType(testJwt, required, claimName, String.class); - if (useErrorValue || required && !exists.booleanValue()) + if (useErrorValue || required && !exists) assertTrue(isFailureWithMessageAndNoFailureScope(result)); else assertTrue(isSuccess(result)); diff --git 
a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramMessagesTest.java b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramMessagesTest.java index a286085714abd..066458a68910e 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramMessagesTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramMessagesTest.java @@ -280,7 +280,7 @@ public void invalidServerFinalMessage() { checkInvalidScramMessage(ServerFinalMessage.class, invalid); // Invalid server signature - invalid = String.format("v=1=23"); + invalid = "v=1=23"; checkInvalidScramMessage(ServerFinalMessage.class, invalid); // Invalid extensions diff --git a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java index 7e83b76f6e131..4f1592b84fbf5 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java @@ -36,13 +36,12 @@ public class ScramSaslServerTest { private static final String USER_A = "userA"; private static final String USER_B = "userB"; - private ScramMechanism mechanism; private ScramFormatter formatter; private ScramSaslServer saslServer; @BeforeEach public void setUp() throws Exception { - mechanism = ScramMechanism.SCRAM_SHA_256; + ScramMechanism mechanism = ScramMechanism.SCRAM_SHA_256; formatter = new ScramFormatter(mechanism); CredentialCache.Cache credentialCache = new CredentialCache().createCache(mechanism.mechanismName(), ScramCredential.class); credentialCache.put(USER_A, formatter.generateCredential("passwordA", 4096)); diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapperTest.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapperTest.java index 8757dfee60b4f..129e383221e86 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapperTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapperTest.java @@ -171,7 +171,7 @@ void testNeverExpiringX509Certificate() throws Exception { assertDoesNotThrow(() -> wrappedCert.checkValidity(dateRecentPast)); } else { // Cert not valid yet - Exception origException = assertThrows(CertificateException.class, + assertThrows(CertificateException.class, () -> cert.checkValidity(dateRecentPast)); // The wrapped certificate class does not check dates at all assertDoesNotThrow(() -> wrappedCert.checkValidity(dateRecentPast)); diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/SslPrincipalMapperTest.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/SslPrincipalMapperTest.java index ff5a018d4bf9f..52e10fd36b99f 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/SslPrincipalMapperTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/SslPrincipalMapperTest.java @@ -19,7 +19,7 @@ import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; public class SslPrincipalMapperTest { @@ -56,11 +56,7 @@ public void testInvalidRules() { }
private void testInvalidRule(String rules) { - try { - System.out.println(SslPrincipalMapper.fromRules(rules)); - fail("should have thrown IllegalArgumentException"); - } catch (IllegalArgumentException e) { - } + assertThrows(IllegalArgumentException.class, () -> SslPrincipalMapper.fromRules(rules)); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/mock/TestKeyManagerFactory.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/mock/TestKeyManagerFactory.java index 7c9c0dc094c61..596b4e9e5aca7 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/mock/TestKeyManagerFactory.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/mock/TestKeyManagerFactory.java @@ -61,8 +61,8 @@ public static class TestKeyManager extends X509ExtendedKeyManager { public static final String ALIAS = "TestAlias"; private static final String CN = "localhost"; private static final String SIGNATURE_ALGORITHM = "RSA"; - private KeyPair keyPair; - private X509Certificate certificate; + private final KeyPair keyPair; + private final X509Certificate certificate; protected TestKeyManager() { try { diff --git a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java index 15c95a126c349..62ea29b7be348 100644 --- a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java +++ b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java @@ -26,6 +26,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -62,7 +63,7 @@ public class SerializationTest { } }; - private class DummyClass { + private static class DummyClass { } @SuppressWarnings("unchecked") @@ -147,7 +148,7 @@ public void stringSerdeConfigureThrowsOnUnknownEncoding() { @SuppressWarnings("unchecked") @Test public void listSerdeShouldReturnEmptyCollection() { - List testData = Arrays.asList(); + List testData = Collections.emptyList(); Serde> listSerde = Serdes.ListSerde(ArrayList.class, Serdes.Integer()); assertEquals(testData, listSerde.deserializer().deserialize(topic, listSerde.serializer().serialize(topic, testData)), diff --git a/clients/src/test/java/org/apache/kafka/common/utils/AbstractIteratorTest.java b/clients/src/test/java/org/apache/kafka/common/utils/AbstractIteratorTest.java index 5f3ca4ddf0d17..466d2362f32e8 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/AbstractIteratorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/AbstractIteratorTest.java @@ -54,7 +54,7 @@ public void testEmptyIterator() { } static class ListIterator extends AbstractIterator { - private List list; + private final List list; private int position = 0; public ListIterator(List l) { diff --git a/clients/src/test/java/org/apache/kafka/common/utils/ConfigUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/ConfigUtilsTest.java index 540229bd65597..7716583e1a335 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/ConfigUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/ConfigUtilsTest.java @@ -27,6 +27,7 @@ import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; 
import static org.junit.jupiter.api.Assertions.assertNull; @@ -161,10 +162,10 @@ public void testConfigMapToRedactedStringForEmptyMap() { public void testConfigMapToRedactedStringWithSecrets() { Map testMap1 = new HashMap<>(); testMap1.put("myString", "whatever"); - testMap1.put("myInt", Integer.valueOf(123)); + testMap1.put("myInt", 123); testMap1.put("myPassword", "foosecret"); testMap1.put("myString2", null); - testMap1.put("myUnknown", Integer.valueOf(456)); + testMap1.put("myUnknown", 456); assertEquals("{myInt=123, myPassword=(redacted), myString=\"whatever\", myString2=null, myUnknown=(redacted)}", ConfigUtils.configMapToRedactedString(testMap1, CONFIG)); } @@ -172,7 +173,7 @@ public void testConfigMapToRedactedStringWithSecrets() { @Test public void testGetBoolean() { String key = "test.key"; - Boolean defaultValue = true; + boolean defaultValue = true; Map config = new HashMap<>(); config.put("some.other.key", false); @@ -180,15 +181,15 @@ public void testGetBoolean() { config = new HashMap<>(); config.put(key, false); - assertEquals(false, ConfigUtils.getBoolean(config, key, defaultValue)); + assertFalse(ConfigUtils.getBoolean(config, key, defaultValue)); config = new HashMap<>(); config.put(key, "false"); - assertEquals(false, ConfigUtils.getBoolean(config, key, defaultValue)); + assertFalse(ConfigUtils.getBoolean(config, key, defaultValue)); config = new HashMap<>(); config.put(key, "not-a-boolean"); - assertEquals(false, ConfigUtils.getBoolean(config, key, defaultValue)); + assertFalse(ConfigUtils.getBoolean(config, key, defaultValue)); config = new HashMap<>(); config.put(key, 5); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java b/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java index c05df71372fdb..2c6d148e3a1e2 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java @@ -34,8 +34,8 @@ public void testUpdate() { Checksum crc3 = Crc32C.create(); crc1.update(bytes, 0, len); - for (int i = 0; i < len; i++) - crc2.update(bytes[i]); + for (byte b : bytes) + crc2.update(b); crc3.update(bytes, 0, len / 2); crc3.update(bytes, len / 2, len - len / 2); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/ImplicitLinkedHashMultiCollectionTest.java b/clients/src/test/java/org/apache/kafka/common/utils/ImplicitLinkedHashMultiCollectionTest.java index 2d3c4b519a1f1..da75d40f30ca5 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/ImplicitLinkedHashMultiCollectionTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/ImplicitLinkedHashMultiCollectionTest.java @@ -114,11 +114,9 @@ public void testEnlargement() { new TestElement(101), new TestElement(105) }; - for (int i = 0; i < testElements.length; i++) { - assertTrue(multiSet.add(testElements[i])); - } - for (int i = 0; i < testElements.length; i++) { - assertFalse(multiSet.add(testElements[i])); + for (TestElement testElement : testElements) { + assertTrue(multiSet.add(testElement)); + assertFalse(multiSet.add(testElement)); } assertEquals(23, multiSet.numSlots()); assertEquals(testElements.length, multiSet.size()); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/MockScheduler.java b/clients/src/test/java/org/apache/kafka/common/utils/MockScheduler.java index 98023f8a4e074..97560c3983d3f 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/MockScheduler.java +++ 
b/clients/src/test/java/org/apache/kafka/common/utils/MockScheduler.java @@ -74,11 +74,7 @@ public synchronized void addWaiter(long delayMs, KafkaFutureImpl waiter) { waiter.complete(timeMs); } else { long triggerTimeMs = timeMs + delayMs; - List> futures = waiters.get(triggerTimeMs); - if (futures == null) { - futures = new ArrayList<>(); - waiters.put(triggerTimeMs, futures); - } + List> futures = waiters.computeIfAbsent(triggerTimeMs, k -> new ArrayList<>()); futures.add(waiter); } } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/SanitizerTest.java b/clients/src/test/java/org/apache/kafka/common/utils/SanitizerTest.java index 3024bd3583025..e78263a32e7e4 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/SanitizerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/SanitizerTest.java @@ -74,7 +74,7 @@ public interface TestStatMBean { int getValue(); } - public class TestStat implements TestStatMBean { + public static class TestStat implements TestStatMBean { public int getValue() { return 1; } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/SecurityUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/SecurityUtilsTest.java index e651092bc44ed..a26fa4d6e8871 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/SecurityUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/SecurityUtilsTest.java @@ -34,11 +34,11 @@ public class SecurityUtilsTest { - private SecurityProviderCreator testScramSaslServerProviderCreator = new TestScramSaslServerProviderCreator(); - private SecurityProviderCreator testPlainSaslServerProviderCreator = new TestPlainSaslServerProviderCreator(); + private final SecurityProviderCreator testScramSaslServerProviderCreator = new TestScramSaslServerProviderCreator(); + private final SecurityProviderCreator testPlainSaslServerProviderCreator = new TestPlainSaslServerProviderCreator(); - private Provider testScramSaslServerProvider = testScramSaslServerProviderCreator.getProvider(); - private Provider testPlainSaslServerProvider = testPlainSaslServerProviderCreator.getProvider(); + private final Provider testScramSaslServerProvider = testScramSaslServerProviderCreator.getProvider(); + private final Provider testPlainSaslServerProvider = testPlainSaslServerProviderCreator.getProvider(); private void clearTestProviders() { Security.removeProvider(testScramSaslServerProvider.getName()); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index 467dd53ac76d0..89f050f68ca50 100755 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -1008,7 +1008,7 @@ else if (failure == null) }; } - private class TestException extends Exception { + private static class TestException extends Exception { final String key; TestException(String key) { this.key = key; diff --git a/clients/src/test/java/org/apache/kafka/test/MetricsBench.java b/clients/src/test/java/org/apache/kafka/test/MetricsBench.java index 4063d161f96a1..befd478da2466 100644 --- a/clients/src/test/java/org/apache/kafka/test/MetricsBench.java +++ b/clients/src/test/java/org/apache/kafka/test/MetricsBench.java @@ -50,7 +50,7 @@ public static void main(String[] args) { for (int i = 0; i < iters; i++) parent.record(i); double elapsed = (System.nanoTime() - start) / (double) iters; - System.out.println(String.format("%.2f ns per 
metric recording.", elapsed)); + System.out.printf("%.2f ns per metric recording.%n", elapsed); } finally { metrics.close(); } diff --git a/clients/src/test/java/org/apache/kafka/test/Microbenchmarks.java b/clients/src/test/java/org/apache/kafka/test/Microbenchmarks.java index 4966694461226..ce31ef10e3039 100644 --- a/clients/src/test/java/org/apache/kafka/test/Microbenchmarks.java +++ b/clients/src/test/java/org/apache/kafka/test/Microbenchmarks.java @@ -78,34 +78,30 @@ public static void main(String[] args) throws Exception { final Time time = Time.SYSTEM; final AtomicBoolean done = new AtomicBoolean(false); final Object lock = new Object(); - Thread t1 = new Thread() { - public void run() { - time.sleep(1); - int counter = 0; - long start = time.nanoseconds(); - for (int i = 0; i < iters; i++) { - synchronized (lock) { - counter++; - } + Thread t1 = new Thread(() -> { + time.sleep(1); + int counter = 0; + long start1 = time.nanoseconds(); + for (int i = 0; i < iters; i++) { + synchronized (lock) { + counter++; } - System.out.println("synchronized: " + ((time.nanoseconds() - start) / iters)); - System.out.println(counter); - done.set(true); } - }; - - Thread t2 = new Thread() { - public void run() { - int counter = 0; - while (!done.get()) { - time.sleep(1); - synchronized (lock) { - counter += 1; - } + System.out.println("synchronized: " + ((time.nanoseconds() - start1) / iters)); + System.out.println(counter); + done.set(true); + }); + + Thread t2 = new Thread(() -> { + int counter = 0; + while (!done.get()) { + time.sleep(1); + synchronized (lock) { + counter += 1; } - System.out.println("Counter: " + counter); } - }; + System.out.println("Counter: " + counter); + }); t1.start(); t2.start(); @@ -115,34 +111,30 @@ public void run() { System.out.println("Testing locks"); done.set(false); final ReentrantLock lock2 = new ReentrantLock(); - Thread t3 = new Thread() { - public void run() { - time.sleep(1); - int counter = 0; - long start = time.nanoseconds(); - for (int i = 0; i < iters; i++) { - lock2.lock(); - counter++; - lock2.unlock(); - } - System.out.println("lock: " + ((time.nanoseconds() - start) / iters)); - System.out.println(counter); - done.set(true); + Thread t3 = new Thread(() -> { + time.sleep(1); + int counter = 0; + long start12 = time.nanoseconds(); + for (int i = 0; i < iters; i++) { + lock2.lock(); + counter++; + lock2.unlock(); } - }; - - Thread t4 = new Thread() { - public void run() { - int counter = 0; - while (!done.get()) { - time.sleep(1); - lock2.lock(); - counter++; - lock2.unlock(); - } - System.out.println("Counter: " + counter); + System.out.println("lock: " + ((time.nanoseconds() - start12) / iters)); + System.out.println(counter); + done.set(true); + }); + + Thread t4 = new Thread(() -> { + int counter = 0; + while (!done.get()) { + time.sleep(1); + lock2.lock(); + counter++; + lock2.unlock(); } - }; + System.out.println("Counter: " + counter); + }); t3.start(); t4.start(); @@ -164,14 +156,12 @@ private static void benchMap(int numThreads, final int iters, final Map keys = new ArrayList<>(map.keySet()); final List threads = new ArrayList<>(); for (int i = 0; i < numThreads; i++) { - threads.add(new Thread() { - public void run() { - long start = System.nanoTime(); - for (int j = 0; j < iters; j++) - map.get(keys.get(j % threads.size())); - System.out.println("Map access time: " + ((System.nanoTime() - start) / (double) iters)); - } - }); + threads.add(new Thread(() -> { + long start = System.nanoTime(); + for (int j = 0; j < iters; j++) + 
map.get(keys.get(j % threads.size())); + System.out.println("Map access time: " + ((System.nanoTime() - start) / (double) iters)); + })); } for (Thread thread : threads) thread.start();
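The Microbenchmarks and benchMap hunks above all perform the same mechanical conversion: an anonymous new Thread() { public void run() { ... } } subclass becomes new Thread(() -> { ... }), since Thread accepts a Runnable and the lambda captures the same effectively final locals. A standalone sketch of the transformation under illustrative names (nothing below comes from the patch itself):

import java.util.concurrent.atomic.AtomicLong;

public class LambdaThreadExample {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong counter = new AtomicLong();

        // Before: subclassing Thread anonymously just to supply a run() body.
        Thread old = new Thread() {
            @Override
            public void run() {
                for (int i = 0; i < 1_000; i++)
                    counter.incrementAndGet();
            }
        };

        // After: pass the body as a lambda Runnable; no subclass is needed,
        // and captured locals must be effectively final (counter is).
        Thread converted = new Thread(() -> {
            for (int i = 0; i < 1_000; i++)
                counter.incrementAndGet();
        });

        old.start();
        converted.start();
        old.join();
        converted.join();
        System.out.println("total = " + counter.get()); // prints: total = 2000
    }
}

This also explains the otherwise odd renames in the patch (start becomes start1 and start12): an anonymous class body opens a new scope and may shadow an enclosing local, but a lambda body shares the enclosing method's scope, so redeclaring start there would no longer compile.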