From b9499c2349272c22106e9cc47d11ba8351621761 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 24 May 2024 14:11:29 -0500 Subject: [PATCH 01/22] Changed toString() override to toStringBase() Changed toString() override to toStringBase() and implemented some fields in toStringBase() --- .../consumer/internals/CommitRequestManager.java | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 577cf7dee6b76..1f487877ca1de 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1090,14 +1090,10 @@ private void chainFuture( } @Override - public String toString() { - return "OffsetFetchRequestState{" + - "requestedPartitions=" + requestedPartitions + - ", memberId=" + memberInfo.memberId.orElse("undefined") + - ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch.get() : "undefined") + - ", future=" + future + - ", " + toStringBase() + - '}'; + public String toStringBase() { + return super.toStringBase() + + ", requestedPartitions=" + requestedPartitions + + ", future=" + future; } } From 58f1ca4eaa87ba2969a0f435d0ced4cccc64d311 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 24 May 2024 14:13:41 -0500 Subject: [PATCH 02/22] Added test for toStringBase() Added test for OffsetFetchRequestState.toStringBase(). 
--- .../internals/CommitRequestManagerTest.java | 297 ++++++++++-------- 1 file changed, 165 insertions(+), 132 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index b1db0297a120b..96d50b69f4afd 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -106,6 +106,9 @@ public class CommitRequestManagerTest { private OffsetCommitCallbackInvoker offsetCommitCallbackInvoker; private final Metrics metrics = new Metrics(); private Properties props; + private CommitRequestManager.OffsetFetchRequestState offsetFetchRequestState; + private RequestState requestState; + private CommitRequestManager commitRequestManager; private final int defaultApiTimeoutMs = 60000; @@ -123,6 +126,36 @@ public void setup() { this.props.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); } + @Test + public void testOffsetFetchRequestStateToStringBase() { + ConsumerConfig config = mock(ConsumerConfig.class); + this.commitRequestManager = new CommitRequestManager( + time, + logContext, + subscriptionState, + config, + coordinatorRequestManager, + offsetCommitCallbackInvoker, + "groupId", + Optional.of("groupInstanceId"), + metrics); + this.offsetFetchRequestState = commitRequestManager.new OffsetFetchRequestState( + mock(Set.class), + 10, 100, 1000, + mock(CommitRequestManager.MemberInfo.class) + ); + this.requestState = new RequestState( + logContext, + "CommitRequestManager", + 10, + 100); + + String target = requestState.toStringBase() + + ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + + ", future=" + offsetFetchRequestState.future(); + assertEquals(target, offsetFetchRequestState.toStringBase()); + } + @Test public void 
testPollSkipIfCoordinatorUnknown() { CommitRequestManager commitRequestManager = create(false, 0); @@ -227,8 +260,8 @@ public void testPollEnsureEmptyPendingRequestAfterPoll() { CommitRequestManager commitRequestManager = create(true, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); commitRequestManager.commitAsync(offsets); assertEquals(1, commitRequestManager.unsentOffsetCommitRequests().size()); assertEquals(1, commitRequestManager.poll(time.milliseconds()).unsentRequests.size()); @@ -250,10 +283,10 @@ public void testAsyncAutocommitNotRetriedAfterException() { List futures = assertPoll(1, commitRequestManager); // Complete the autocommit request exceptionally. It should fail right away, without retry. futures.get(0).onComplete(mockOffsetCommitResponse( - "topic", - 1, - (short) 1, - Errors.COORDINATOR_LOAD_IN_PROGRESS)); + "topic", + 1, + (short) 1, + Errors.COORDINATOR_LOAD_IN_PROGRESS)); // When polling again before the auto-commit interval no request should be generated // (making sure we wait for the backoff, to check that the failed request is not being @@ -285,8 +318,8 @@ public void testCommitSyncRetriedAfterExpectedRetriableException(Errors error) { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); sendAndVerifyOffsetCommitRequestFailedAndMaybeRetried(commitRequestManager, error, commitResult); @@ -303,8 +336,8 @@ public void testCommitSyncFailsWithExpectedException(Errors commitError, 
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Send sync offset commit that fails and verify it propagates the expected exception. long expirationTimeMs = time.milliseconds() + retryBackoffMs; @@ -315,13 +348,13 @@ public void testCommitSyncFailsWithExpectedException(Errors commitError, private static Stream commitSyncExpectedExceptions() { return Stream.of( - Arguments.of(Errors.FENCED_INSTANCE_ID, CommitFailedException.class), - Arguments.of(Errors.UNKNOWN_MEMBER_ID, CommitFailedException.class), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, Errors.OFFSET_METADATA_TOO_LARGE.exception().getClass()), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, Errors.INVALID_COMMIT_OFFSET_SIZE.exception().getClass()), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, Errors.GROUP_AUTHORIZATION_FAILED.exception().getClass()), - Arguments.of(Errors.CORRUPT_MESSAGE, KafkaException.class), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); + Arguments.of(Errors.FENCED_INSTANCE_ID, CommitFailedException.class), + Arguments.of(Errors.UNKNOWN_MEMBER_ID, CommitFailedException.class), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, Errors.OFFSET_METADATA_TOO_LARGE.exception().getClass()), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, Errors.INVALID_COMMIT_OFFSET_SIZE.exception().getClass()), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, Errors.GROUP_AUTHORIZATION_FAILED.exception().getClass()), + Arguments.of(Errors.CORRUPT_MESSAGE, KafkaException.class), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); } @Test @@ -330,8 +363,8 @@ public void testCommitSyncFailsWithCommitFailedExceptionIfUnknownMemberId() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new 
TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); @@ -348,12 +381,12 @@ public void testCommitSyncFailsWithCommitFailedExceptionOnStaleMemberEpoch() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Send commit request expected to be retried on retriable errors CompletableFuture commitResult = commitRequestManager.commitSync( - offsets, time.milliseconds() + defaultApiTimeoutMs); + offsets, time.milliseconds() + defaultApiTimeoutMs); completeOffsetCommitRequestWithError(commitRequestManager, Errors.STALE_MEMBER_EPOCH); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size()); @@ -387,7 +420,7 @@ public void testAutoCommitAsyncFailsWithStaleMemberEpochContinuesToCommitOnTheIn // Async commit retried, only when the interval expires NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size(), "No request should be generated until the " + - "interval expires"); + "interval expires"); time.sleep(100); commitRequestManager.updateAutoCommitTimer(time.milliseconds()); res = commitRequestManager.poll(time.milliseconds()); @@ -401,8 +434,8 @@ public void testCommitAsyncFailsWithRetriableOnCoordinatorDisconnected() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Async commit that won't be retried. 
CompletableFuture commitResult = commitRequestManager.commitAsync(offsets); @@ -456,18 +489,18 @@ public void testAutoCommitBeforeRevocationNotBlockedByAutoCommitOnIntervalInflig NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(1, res.unsentRequests.size()); NetworkClientDelegate.FutureCompletionHandler autoCommitOnInterval = - res.unsentRequests.get(0).handler(); + res.unsentRequests.get(0).handler(); // Another auto-commit request should be sent if a revocation happens, even if an // auto-commit on the interval is in-flight. CompletableFuture autoCommitBeforeRevocation = - commitRequestManager.maybeAutoCommitSyncBeforeRevocation(200); + commitRequestManager.maybeAutoCommitSyncBeforeRevocation(200); assertEquals(1, commitRequestManager.pendingRequests.unsentOffsetCommits.size()); // Receive response for initial auto-commit on interval autoCommitOnInterval.onComplete(buildOffsetCommitClientResponse(new OffsetCommitResponse(0, new HashMap<>()))); assertFalse(autoCommitBeforeRevocation.isDone(), "Auto-commit before revocation should " + - "not complete until it receives a response"); + "not complete until it receives a response"); } @Test @@ -484,7 +517,7 @@ public void testAutocommitInterceptorsInvoked() { // complete the unsent request to trigger interceptor futures.get(0).onComplete(buildOffsetCommitClientResponse(new OffsetCommitResponse(0, new HashMap<>()))); verify(offsetCommitCallbackInvoker).enqueueInterceptorInvocation( - eq(Collections.singletonMap(t1p, new OffsetAndMetadata(100L))) + eq(Collections.singletonMap(t1p, new OffsetAndMetadata(100L))) ); } @@ -501,7 +534,7 @@ public void testAutocommitInterceptorsNotInvokedOnError() { // complete the unsent request to trigger interceptor futures.get(0).onComplete(buildOffsetCommitClientResponse( - new OffsetCommitResponse(0, Collections.singletonMap(t1p, Errors.NETWORK_EXCEPTION))) + new OffsetCommitResponse(0, Collections.singletonMap(t1p, 
Errors.NETWORK_EXCEPTION))) ); Mockito.verify(offsetCommitCallbackInvoker, never()).enqueueInterceptorInvocation(any()); } @@ -568,7 +601,7 @@ public void testAutoCommitOnIntervalSkippedIfPreviousOneInFlight() { // When a response for the inflight is received, a next auto-commit should be sent when // polling the manager. inflightCommitResult.onComplete( - mockOffsetCommitResponse(t1p.topic(), t1p.partition(), (short) 1, Errors.NONE)); + mockOffsetCommitResponse(t1p.topic(), t1p.partition(), (short) 1, Errors.NONE)); assertPoll(1, commitRequestManager); } @@ -601,10 +634,10 @@ public void testOffsetFetchRequestErroredRequests(final Errors error, final bool Set partitions = new HashSet<>(); partitions.add(new TopicPartition("t1", 0)); List>> futures = sendAndVerifyDuplicatedOffsetFetchRequests( - commitRequestManager, - partitions, - 1, - error); + commitRequestManager, + partitions, + 1, + error); // we only want to make sure to purge the outbound buffer for non-retriables, so retriable will be re-queued. 
if (isRetriable) testRetriable(commitRequestManager, futures); @@ -621,8 +654,8 @@ public void testSuccessfulOffsetFetch() { long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> fetchResult = - commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 0)), - expirationTimeMs); + commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 0)), + expirationTimeMs); // Send fetch request NetworkClientDelegate.PollResult result = commitManager.poll(time.milliseconds()); @@ -635,9 +668,9 @@ public void testSuccessfulOffsetFetch() { long expectedOffset = 100; NetworkClientDelegate.UnsentRequest req = result.unsentRequests.get(0); Map topicPartitionData = - Collections.singletonMap( - tp, - new OffsetFetchResponse.PartitionData(expectedOffset, Optional.of(1), "", Errors.NONE)); + Collections.singletonMap( + tp, + new OffsetFetchResponse.PartitionData(expectedOffset, Optional.of(1), "", Errors.NONE)); req.handler().onComplete(buildOffsetFetchClientResponse(req, topicPartitionData, Errors.NONE, false)); // Validate request future completes with the response received @@ -654,7 +687,7 @@ public void testSuccessfulOffsetFetch() { assertTrue(offsetsAndMetadata.containsKey(tp)); assertEquals(expectedOffset, offsetsAndMetadata.get(tp).offset()); assertEquals(0, commitManager.pendingRequests.inflightOffsetFetches.size(), "Inflight " + - "request should be removed from the queue when a response is received."); + "request should be removed from the queue when a response is received."); } @ParameterizedTest @@ -721,7 +754,7 @@ public void testOffsetCommitRequestErroredRequestsNotRetriedForAsyncCommit(final when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new OffsetAndMetadata(0)); // Send async commit (not expected to be retried). 
CompletableFuture commitResult = commitRequestManager.commitAsync(offsets); @@ -744,8 +777,8 @@ public void testOffsetCommitSyncTimeoutNotReturnedOnPollAndFails() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Send sync offset commit request that fails with retriable error. long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2; @@ -776,8 +809,8 @@ public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExp when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Send offset commit request that fails with retriable error. long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2; @@ -803,7 +836,7 @@ public void testOffsetCommitAsyncFailedWithRetriableThrowsRetriableCommitExcepti when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new OffsetAndMetadata(0)); // Send async commit request that fails with retriable error (not expected to be retried). 
Errors retriableError = Errors.COORDINATOR_NOT_AVAILABLE; @@ -827,7 +860,7 @@ public void testEnsureBackoffRetryOnOffsetCommitRequestTimeout() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new OffsetAndMetadata(0)); long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; commitRequestManager.commitSync(offsets, expirationTimeMs); @@ -952,7 +985,7 @@ public void testSyncOffsetFetchFailsWithStaleEpochAndNotRetriedIfMemberNotInGrou // Send request that is expected to fail with invalid epoch. long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> requestResult = - commitRequestManager.fetchOffsets(partitions, expirationTimeMs); + commitRequestManager.fetchOffsets(partitions, expirationTimeMs); // Mock member not having a valid epoch anymore (left/failed/fenced). commitRequestManager.onMemberEpochUpdated(Optional.empty(), Optional.empty()); @@ -999,7 +1032,7 @@ public void testAutoCommitSyncBeforeRevocationRetriesOnRetriableAndStaleEpoch(Er if ((error.exception() instanceof RetriableException || error == Errors.STALE_MEMBER_EPOCH) && error != Errors.UNKNOWN_TOPIC_OR_PARTITION) { assertEquals(1, commitRequestManager.pendingRequests.unsentOffsetCommits.size(), - "Request to be retried should be added to the outbound queue"); + "Request to be retried should be added to the outbound queue"); // Request should be retried with backoff NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); @@ -1015,7 +1048,7 @@ public void testAutoCommitSyncBeforeRevocationRetriesOnRetriableAndStaleEpoch(Er } } else { assertEquals(0, commitRequestManager.pendingRequests.unsentOffsetCommits.size(), - "Non-retriable failed request should be removed from the outbound queue"); + "Non-retriable failed request should be removed from the outbound queue"); // Request should not be retried, 
even after the backoff expires NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); @@ -1098,40 +1131,40 @@ private void testNonRetriable(final List offsetCommitExceptionSupplier() { return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), - Arguments.of(Errors.REQUEST_TIMED_OUT), - Arguments.of(Errors.FENCED_INSTANCE_ID), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), - Arguments.of(Errors.STALE_MEMBER_EPOCH), - Arguments.of(Errors.UNKNOWN_MEMBER_ID)); + Arguments.of(Errors.NOT_COORDINATOR), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), + Arguments.of(Errors.REQUEST_TIMED_OUT), + Arguments.of(Errors.FENCED_INSTANCE_ID), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), + Arguments.of(Errors.STALE_MEMBER_EPOCH), + Arguments.of(Errors.UNKNOWN_MEMBER_ID)); } // Supplies (error, isRetriable) private static Stream offsetFetchExceptionSupplier() { // fetchCommit is only retrying on a subset of RetriableErrors return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR, true), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, true), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, false), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, false), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, false), - 
Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), - Arguments.of(Errors.REQUEST_TIMED_OUT, false), - Arguments.of(Errors.FENCED_INSTANCE_ID, false), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), - Arguments.of(Errors.UNKNOWN_MEMBER_ID, false), - // Adding STALE_MEMBER_EPOCH as non-retriable here because it is only retried if a new - // member epoch is received. Tested separately. - Arguments.of(Errors.STALE_MEMBER_EPOCH, false)); + Arguments.of(Errors.NOT_COORDINATOR, true), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, true), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, false), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, false), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, false), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), + Arguments.of(Errors.REQUEST_TIMED_OUT, false), + Arguments.of(Errors.FENCED_INSTANCE_ID, false), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), + Arguments.of(Errors.UNKNOWN_MEMBER_ID, false), + // Adding STALE_MEMBER_EPOCH as non-retriable here because it is only retried if a new + // member epoch is received. Tested separately. 
+ Arguments.of(Errors.STALE_MEMBER_EPOCH, false)); } /** @@ -1140,9 +1173,9 @@ private static Stream offsetFetchExceptionSupplier() { */ private static Stream offsetFetchRetriableCoordinatorErrors() { return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR, true), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, false)); + Arguments.of(Errors.NOT_COORDINATOR, true), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, false)); } @ParameterizedTest @@ -1184,7 +1217,7 @@ public void testSignalClose() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new OffsetAndMetadata(0)); commitRequestManager.commitAsync(offsets); commitRequestManager.signalClose(); @@ -1203,10 +1236,10 @@ private static void assertEmptyPendingRequests(CommitRequestManager commitReques // Supplies (error, isRetriable) private static Stream partitionDataErrorSupplier() { return Stream.of( - Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT, true), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false)); + Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT, true), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false)); } private List>> sendAndVerifyDuplicatedOffsetFetchRequests( @@ -1223,16 +1256,16 @@ private List>> sendAndV NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(1, res.unsentRequests.size()); res.unsentRequests.get(0).handler().onComplete(buildOffsetFetchClientResponse(res.unsentRequests.get(0), - partitions, error)); + partitions, error)); res = 
commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size()); return futures; } private void sendAndVerifyOffsetCommitRequestFailedAndMaybeRetried( - final CommitRequestManager commitRequestManager, - final Errors error, - final CompletableFuture commitResult) { + final CommitRequestManager commitRequestManager, + final Errors error, + final CompletableFuture commitResult) { completeOffsetCommitRequestWithError(commitRequestManager, error); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size()); @@ -1247,15 +1280,15 @@ private void sendAndVerifyOffsetCommitRequestFailedAndMaybeRetried( } private List assertPoll( - final int numRes, - final CommitRequestManager manager) { + final int numRes, + final CommitRequestManager manager) { return assertPoll(true, numRes, manager); } private List assertPoll( - final boolean coordinatorDiscovered, - final int numRes, - final CommitRequestManager manager) { + final boolean coordinatorDiscovered, + final int numRes, + final CommitRequestManager manager) { if (coordinatorDiscovered) { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); } else { @@ -1303,22 +1336,22 @@ private ClientResponse buildOffsetFetchClientResponse( } private ClientResponse buildOffsetFetchClientResponseDisconnected( - final NetworkClientDelegate.UnsentRequest request) { + final NetworkClientDelegate.UnsentRequest request) { return buildOffsetFetchClientResponse(request, Collections.emptyMap(), Errors.NONE, true); } private ClientResponse buildOffsetCommitClientResponse(final OffsetCommitResponse commitResponse) { short apiVersion = 1; return new ClientResponse( - new RequestHeader(ApiKeys.OFFSET_COMMIT, apiVersion, "", 1), - null, - "-1", - time.milliseconds(), - time.milliseconds(), - false, - null, - null, - commitResponse + new RequestHeader(ApiKeys.OFFSET_COMMIT, apiVersion, "", 1), + null, + "-1", + 
time.milliseconds(), + time.milliseconds(), + false, + null, + null, + commitResponse ); } @@ -1336,17 +1369,17 @@ public ClientResponse mockOffsetCommitResponse(String topic, long receivedTimeMs, Errors error) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() - .setTopics(Arrays.asList( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName(topic) - .setPartitions(Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setErrorCode(error.code()) - .setPartitionIndex(partition))))); + .setTopics(Arrays.asList( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setName(topic) + .setPartitions(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setErrorCode(error.code()) + .setPartitionIndex(partition))))); OffsetCommitResponse response = mock(OffsetCommitResponse.class); when(response.data()).thenReturn(responseData); return new ClientResponse( - new RequestHeader(ApiKeys.OFFSET_COMMIT, apiKeyVersion, "", 1), + new RequestHeader(ApiKeys.OFFSET_COMMIT, apiKeyVersion, "", 1), null, "-1", createdTimeMs, @@ -1362,25 +1395,25 @@ public ClientResponse mockOffsetCommitResponseDisconnected(String topic, int par short apiKeyVersion, NetworkClientDelegate.UnsentRequest unsentRequest) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() - .setTopics(Arrays.asList( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName(topic) - .setPartitions(Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setErrorCode(Errors.NONE.code()) - .setPartitionIndex(partition))))); + .setTopics(Arrays.asList( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setName(topic) + .setPartitions(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setErrorCode(Errors.NONE.code()) + .setPartitionIndex(partition))))); OffsetCommitResponse response = 
mock(OffsetCommitResponse.class); when(response.data()).thenReturn(responseData); return new ClientResponse( - new RequestHeader(ApiKeys.OFFSET_COMMIT, apiKeyVersion, "", 1), - unsentRequest.handler(), - "-1", - time.milliseconds(), - time.milliseconds(), - true, - null, - null, - new OffsetCommitResponse(responseData) + new RequestHeader(ApiKeys.OFFSET_COMMIT, apiKeyVersion, "", 1), + unsentRequest.handler(), + "-1", + time.milliseconds(), + time.milliseconds(), + true, + null, + null, + new OffsetCommitResponse(responseData) ); } @@ -1409,7 +1442,7 @@ private ClientResponse buildOffsetFetchClientResponse( private KafkaMetric getMetric(String name) { return metrics.metrics().get(metrics.metricName( - name, - CONSUMER_COORDINATOR_METRICS)); + name, + CONSUMER_COORDINATOR_METRICS)); } } From 7a5f17237d6e7378bb055057b9b4159cba0e90e0 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 24 May 2024 14:29:25 -0500 Subject: [PATCH 03/22] Added to toStringBase() Added more relevant fields in toStringBase() and updated the test to reflect the changes in OffsetFetchRequestState.toStringBase() --- .../consumer/internals/CommitRequestManager.java | 4 +++- .../internals/CommitRequestManagerTest.java | 13 +++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 1f487877ca1de..281f15272bceb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1093,7 +1093,9 @@ private void chainFuture( public String toStringBase() { return super.toStringBase() + ", requestedPartitions=" + requestedPartitions + - ", future=" + future; + ", future=" + future + + ", memberId=" + memberInfo.memberId.orElse("undefined") + + ", memberEpoch=" + 
(memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch : "undefined"); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 96d50b69f4afd..63899dc70c6c4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -129,6 +129,10 @@ public void setup() { @Test public void testOffsetFetchRequestStateToStringBase() { ConsumerConfig config = mock(ConsumerConfig.class); + CommitRequestManager.MemberInfo memberInfo = mock(CommitRequestManager.MemberInfo.class); + memberInfo.memberId = Optional.empty(); + memberInfo.memberEpoch = Optional.empty(); + this.commitRequestManager = new CommitRequestManager( time, logContext, @@ -139,11 +143,13 @@ public void testOffsetFetchRequestStateToStringBase() { "groupId", Optional.of("groupInstanceId"), metrics); + this.offsetFetchRequestState = commitRequestManager.new OffsetFetchRequestState( mock(Set.class), 10, 100, 1000, - mock(CommitRequestManager.MemberInfo.class) + memberInfo ); + this.requestState = new RequestState( logContext, "CommitRequestManager", @@ -152,7 +158,10 @@ public void testOffsetFetchRequestStateToStringBase() { String target = requestState.toStringBase() + ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + - ", future=" + offsetFetchRequestState.future(); + ", future=" + offsetFetchRequestState.future() + + ", memberId=" + offsetFetchRequestState.memberInfo.memberId.orElse("undefined") + + ", memberEpoch=" + (offsetFetchRequestState.memberInfo.memberEpoch.isPresent() ? 
offsetFetchRequestState.memberInfo.memberEpoch : "undefined"); + assertEquals(target, offsetFetchRequestState.toStringBase()); } From 4f5680989ac2a920d865ffe3f6fea6d1d6f67f4c Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 24 May 2024 17:34:53 -0500 Subject: [PATCH 04/22] Add println and example hashcode Added println to toStringBase() test to see if it works. Also added example hashcode --- .../internals/CommitRequestManager.java | 19 +++++++++++++++++++ .../internals/CommitRequestManagerTest.java | 3 +-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 281f15272bceb..3571767e3b40d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1089,6 +1089,25 @@ private void chainFuture( }); } + // override hashCode() +// public class MyClass { +// private int id; +// private String name; +// +// // Constructors, getters, setters, etc. +// +// @Override +// public int hashCode() { +// final int prime = 31; +// int result = 1; +// result = prime * result + id; +// result = prime * result + ((name == null) ? 0 : name.hashCode()); +// return result; +// } +// +// // Other methods like equals(), toString(), etc. 
+// } + @Override public String toStringBase() { return super.toStringBase() + diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 63899dc70c6c4..d9d585e144db1 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -161,8 +161,7 @@ public void testOffsetFetchRequestStateToStringBase() { ", future=" + offsetFetchRequestState.future() + ", memberId=" + offsetFetchRequestState.memberInfo.memberId.orElse("undefined") + ", memberEpoch=" + (offsetFetchRequestState.memberInfo.memberEpoch.isPresent() ? offsetFetchRequestState.memberInfo.memberEpoch : "undefined"); - - assertEquals(target, offsetFetchRequestState.toStringBase()); + System.out.println(offsetFetchRequestState.toString()); } @Test From 1dea30d9cc561b28b34c3dac8a0189f2482121da Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 13:15:57 -0500 Subject: [PATCH 05/22] Edited toStringBase test Added an assertEquals statement --- .../clients/consumer/internals/CommitRequestManagerTest.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index d9d585e144db1..63899dc70c6c4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -161,7 +161,8 @@ public void testOffsetFetchRequestStateToStringBase() { ", future=" + offsetFetchRequestState.future() + ", memberId=" + offsetFetchRequestState.memberInfo.memberId.orElse("undefined") + 
", memberEpoch=" + (offsetFetchRequestState.memberInfo.memberEpoch.isPresent() ? offsetFetchRequestState.memberInfo.memberEpoch : "undefined"); - System.out.println(offsetFetchRequestState.toString()); + + assertEquals(target, offsetFetchRequestState.toStringBase()); } @Test From cee03b703e0c57e9b4917817d8f2d2217a8588d5 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 14:45:05 -0500 Subject: [PATCH 06/22] Added to toStringBase and updated test Added fields inherited from RetriableRequestState to the toStringBase method. Had to add a get method for expirationTimeMs to make it work. Also updated the test to reflect the changes. --- .../internals/CommitRequestManager.java | 7 +++++++ .../internals/CommitRequestManagerTest.java | 17 ++++++++++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 3571767e3b40d..16484e8f5e555 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -880,6 +880,10 @@ boolean retryTimeoutExpired(long currentTimeMs) { } abstract void removeRequest(); + + protected Optional expirationTimeMs() { + return expirationTimeMs; + } } class OffsetFetchRequestState extends RetriableRequestState { @@ -1111,6 +1115,9 @@ private void chainFuture( @Override public String toStringBase() { return super.toStringBase() + + ", memberInfo=" + memberInfo + + ", expirationTimeMs=" + (expirationTimeMs().isPresent() ? 
expirationTimeMs() : "undefined") + + ", isExpired=" + isExpired + ", requestedPartitions=" + requestedPartitions + ", future=" + future + ", memberId=" + memberInfo.memberId.orElse("undefined") + diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 63899dc70c6c4..845c9aa805532 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -128,6 +128,9 @@ public void setup() { @Test public void testOffsetFetchRequestStateToStringBase() { + final long retryBackoffMs = 10; + final long retryBackoffMaxMs = 100; + final long expirationTimeMs = 1000; ConsumerConfig config = mock(ConsumerConfig.class); CommitRequestManager.MemberInfo memberInfo = mock(CommitRequestManager.MemberInfo.class); memberInfo.memberId = Optional.empty(); @@ -146,22 +149,26 @@ public void testOffsetFetchRequestStateToStringBase() { this.offsetFetchRequestState = commitRequestManager.new OffsetFetchRequestState( mock(Set.class), - 10, 100, 1000, + retryBackoffMs, retryBackoffMaxMs, expirationTimeMs, memberInfo ); this.requestState = new RequestState( logContext, "CommitRequestManager", - 10, - 100); + retryBackoffMs, + retryBackoffMaxMs); String target = requestState.toStringBase() + + ", memberInfo=" + memberInfo + + ", expirationTimeMs=" + (offsetFetchRequestState.expirationTimeMs().isPresent() ? 
offsetFetchRequestState.expirationTimeMs() : "undefined") + + ", isExpired=" + offsetFetchRequestState.isExpired + ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + ", future=" + offsetFetchRequestState.future() + - ", memberId=" + offsetFetchRequestState.memberInfo.memberId.orElse("undefined") + - ", memberEpoch=" + (offsetFetchRequestState.memberInfo.memberEpoch.isPresent() ? offsetFetchRequestState.memberInfo.memberEpoch : "undefined"); + ", memberId=" + memberInfo.memberId.orElse("undefined") + + ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch : "undefined"); + System.out.println(target); assertEquals(target, offsetFetchRequestState.toStringBase()); } From 42c7826727260de872814eee6a796fe169236556 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 15:50:04 -0500 Subject: [PATCH 07/22] Remove hashcode comment Remove hashcode comment --- .../internals/CommitRequestManager.java | 19 ------------------- .../internals/CommitRequestManagerTest.java | 1 - 2 files changed, 20 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 16484e8f5e555..d87e7469130df 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1093,25 +1093,6 @@ private void chainFuture( }); } - // override hashCode() -// public class MyClass { -// private int id; -// private String name; -// -// // Constructors, getters, setters, etc. -// -// @Override -// public int hashCode() { -// final int prime = 31; -// int result = 1; -// result = prime * result + id; -// result = prime * result + ((name == null) ? 0 : name.hashCode()); -// return result; -// } -// -// // Other methods like equals(), toString(), etc. 
-// } - @Override public String toStringBase() { return super.toStringBase() + diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 845c9aa805532..c54223fd80d8b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -168,7 +168,6 @@ public void testOffsetFetchRequestStateToStringBase() { ", memberId=" + memberInfo.memberId.orElse("undefined") + ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch : "undefined"); - System.out.println(target); assertEquals(target, offsetFetchRequestState.toStringBase()); } From 935f0b868f254fad78762a3682dba85dad25a8b6 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 08:52:41 -0500 Subject: [PATCH 08/22] Add testing comment Added visible for testing comment --- .../kafka/clients/consumer/internals/CommitRequestManager.java | 1 + 1 file changed, 1 insertion(+) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index d87e7469130df..054d474ccfae0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -881,6 +881,7 @@ boolean retryTimeoutExpired(long currentTimeMs) { abstract void removeRequest(); + // Visible for testing protected Optional expirationTimeMs() { return expirationTimeMs; } From 272d5de00a1c77c40d2875f62ada238814543582 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 09:18:26 -0500 Subject: [PATCH 09/22] Updated toStringBase and added toString Updated 
toStringBase() to have better output formatting, also added a toString() method for MemberInfo class. Updated toStringBase() test to reflect changes --- .../consumer/internals/CommitRequestManager.java | 14 +++++++++----- .../internals/CommitRequestManagerTest.java | 13 +++++-------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 054d474ccfae0..40ff73c7cde32 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1097,13 +1097,11 @@ private void chainFuture( @Override public String toStringBase() { return super.toStringBase() + - ", memberInfo=" + memberInfo + - ", expirationTimeMs=" + (expirationTimeMs().isPresent() ? expirationTimeMs() : "undefined") + + ", memberInfo={" + memberInfo + + "}, expirationTimeMs=" + (expirationTimeMs().isPresent() ? expirationTimeMs() : "undefined") + ", isExpired=" + isExpired + ", requestedPartitions=" + requestedPartitions + - ", future=" + future + - ", memberId=" + memberInfo.memberId.orElse("undefined") + - ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch : "undefined"); + ", future=" + future; } } @@ -1295,5 +1293,11 @@ static class MemberInfo { this.memberId = Optional.empty(); this.memberEpoch = Optional.empty(); } + + @Override + public String toString() { + return "memberId=" + memberId.orElse("undefined") + + ", memberEpoch=" + (memberEpoch.isPresent() ? 
memberEpoch : "undefined"); + } } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index c54223fd80d8b..f6c0352c04f71 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -132,9 +132,7 @@ public void testOffsetFetchRequestStateToStringBase() { final long retryBackoffMaxMs = 100; final long expirationTimeMs = 1000; ConsumerConfig config = mock(ConsumerConfig.class); - CommitRequestManager.MemberInfo memberInfo = mock(CommitRequestManager.MemberInfo.class); - memberInfo.memberId = Optional.empty(); - memberInfo.memberEpoch = Optional.empty(); + CommitRequestManager.MemberInfo memberInfo = new CommitRequestManager.MemberInfo(); this.commitRequestManager = new CommitRequestManager( time, @@ -160,14 +158,13 @@ public void testOffsetFetchRequestStateToStringBase() { retryBackoffMaxMs); String target = requestState.toStringBase() + - ", memberInfo=" + memberInfo + - ", expirationTimeMs=" + (offsetFetchRequestState.expirationTimeMs().isPresent() ? offsetFetchRequestState.expirationTimeMs() : "undefined") + + ", memberInfo={" + offsetFetchRequestState.memberInfo + + "}, expirationTimeMs=" + (offsetFetchRequestState.expirationTimeMs().isPresent() ? offsetFetchRequestState.expirationTimeMs() : "undefined") + ", isExpired=" + offsetFetchRequestState.isExpired + ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + - ", future=" + offsetFetchRequestState.future() + - ", memberId=" + memberInfo.memberId.orElse("undefined") + - ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? 
memberInfo.memberEpoch : "undefined"); + ", future=" + offsetFetchRequestState.future(); + System.out.println(target); assertEquals(target, offsetFetchRequestState.toStringBase()); } From 7aa6ba5cd1c7fd3950d71574ceef9ea404da5bd0 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 30 May 2024 15:51:22 -0500 Subject: [PATCH 10/22] Remove debugging print Remove debugging print --- .../clients/consumer/internals/CommitRequestManagerTest.java | 1 - 1 file changed, 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index f6c0352c04f71..d57908955c114 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -164,7 +164,6 @@ public void testOffsetFetchRequestStateToStringBase() { ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + ", future=" + offsetFetchRequestState.future(); - System.out.println(target); assertEquals(target, offsetFetchRequestState.toStringBase()); } From b560db32bdd388207cd53a5a08706d5406999f50 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 09:16:15 -0500 Subject: [PATCH 11/22] Implementing PR suggestions Updated testOffsetFetchRequestStateToStringBase(), changed some variable scope and changed the target string slightly. 
Also made a small update to toStringBase() and MemberInfo.toString() --- .../internals/CommitRequestManager.java | 8 ++++---- .../internals/CommitRequestManagerTest.java | 20 ++++++++----------- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 40ff73c7cde32..f6a11fe4b7d58 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1097,8 +1097,8 @@ private void chainFuture( @Override public String toStringBase() { return super.toStringBase() + - ", memberInfo={" + memberInfo + - "}, expirationTimeMs=" + (expirationTimeMs().isPresent() ? expirationTimeMs() : "undefined") + + ", memberInfo=" + memberInfo + + ", expirationTimeMs=" + (expirationTimeMs().isPresent() ? expirationTimeMs() : "undefined") + ", isExpired=" + isExpired + ", requestedPartitions=" + requestedPartitions + ", future=" + future; @@ -1296,8 +1296,8 @@ static class MemberInfo { @Override public String toString() { - return "memberId=" + memberId.orElse("undefined") + - ", memberEpoch=" + (memberEpoch.isPresent() ? memberEpoch : "undefined"); + return "MemberInfo{" + "memberId=" + memberId.orElse("undefined") + + ", memberEpoch=" + (memberEpoch.isPresent() ? 
memberEpoch : "undefined") + "}"; } } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index d57908955c114..dc5f163b93300 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -106,9 +106,6 @@ public class CommitRequestManagerTest { private OffsetCommitCallbackInvoker offsetCommitCallbackInvoker; private final Metrics metrics = new Metrics(); private Properties props; - private CommitRequestManager.OffsetFetchRequestState offsetFetchRequestState; - private RequestState requestState; - private CommitRequestManager commitRequestManager; private final int defaultApiTimeoutMs = 60000; @@ -128,13 +125,10 @@ public void setup() { @Test public void testOffsetFetchRequestStateToStringBase() { - final long retryBackoffMs = 10; - final long retryBackoffMaxMs = 100; - final long expirationTimeMs = 1000; ConsumerConfig config = mock(ConsumerConfig.class); CommitRequestManager.MemberInfo memberInfo = new CommitRequestManager.MemberInfo(); - this.commitRequestManager = new CommitRequestManager( + CommitRequestManager commitRequestManager = new CommitRequestManager( time, logContext, subscriptionState, @@ -145,21 +139,23 @@ public void testOffsetFetchRequestStateToStringBase() { Optional.of("groupInstanceId"), metrics); - this.offsetFetchRequestState = commitRequestManager.new OffsetFetchRequestState( + CommitRequestManager.OffsetFetchRequestState offsetFetchRequestState = commitRequestManager.new OffsetFetchRequestState( mock(Set.class), - retryBackoffMs, retryBackoffMaxMs, expirationTimeMs, + retryBackoffMs, + retryBackoffMaxMs, + 1000, memberInfo ); - this.requestState = new RequestState( + RequestState requestState = new RequestState( logContext, 
"CommitRequestManager", retryBackoffMs, retryBackoffMaxMs); String target = requestState.toStringBase() + - ", memberInfo={" + offsetFetchRequestState.memberInfo + - "}, expirationTimeMs=" + (offsetFetchRequestState.expirationTimeMs().isPresent() ? offsetFetchRequestState.expirationTimeMs() : "undefined") + + ", memberInfo=" + offsetFetchRequestState.memberInfo + + ", expirationTimeMs=" + (offsetFetchRequestState.expirationTimeMs().isPresent() ? offsetFetchRequestState.expirationTimeMs() : "undefined") + ", isExpired=" + offsetFetchRequestState.isExpired + ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + ", future=" + offsetFetchRequestState.future(); From 8d7ddbbfd8eadb25eae370955445906d427ebf79 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 09:28:49 -0500 Subject: [PATCH 12/22] Fixed build error Fixed build error --- .../clients/consumer/internals/CommitRequestManagerTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index dc5f163b93300..487f11353a52a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -140,7 +140,7 @@ public void testOffsetFetchRequestStateToStringBase() { metrics); CommitRequestManager.OffsetFetchRequestState offsetFetchRequestState = commitRequestManager.new OffsetFetchRequestState( - mock(Set.class), + new HashSet<>(), retryBackoffMs, retryBackoffMaxMs, 1000, From 3d9f58b7516ff445b297546634128bc09542373c Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 10:33:16 -0500 Subject: [PATCH 13/22] Small changes to testOffsetFetchRequestStateToStringBase() Small changes to testOffsetFetchRequestStateToStringBase() --- 
.../consumer/internals/CommitRequestManagerTest.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 487f11353a52a..b53efd605ae03 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -76,6 +76,7 @@ import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_GROUP_ID; import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_GROUP_INSTANCE_ID; import static org.apache.kafka.test.TestUtils.assertFutureThrows; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -139,6 +140,7 @@ public void testOffsetFetchRequestStateToStringBase() { Optional.of("groupInstanceId"), metrics); + // Add some topic partitions to the hashset and test for it in the target CommitRequestManager.OffsetFetchRequestState offsetFetchRequestState = commitRequestManager.new OffsetFetchRequestState( new HashSet<>(), retryBackoffMs, @@ -153,13 +155,16 @@ public void testOffsetFetchRequestStateToStringBase() { retryBackoffMs, retryBackoffMaxMs); + // Make parameterized test for expirationTimeMs String target = requestState.toStringBase() + - ", memberInfo=" + offsetFetchRequestState.memberInfo + + ", memberInfo=" + memberInfo + ", expirationTimeMs=" + (offsetFetchRequestState.expirationTimeMs().isPresent() ? 
offsetFetchRequestState.expirationTimeMs() : "undefined") + ", isExpired=" + offsetFetchRequestState.isExpired + ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + ", future=" + offsetFetchRequestState.future(); + System.out.println(target); + assertDoesNotThrow(requestState::toString); assertEquals(target, offsetFetchRequestState.toStringBase()); } From 426313a91d79ae7a4420c406a880e51cb2f7ef5a Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 11:49:57 -0500 Subject: [PATCH 14/22] Remove debug print --- .../clients/consumer/internals/CommitRequestManagerTest.java | 1 - 1 file changed, 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index b53efd605ae03..21aa59db03c65 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -163,7 +163,6 @@ public void testOffsetFetchRequestStateToStringBase() { ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + ", future=" + offsetFetchRequestState.future(); - System.out.println(target); assertDoesNotThrow(requestState::toString); assertEquals(target, offsetFetchRequestState.toStringBase()); } From 4461145f6c31e0372e4e09cbe7dd0c0090d0ca3a Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 12:04:29 -0500 Subject: [PATCH 15/22] Whitespace fixes --- .../internals/CommitRequestManagerTest.java | 262 +++++++++--------- 1 file changed, 131 insertions(+), 131 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 21aa59db03c65..336b2c696eacc 100644 --- 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -271,8 +271,8 @@ public void testPollEnsureEmptyPendingRequestAfterPoll() { CommitRequestManager commitRequestManager = create(true, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); commitRequestManager.commitAsync(offsets); assertEquals(1, commitRequestManager.unsentOffsetCommitRequests().size()); assertEquals(1, commitRequestManager.poll(time.milliseconds()).unsentRequests.size()); @@ -294,10 +294,10 @@ public void testAsyncAutocommitNotRetriedAfterException() { List futures = assertPoll(1, commitRequestManager); // Complete the autocommit request exceptionally. It should fail right away, without retry. 
futures.get(0).onComplete(mockOffsetCommitResponse( - "topic", - 1, - (short) 1, - Errors.COORDINATOR_LOAD_IN_PROGRESS)); + "topic", + 1, + (short) 1, + Errors.COORDINATOR_LOAD_IN_PROGRESS)); // When polling again before the auto-commit interval no request should be generated // (making sure we wait for the backoff, to check that the failed request is not being @@ -329,8 +329,8 @@ public void testCommitSyncRetriedAfterExpectedRetriableException(Errors error) { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); sendAndVerifyOffsetCommitRequestFailedAndMaybeRetried(commitRequestManager, error, commitResult); @@ -347,8 +347,8 @@ public void testCommitSyncFailsWithExpectedException(Errors commitError, when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Send sync offset commit that fails and verify it propagates the expected exception. 
long expirationTimeMs = time.milliseconds() + retryBackoffMs; @@ -359,13 +359,13 @@ public void testCommitSyncFailsWithExpectedException(Errors commitError, private static Stream commitSyncExpectedExceptions() { return Stream.of( - Arguments.of(Errors.FENCED_INSTANCE_ID, CommitFailedException.class), - Arguments.of(Errors.UNKNOWN_MEMBER_ID, CommitFailedException.class), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, Errors.OFFSET_METADATA_TOO_LARGE.exception().getClass()), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, Errors.INVALID_COMMIT_OFFSET_SIZE.exception().getClass()), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, Errors.GROUP_AUTHORIZATION_FAILED.exception().getClass()), - Arguments.of(Errors.CORRUPT_MESSAGE, KafkaException.class), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); + Arguments.of(Errors.FENCED_INSTANCE_ID, CommitFailedException.class), + Arguments.of(Errors.UNKNOWN_MEMBER_ID, CommitFailedException.class), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, Errors.OFFSET_METADATA_TOO_LARGE.exception().getClass()), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, Errors.INVALID_COMMIT_OFFSET_SIZE.exception().getClass()), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, Errors.GROUP_AUTHORIZATION_FAILED.exception().getClass()), + Arguments.of(Errors.CORRUPT_MESSAGE, KafkaException.class), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); } @Test @@ -374,8 +374,8 @@ public void testCommitSyncFailsWithCommitFailedExceptionIfUnknownMemberId() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); @@ -392,12 +392,12 @@ public void 
testCommitSyncFailsWithCommitFailedExceptionOnStaleMemberEpoch() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Send commit request expected to be retried on retriable errors CompletableFuture commitResult = commitRequestManager.commitSync( - offsets, time.milliseconds() + defaultApiTimeoutMs); + offsets, time.milliseconds() + defaultApiTimeoutMs); completeOffsetCommitRequestWithError(commitRequestManager, Errors.STALE_MEMBER_EPOCH); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size()); @@ -431,7 +431,7 @@ public void testAutoCommitAsyncFailsWithStaleMemberEpochContinuesToCommitOnTheIn // Async commit retried, only when the interval expires NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size(), "No request should be generated until the " + - "interval expires"); + "interval expires"); time.sleep(100); commitRequestManager.updateAutoCommitTimer(time.milliseconds()); res = commitRequestManager.poll(time.milliseconds()); @@ -445,8 +445,8 @@ public void testCommitAsyncFailsWithRetriableOnCoordinatorDisconnected() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Async commit that won't be retried. 
CompletableFuture commitResult = commitRequestManager.commitAsync(offsets); @@ -500,18 +500,18 @@ public void testAutoCommitBeforeRevocationNotBlockedByAutoCommitOnIntervalInflig NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(1, res.unsentRequests.size()); NetworkClientDelegate.FutureCompletionHandler autoCommitOnInterval = - res.unsentRequests.get(0).handler(); + res.unsentRequests.get(0).handler(); // Another auto-commit request should be sent if a revocation happens, even if an // auto-commit on the interval is in-flight. CompletableFuture autoCommitBeforeRevocation = - commitRequestManager.maybeAutoCommitSyncBeforeRevocation(200); + commitRequestManager.maybeAutoCommitSyncBeforeRevocation(200); assertEquals(1, commitRequestManager.pendingRequests.unsentOffsetCommits.size()); // Receive response for initial auto-commit on interval autoCommitOnInterval.onComplete(buildOffsetCommitClientResponse(new OffsetCommitResponse(0, new HashMap<>()))); assertFalse(autoCommitBeforeRevocation.isDone(), "Auto-commit before revocation should " + - "not complete until it receives a response"); + "not complete until it receives a response"); } @Test @@ -528,7 +528,7 @@ public void testAutocommitInterceptorsInvoked() { // complete the unsent request to trigger interceptor futures.get(0).onComplete(buildOffsetCommitClientResponse(new OffsetCommitResponse(0, new HashMap<>()))); verify(offsetCommitCallbackInvoker).enqueueInterceptorInvocation( - eq(Collections.singletonMap(t1p, new OffsetAndMetadata(100L))) + eq(Collections.singletonMap(t1p, new OffsetAndMetadata(100L))) ); } @@ -545,7 +545,7 @@ public void testAutocommitInterceptorsNotInvokedOnError() { // complete the unsent request to trigger interceptor futures.get(0).onComplete(buildOffsetCommitClientResponse( - new OffsetCommitResponse(0, Collections.singletonMap(t1p, Errors.NETWORK_EXCEPTION))) + new OffsetCommitResponse(0, Collections.singletonMap(t1p, 
Errors.NETWORK_EXCEPTION))) ); Mockito.verify(offsetCommitCallbackInvoker, never()).enqueueInterceptorInvocation(any()); } @@ -612,7 +612,7 @@ public void testAutoCommitOnIntervalSkippedIfPreviousOneInFlight() { // When a response for the inflight is received, a next auto-commit should be sent when // polling the manager. inflightCommitResult.onComplete( - mockOffsetCommitResponse(t1p.topic(), t1p.partition(), (short) 1, Errors.NONE)); + mockOffsetCommitResponse(t1p.topic(), t1p.partition(), (short) 1, Errors.NONE)); assertPoll(1, commitRequestManager); } @@ -645,10 +645,10 @@ public void testOffsetFetchRequestErroredRequests(final Errors error, final bool Set partitions = new HashSet<>(); partitions.add(new TopicPartition("t1", 0)); List>> futures = sendAndVerifyDuplicatedOffsetFetchRequests( - commitRequestManager, - partitions, - 1, - error); + commitRequestManager, + partitions, + 1, + error); // we only want to make sure to purge the outbound buffer for non-retriables, so retriable will be re-queued. 
if (isRetriable) testRetriable(commitRequestManager, futures); @@ -665,8 +665,8 @@ public void testSuccessfulOffsetFetch() { long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> fetchResult = - commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 0)), - expirationTimeMs); + commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 0)), + expirationTimeMs); // Send fetch request NetworkClientDelegate.PollResult result = commitManager.poll(time.milliseconds()); @@ -679,9 +679,9 @@ public void testSuccessfulOffsetFetch() { long expectedOffset = 100; NetworkClientDelegate.UnsentRequest req = result.unsentRequests.get(0); Map topicPartitionData = - Collections.singletonMap( - tp, - new OffsetFetchResponse.PartitionData(expectedOffset, Optional.of(1), "", Errors.NONE)); + Collections.singletonMap( + tp, + new OffsetFetchResponse.PartitionData(expectedOffset, Optional.of(1), "", Errors.NONE)); req.handler().onComplete(buildOffsetFetchClientResponse(req, topicPartitionData, Errors.NONE, false)); // Validate request future completes with the response received @@ -698,7 +698,7 @@ public void testSuccessfulOffsetFetch() { assertTrue(offsetsAndMetadata.containsKey(tp)); assertEquals(expectedOffset, offsetsAndMetadata.get(tp).offset()); assertEquals(0, commitManager.pendingRequests.inflightOffsetFetches.size(), "Inflight " + - "request should be removed from the queue when a response is received."); + "request should be removed from the queue when a response is received."); } @ParameterizedTest @@ -765,7 +765,7 @@ public void testOffsetCommitRequestErroredRequestsNotRetriedForAsyncCommit(final when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new OffsetAndMetadata(0)); // Send async commit (not expected to be retried). 
CompletableFuture commitResult = commitRequestManager.commitAsync(offsets); @@ -788,8 +788,8 @@ public void testOffsetCommitSyncTimeoutNotReturnedOnPollAndFails() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Send sync offset commit request that fails with retriable error. long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2; @@ -820,8 +820,8 @@ public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExp when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap( - new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); // Send offset commit request that fails with retriable error. long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2; @@ -847,7 +847,7 @@ public void testOffsetCommitAsyncFailedWithRetriableThrowsRetriableCommitExcepti when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new OffsetAndMetadata(0)); // Send async commit request that fails with retriable error (not expected to be retried). 
Errors retriableError = Errors.COORDINATOR_NOT_AVAILABLE; @@ -871,7 +871,7 @@ public void testEnsureBackoffRetryOnOffsetCommitRequestTimeout() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new OffsetAndMetadata(0)); long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; commitRequestManager.commitSync(offsets, expirationTimeMs); @@ -996,7 +996,7 @@ public void testSyncOffsetFetchFailsWithStaleEpochAndNotRetriedIfMemberNotInGrou // Send request that is expected to fail with invalid epoch. long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> requestResult = - commitRequestManager.fetchOffsets(partitions, expirationTimeMs); + commitRequestManager.fetchOffsets(partitions, expirationTimeMs); // Mock member not having a valid epoch anymore (left/failed/fenced). commitRequestManager.onMemberEpochUpdated(Optional.empty(), Optional.empty()); @@ -1043,7 +1043,7 @@ public void testAutoCommitSyncBeforeRevocationRetriesOnRetriableAndStaleEpoch(Er if ((error.exception() instanceof RetriableException || error == Errors.STALE_MEMBER_EPOCH) && error != Errors.UNKNOWN_TOPIC_OR_PARTITION) { assertEquals(1, commitRequestManager.pendingRequests.unsentOffsetCommits.size(), - "Request to be retried should be added to the outbound queue"); + "Request to be retried should be added to the outbound queue"); // Request should be retried with backoff NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); @@ -1059,7 +1059,7 @@ public void testAutoCommitSyncBeforeRevocationRetriesOnRetriableAndStaleEpoch(Er } } else { assertEquals(0, commitRequestManager.pendingRequests.unsentOffsetCommits.size(), - "Non-retriable failed request should be removed from the outbound queue"); + "Non-retriable failed request should be removed from the outbound queue"); // Request should not be retried, 
even after the backoff expires NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); @@ -1142,40 +1142,40 @@ private void testNonRetriable(final List offsetCommitExceptionSupplier() { return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), - Arguments.of(Errors.REQUEST_TIMED_OUT), - Arguments.of(Errors.FENCED_INSTANCE_ID), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), - Arguments.of(Errors.STALE_MEMBER_EPOCH), - Arguments.of(Errors.UNKNOWN_MEMBER_ID)); + Arguments.of(Errors.NOT_COORDINATOR), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), + Arguments.of(Errors.REQUEST_TIMED_OUT), + Arguments.of(Errors.FENCED_INSTANCE_ID), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), + Arguments.of(Errors.STALE_MEMBER_EPOCH), + Arguments.of(Errors.UNKNOWN_MEMBER_ID)); } // Supplies (error, isRetriable) private static Stream offsetFetchExceptionSupplier() { // fetchCommit is only retrying on a subset of RetriableErrors return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR, true), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, true), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, false), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, false), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, false), - 
Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), - Arguments.of(Errors.REQUEST_TIMED_OUT, false), - Arguments.of(Errors.FENCED_INSTANCE_ID, false), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), - Arguments.of(Errors.UNKNOWN_MEMBER_ID, false), - // Adding STALE_MEMBER_EPOCH as non-retriable here because it is only retried if a new - // member epoch is received. Tested separately. - Arguments.of(Errors.STALE_MEMBER_EPOCH, false)); + Arguments.of(Errors.NOT_COORDINATOR, true), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, true), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, false), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, false), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, false), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), + Arguments.of(Errors.REQUEST_TIMED_OUT, false), + Arguments.of(Errors.FENCED_INSTANCE_ID, false), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), + Arguments.of(Errors.UNKNOWN_MEMBER_ID, false), + // Adding STALE_MEMBER_EPOCH as non-retriable here because it is only retried if a new + // member epoch is received. Tested separately. 
+ Arguments.of(Errors.STALE_MEMBER_EPOCH, false)); } /** @@ -1184,9 +1184,9 @@ private static Stream offsetFetchExceptionSupplier() { */ private static Stream offsetFetchRetriableCoordinatorErrors() { return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR, true), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, false)); + Arguments.of(Errors.NOT_COORDINATOR, true), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, false)); } @ParameterizedTest @@ -1228,7 +1228,7 @@ public void testSignalClose() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), - new OffsetAndMetadata(0)); + new OffsetAndMetadata(0)); commitRequestManager.commitAsync(offsets); commitRequestManager.signalClose(); @@ -1247,10 +1247,10 @@ private static void assertEmptyPendingRequests(CommitRequestManager commitReques // Supplies (error, isRetriable) private static Stream partitionDataErrorSupplier() { return Stream.of( - Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT, true), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false)); + Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT, true), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false)); } private List>> sendAndVerifyDuplicatedOffsetFetchRequests( @@ -1267,16 +1267,16 @@ private List>> sendAndV NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(1, res.unsentRequests.size()); res.unsentRequests.get(0).handler().onComplete(buildOffsetFetchClientResponse(res.unsentRequests.get(0), - partitions, error)); + partitions, error)); res = 
commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size()); return futures; } private void sendAndVerifyOffsetCommitRequestFailedAndMaybeRetried( - final CommitRequestManager commitRequestManager, - final Errors error, - final CompletableFuture commitResult) { + final CommitRequestManager commitRequestManager, + final Errors error, + final CompletableFuture commitResult) { completeOffsetCommitRequestWithError(commitRequestManager, error); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size()); @@ -1291,15 +1291,15 @@ private void sendAndVerifyOffsetCommitRequestFailedAndMaybeRetried( } private List assertPoll( - final int numRes, - final CommitRequestManager manager) { + final int numRes, + final CommitRequestManager manager) { return assertPoll(true, numRes, manager); } private List assertPoll( - final boolean coordinatorDiscovered, - final int numRes, - final CommitRequestManager manager) { + final boolean coordinatorDiscovered, + final int numRes, + final CommitRequestManager manager) { if (coordinatorDiscovered) { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); } else { @@ -1347,22 +1347,22 @@ private ClientResponse buildOffsetFetchClientResponse( } private ClientResponse buildOffsetFetchClientResponseDisconnected( - final NetworkClientDelegate.UnsentRequest request) { + final NetworkClientDelegate.UnsentRequest request) { return buildOffsetFetchClientResponse(request, Collections.emptyMap(), Errors.NONE, true); } private ClientResponse buildOffsetCommitClientResponse(final OffsetCommitResponse commitResponse) { short apiVersion = 1; return new ClientResponse( - new RequestHeader(ApiKeys.OFFSET_COMMIT, apiVersion, "", 1), - null, - "-1", - time.milliseconds(), - time.milliseconds(), - false, - null, - null, - commitResponse + new RequestHeader(ApiKeys.OFFSET_COMMIT, apiVersion, "", 1), + null, + "-1", + 
time.milliseconds(), + time.milliseconds(), + false, + null, + null, + commitResponse ); } @@ -1380,17 +1380,17 @@ public ClientResponse mockOffsetCommitResponse(String topic, long receivedTimeMs, Errors error) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() - .setTopics(Arrays.asList( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName(topic) - .setPartitions(Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setErrorCode(error.code()) - .setPartitionIndex(partition))))); + .setTopics(Arrays.asList( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setName(topic) + .setPartitions(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setErrorCode(error.code()) + .setPartitionIndex(partition))))); OffsetCommitResponse response = mock(OffsetCommitResponse.class); when(response.data()).thenReturn(responseData); return new ClientResponse( - new RequestHeader(ApiKeys.OFFSET_COMMIT, apiKeyVersion, "", 1), + new RequestHeader(ApiKeys.OFFSET_COMMIT, apiKeyVersion, "", 1), null, "-1", createdTimeMs, @@ -1407,24 +1407,24 @@ public ClientResponse mockOffsetCommitResponseDisconnected(String topic, int par NetworkClientDelegate.UnsentRequest unsentRequest) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() .setTopics(Arrays.asList( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName(topic) - .setPartitions(Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setErrorCode(Errors.NONE.code()) - .setPartitionIndex(partition))))); + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setName(topic) + .setPartitions(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setErrorCode(Errors.NONE.code()) + .setPartitionIndex(partition))))); OffsetCommitResponse response = mock(OffsetCommitResponse.class); 
when(response.data()).thenReturn(responseData); return new ClientResponse( - new RequestHeader(ApiKeys.OFFSET_COMMIT, apiKeyVersion, "", 1), - unsentRequest.handler(), - "-1", - time.milliseconds(), - time.milliseconds(), - true, - null, - null, - new OffsetCommitResponse(responseData) + new RequestHeader(ApiKeys.OFFSET_COMMIT, apiKeyVersion, "", 1), + unsentRequest.handler(), + "-1", + time.milliseconds(), + time.milliseconds(), + true, + null, + null, + new OffsetCommitResponse(responseData) ); } @@ -1453,7 +1453,7 @@ private ClientResponse buildOffsetFetchClientResponse( private KafkaMetric getMetric(String name) { return metrics.metrics().get(metrics.metricName( - name, - CONSUMER_COORDINATOR_METRICS)); + name, + CONSUMER_COORDINATOR_METRICS)); } } From a252ec103d45ed786816a32c52e26c4f301eeaa9 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 12:06:25 -0500 Subject: [PATCH 16/22] More whitespace changes --- .../internals/CommitRequestManagerTest.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 336b2c696eacc..f1cbf0623e372 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -1406,13 +1406,13 @@ public ClientResponse mockOffsetCommitResponseDisconnected(String topic, int par short apiKeyVersion, NetworkClientDelegate.UnsentRequest unsentRequest) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() - .setTopics(Arrays.asList( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName(topic) - .setPartitions(Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - 
.setErrorCode(Errors.NONE.code()) - .setPartitionIndex(partition))))); + .setTopics(Arrays.asList( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setName(topic) + .setPartitions(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setErrorCode(Errors.NONE.code()) + .setPartitionIndex(partition))))); OffsetCommitResponse response = mock(OffsetCommitResponse.class); when(response.data()).thenReturn(responseData); return new ClientResponse( From 33a774868f41b3ed6ee5c38ab84234beba178ad1 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 12:08:53 -0500 Subject: [PATCH 17/22] Fix style Fix style --- .../clients/consumer/internals/CommitRequestManagerTest.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index f1cbf0623e372..20e63986dc8b5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -146,8 +146,7 @@ public void testOffsetFetchRequestStateToStringBase() { retryBackoffMs, retryBackoffMaxMs, 1000, - memberInfo - ); + memberInfo); RequestState requestState = new RequestState( logContext, From 554ad5300620348e62bac210d1634755108ddc00 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 12:26:24 -0500 Subject: [PATCH 18/22] Updated testOffsetFetchRequestStateToStringBase() --- .../consumer/internals/CommitRequestManagerTest.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 
20e63986dc8b5..1daab57807e70 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -140,9 +140,12 @@ public void testOffsetFetchRequestStateToStringBase() { Optional.of("groupInstanceId"), metrics); - // Add some topic partitions to the hashset and test for it in the target + Set requestedPartitions = new HashSet<>(); + TopicPartition topicPartition1 = new TopicPartition("topic-1", 1); + requestedPartitions.add(topicPartition1); + CommitRequestManager.OffsetFetchRequestState offsetFetchRequestState = commitRequestManager.new OffsetFetchRequestState( - new HashSet<>(), + requestedPartitions, retryBackoffMs, retryBackoffMaxMs, 1000, @@ -154,7 +157,6 @@ public void testOffsetFetchRequestStateToStringBase() { retryBackoffMs, retryBackoffMaxMs); - // Make parameterized test for expirationTimeMs String target = requestState.toStringBase() + ", memberInfo=" + memberInfo + ", expirationTimeMs=" + (offsetFetchRequestState.expirationTimeMs().isPresent() ? offsetFetchRequestState.expirationTimeMs() : "undefined") + From 7e8be50733fbdcf1e48b39902a38b23f3277f532 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 14:55:51 -0500 Subject: [PATCH 19/22] Revert "Merge branch 'apache:trunk' into 16557" This reverts commit f5abcc5bd956dc8157b2ff1e885edd8d72689cbd, reversing changes made to 554ad5300620348e62bac210d1634755108ddc00. 
--- README.md | 7 +- build.gradle | 17 +- checkstyle/checkstyle.xml | 2 - .../apache/kafka/common/ShareGroupState.java | 56 ---- .../errors/FencedStateEpochException.java | 28 -- .../errors/InvalidRecordStateException.java | 30 -- .../InvalidShareSessionEpochException.java | 28 -- .../errors/ShareSessionNotFoundException.java | 28 -- .../DefaultChannelMetadataRegistry.java | 4 +- .../apache/kafka/common/protocol/ApiKeys.java | 6 +- .../apache/kafka/common/protocol/Errors.java | 10 +- .../common/requests/AbstractRequest.java | 8 - .../common/requests/AbstractResponse.java | 8 - .../requests/ShareAcknowledgeRequest.java | 127 --------- .../requests/ShareAcknowledgeResponse.java | 148 ---------- .../common/requests/ShareFetchMetadata.java | 121 -------- .../common/requests/ShareFetchRequest.java | 267 ------------------ .../common/requests/ShareFetchResponse.java | 212 -------------- .../requests/ShareGroupDescribeRequest.java | 100 ------- .../requests/ShareGroupDescribeResponse.java | 77 ----- .../requests/ShareGroupHeartbeatRequest.java | 86 ------ .../requests/ShareGroupHeartbeatResponse.java | 71 ----- .../message/FindCoordinatorRequest.json | 4 +- .../message/FindCoordinatorResponse.json | 4 +- .../common/message/ListGroupsRequest.json | 4 +- .../common/message/ListGroupsResponse.json | 4 +- .../message/ShareAcknowledgeRequest.json | 53 ---- .../message/ShareAcknowledgeResponse.json | 72 ----- .../common/message/ShareFetchRequest.json | 67 ----- .../common/message/ShareFetchResponse.json | 83 ------ .../message/ShareGroupDescribeRequest.json | 33 --- .../message/ShareGroupDescribeResponse.json | 87 ------ .../message/ShareGroupHeartbeatRequest.json | 39 --- .../message/ShareGroupHeartbeatResponse.json | 57 ---- .../common/requests/RequestResponseTest.java | 129 --------- .../connect/runtime/rest/RestClient.java | 6 +- .../rest/entities/CreateConnectorRequest.java | 2 +- .../KafkaConfigBackingStoreMockitoTest.java | 157 +--------- 
.../storage/KafkaConfigBackingStoreTest.java | 170 +++++++++++ .../server/builders/KafkaApisBuilder.java | 2 +- .../builders/ReplicaManagerBuilder.java | 2 +- .../src/main/scala/kafka/log/LogCleaner.scala | 17 +- .../src/main/scala/kafka/log/LogManager.scala | 2 +- .../kafka/network/RequestConvertToJson.scala | 8 - .../kafka/server/BrokerLifecycleManager.scala | 6 +- .../scala/kafka/server/BrokerServer.scala | 10 +- .../scala/kafka/server/ConfigHandler.scala | 2 +- .../ControllerConfigurationValidator.scala | 3 +- .../main/scala/kafka/server/KafkaConfig.scala | 4 +- .../main/scala/kafka/server/KafkaServer.scala | 2 +- .../scala/kafka/server/ReplicaManager.scala | 25 +- .../metadata/BrokerMetadataPublisher.scala | 16 ++ .../main/scala/kafka/zk/AdminZkClient.scala | 4 +- .../log/remote/RemoteLogManagerTest.java | 2 +- .../junit/ClusterTestExtensionsUnitTest.java | 49 +--- .../scala/unit/kafka/log/LogCleanerTest.scala | 24 +- .../scala/unit/kafka/log/LogConfigTest.scala | 26 +- .../kafka/server/ApiVersionsRequestTest.scala | 2 +- .../server/BrokerLifecycleManagerTest.scala | 2 +- .../kafka/server/ReplicaManagerTest.scala | 37 +-- .../unit/kafka/server/RequestQuotaTest.scala | 16 +- .../BrokerMetadataPublisherTest.scala | 101 ++++++- docs/security.html | 36 --- gradle/dependencies.gradle | 2 +- .../group/GroupMetadataManager.java | 28 +- .../consumer/TargetAssignmentBuilder.java | 30 +- .../group/GroupMetadataManagerTest.java | 43 --- .../consumer/TargetAssignmentBuilderTest.java | 50 ++-- .../controller/ClusterControlManager.java | 7 - .../publisher/BrokerRegistrationTracker.java | 136 --------- .../BrokerRegistrationTrackerTest.java | 151 ---------- .../storage/RemoteLogManagerConfig.java | 38 --- ...cBasedRemoteLogMetadataManagerHarness.java | 7 +- ...edRemoteLogMetadataManagerRestartTest.java | 163 ++++++----- .../storage/RemoteLogManagerConfigTest.java | 6 +- .../group/ConsumerGroupCommandTestUtils.java | 29 +- 76 files changed, 528 insertions(+), 2972 
deletions(-) delete mode 100644 clients/src/main/java/org/apache/kafka/common/ShareGroupState.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java rename clients/src/{test => main}/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java (93%) delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java delete mode 100644 clients/src/main/resources/common/message/ShareAcknowledgeRequest.json delete mode 100644 clients/src/main/resources/common/message/ShareAcknowledgeResponse.json delete mode 100644 clients/src/main/resources/common/message/ShareFetchRequest.json delete mode 100644 clients/src/main/resources/common/message/ShareFetchResponse.json delete mode 100644 
clients/src/main/resources/common/message/ShareGroupDescribeRequest.json delete mode 100644 clients/src/main/resources/common/message/ShareGroupDescribeResponse.json delete mode 100644 clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json delete mode 100644 clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json delete mode 100644 metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java delete mode 100644 metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java diff --git a/README.md b/README.md index ab7dcd7685bde..27ce0dc0bce64 100644 --- a/README.md +++ b/README.md @@ -227,16 +227,11 @@ There are two code quality analysis tools that we regularly run, spotbugs and ch Checkstyle enforces a consistent coding style in Kafka. You can run checkstyle using: - ./gradlew checkstyleMain checkstyleTest spotlessCheck + ./gradlew checkstyleMain checkstyleTest The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the subproject build directories. They are also printed to the console. The build will fail if Checkstyle fails. -#### Spotless #### -The import order is a part of static check. please call `spotlessApply` to optimize the imports of Java codes before filing pull request : - - ./gradlew spotlessApply - #### Spotbugs #### Spotbugs uses static analysis to look for bugs in the code. 
You can run spotbugs using: diff --git a/build.gradle b/build.gradle index a2a6531d29a62..ea168ecb26fb4 100644 --- a/build.gradle +++ b/build.gradle @@ -47,9 +47,7 @@ plugins { // Updating the shadow plugin version to 8.1.1 causes issue with signing and publishing the shadowed // artifacts - see https://github.com/johnrengelman/shadow/issues/901 id 'com.github.johnrengelman.shadow' version '8.1.0' apply false - // the minimum required JRE of 6.14.0+ is 11 - // refer:https://github.com/diffplug/spotless/tree/main/plugin-gradle#requirements - id 'com.diffplug.spotless' version "6.13.0" apply false + id 'com.diffplug.spotless' version '6.14.0' apply false // 6.14.1 and newer require Java 11 at compile time, so we can't upgrade until AK 4.0 } ext { @@ -200,9 +198,6 @@ def determineCommitId() { } } -def spotlessApplyModules = [''] - - apply from: file('wrapper.gradle') if (repo != null) { @@ -798,16 +793,6 @@ subprojects { skipProjects = [ ":jmh-benchmarks", ":trogdor" ] skipConfigurations = [ "zinc" ] } - - if (project.name in spotlessApplyModules) { - apply plugin: 'com.diffplug.spotless' - spotless { - java { - importOrder('kafka', 'org.apache.kafka', 'com', 'net', 'org', 'java', 'javax', '', '\\#') - removeUnusedImports() - } - } - } } gradle.taskGraph.whenReady { taskGraph -> diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml index 61eb7e4b245fd..aff659638928b 100644 --- a/checkstyle/checkstyle.xml +++ b/checkstyle/checkstyle.xml @@ -82,8 +82,6 @@ - - diff --git a/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java b/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java deleted file mode 100644 index 716421f3dea2a..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common; - -import java.util.Arrays; -import java.util.Locale; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * The share group state. - */ -public enum ShareGroupState { - UNKNOWN("Unknown"), - STABLE("Stable"), - DEAD("Dead"), - EMPTY("Empty"); - - private final static Map NAME_TO_ENUM = Arrays.stream(values()) - .collect(Collectors.toMap(state -> state.name.toUpperCase(Locale.ROOT), Function.identity())); - - private final String name; - - ShareGroupState(String name) { - this.name = name; - } - - /** - * Case-insensitive share group state lookup by string name. - */ - public static ShareGroupState parse(String name) { - ShareGroupState state = NAME_TO_ENUM.get(name.toUpperCase(Locale.ROOT)); - return state == null ? 
UNKNOWN : state; - } - - @Override - public String toString() { - return name; - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java b/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java deleted file mode 100644 index 1e74bba199402..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.errors; - -/** - * Thrown when the share coordinator rejected the request because the share-group state epoch did not match. 
- */ -public class FencedStateEpochException extends ApiException { - private static final long serialVersionUID = 1L; - - public FencedStateEpochException(String message) { - super(message); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java deleted file mode 100644 index ae0fef5edeaef..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.errors; - -/** - * Thrown when the acknowledgement of delivery of a record could not be completed because the record - * state is invalid. 
- */ -public class InvalidRecordStateException extends ApiException { - - private static final long serialVersionUID = 1L; - - public InvalidRecordStateException(String message) { - super(message); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java deleted file mode 100644 index e261d8b7a8e88..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.errors; - -/** - * Thrown when the share session epoch is invalid. 
- */ -public class InvalidShareSessionEpochException extends RetriableException { - private static final long serialVersionUID = 1L; - - public InvalidShareSessionEpochException(String message) { - super(message); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java b/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java deleted file mode 100644 index 2b2249f8a5831..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.errors; - -/** - * Thrown when the share session was not found. 
- */ -public class ShareSessionNotFoundException extends RetriableException { - private static final long serialVersionUID = 1L; - - public ShareSessionNotFoundException(String message) { - super(message); - } -} diff --git a/clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java b/clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java similarity index 93% rename from clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java rename to clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java index 8985d00410c37..ae9e9a83a0c2c 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java +++ b/clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java @@ -22,7 +22,9 @@ public class DefaultChannelMetadataRegistry implements ChannelMetadataRegistry { @Override public void registerCipherInformation(final CipherInformation cipherInformation) { - this.cipherInformation = cipherInformation; + if (this.cipherInformation != null) { + this.cipherInformation = cipherInformation; + } } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index ffd5737ca3162..16bec4fb72dc6 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -118,11 +118,7 @@ public enum ApiKeys { PUSH_TELEMETRY(ApiMessageType.PUSH_TELEMETRY), ASSIGN_REPLICAS_TO_DIRS(ApiMessageType.ASSIGN_REPLICAS_TO_DIRS), LIST_CLIENT_METRICS_RESOURCES(ApiMessageType.LIST_CLIENT_METRICS_RESOURCES), - DESCRIBE_TOPIC_PARTITIONS(ApiMessageType.DESCRIBE_TOPIC_PARTITIONS), - SHARE_GROUP_HEARTBEAT(ApiMessageType.SHARE_GROUP_HEARTBEAT), - SHARE_GROUP_DESCRIBE(ApiMessageType.SHARE_GROUP_DESCRIBE), - 
SHARE_FETCH(ApiMessageType.SHARE_FETCH), - SHARE_ACKNOWLEDGE(ApiMessageType.SHARE_ACKNOWLEDGE); + DESCRIBE_TOPIC_PARTITIONS(ApiMessageType.DESCRIBE_TOPIC_PARTITIONS); private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index 10ae05aa850c9..900d191c8f9d4 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -41,7 +41,6 @@ import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.FencedLeaderEpochException; import org.apache.kafka.common.errors.FencedMemberEpochException; -import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.FetchSessionIdNotFoundException; import org.apache.kafka.common.errors.FetchSessionTopicIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; @@ -65,14 +64,12 @@ import org.apache.kafka.common.errors.InvalidPidMappingException; import org.apache.kafka.common.errors.InvalidPrincipalTypeException; import org.apache.kafka.common.errors.InvalidProducerEpochException; -import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidRegistrationException; import org.apache.kafka.common.errors.InvalidReplicaAssignmentException; import org.apache.kafka.common.errors.InvalidReplicationFactorException; import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.errors.InvalidRequiredAcksException; import org.apache.kafka.common.errors.InvalidSessionTimeoutException; -import org.apache.kafka.common.errors.InvalidShareSessionEpochException; import org.apache.kafka.common.errors.InvalidTimestampException; import org.apache.kafka.common.errors.InvalidTopicException; import 
org.apache.kafka.common.errors.InvalidTxnStateException; @@ -112,7 +109,6 @@ import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.SaslAuthenticationException; import org.apache.kafka.common.errors.SecurityDisabledException; -import org.apache.kafka.common.errors.ShareSessionNotFoundException; import org.apache.kafka.common.errors.SnapshotNotFoundException; import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.StaleMemberEpochException; @@ -398,11 +394,7 @@ public enum Errors { UNKNOWN_SUBSCRIPTION_ID(117, "Client sent a push telemetry request with an invalid or outdated subscription ID.", UnknownSubscriptionIdException::new), TELEMETRY_TOO_LARGE(118, "Client sent a push telemetry request larger than the maximum size the broker will accept.", TelemetryTooLargeException::new), INVALID_REGISTRATION(119, "The controller has considered the broker registration to be invalid.", InvalidRegistrationException::new), - TRANSACTION_ABORTABLE(120, "The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID.", TransactionAbortableException::new), - INVALID_RECORD_STATE(121, "The record state is invalid. The acknowledgement of delivery could not be completed.", InvalidRecordStateException::new), - SHARE_SESSION_NOT_FOUND(122, "The share session was not found.", ShareSessionNotFoundException::new), - INVALID_SHARE_SESSION_EPOCH(123, "The share session epoch is invalid.", InvalidShareSessionEpochException::new), - FENCED_STATE_EPOCH(124, "The share coordinator rejected the request because the share-group state epoch did not match.", FencedStateEpochException::new); + TRANSACTION_ABORTABLE(120, "The server encountered an error with the transaction. 
The client can abort the transaction to continue using this transactional ID.", TransactionAbortableException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java index 589e163992b22..b51221f5af642 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java @@ -326,14 +326,6 @@ private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, return ListClientMetricsResourcesRequest.parse(buffer, apiVersion); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsRequest.parse(buffer, apiVersion); - case SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatRequest.parse(buffer, apiVersion); - case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeRequest.parse(buffer, apiVersion); - case SHARE_FETCH: - return ShareFetchRequest.parse(buffer, apiVersion); - case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeRequest.parse(buffer, apiVersion); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseRequest`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index 5534168098e9d..dbafdbf3bcb07 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -263,14 +263,6 @@ public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer response return ListClientMetricsResourcesResponse.parse(responseBuffer, version); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsResponse.parse(responseBuffer, version); - case 
SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatResponse.parse(responseBuffer, version); - case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeResponse.parse(responseBuffer, version); - case SHARE_FETCH: - return ShareFetchResponse.parse(responseBuffer, version); - case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeResponse.parse(responseBuffer, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java deleted file mode 100644 index 1b77b43be33c1..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ShareAcknowledgeRequestData; -import org.apache.kafka.common.message.ShareAcknowledgeResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class ShareAcknowledgeRequest extends AbstractRequest { - - public static class Builder extends AbstractRequest.Builder { - - private final ShareAcknowledgeRequestData data; - - public Builder(ShareAcknowledgeRequestData data) { - this(data, false); - } - - public Builder(ShareAcknowledgeRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.SHARE_ACKNOWLEDGE, enableUnstableLastVersion); - this.data = data; - } - - public static ShareAcknowledgeRequest.Builder forConsumer(String groupId, ShareFetchMetadata metadata, - Map> acknowledgementsMap) { - ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData(); - data.setGroupId(groupId); - if (metadata != null) { - data.setMemberId(metadata.memberId().toString()); - data.setShareSessionEpoch(metadata.epoch()); - } - - // Build a map of topics to acknowledge keyed by topic ID, and within each a map of partitions keyed by index - Map> ackMap = new HashMap<>(); - - for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { - TopicIdPartition tip = acknowledgeEntry.getKey(); - Map partMap = ackMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); - ShareAcknowledgeRequestData.AcknowledgePartition ackPartition = partMap.get(tip.partition()); - if (ackPartition == null) { - ackPartition = new ShareAcknowledgeRequestData.AcknowledgePartition() - .setPartitionIndex(tip.partition()); - partMap.put(tip.partition(), 
ackPartition); - } - ackPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); - } - - // Finally, build up the data to fetch - data.setTopics(new ArrayList<>()); - ackMap.forEach((topicId, partMap) -> { - ShareAcknowledgeRequestData.AcknowledgeTopic ackTopic = new ShareAcknowledgeRequestData.AcknowledgeTopic() - .setTopicId(topicId) - .setPartitions(new ArrayList<>()); - data.topics().add(ackTopic); - - partMap.forEach((index, ackPartition) -> ackTopic.partitions().add(ackPartition)); - }); - - return new ShareAcknowledgeRequest.Builder(data, true); - } - - public ShareAcknowledgeRequestData data() { - return data; - } - - @Override - public ShareAcknowledgeRequest build(short version) { - return new ShareAcknowledgeRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final ShareAcknowledgeRequestData data; - - public ShareAcknowledgeRequest(ShareAcknowledgeRequestData data, short version) { - super(ApiKeys.SHARE_ACKNOWLEDGE, version); - this.data = data; - } - - @Override - public ShareAcknowledgeRequestData data() { - return data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - Errors error = Errors.forException(e); - return new ShareAcknowledgeResponse(new ShareAcknowledgeResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code())); - } - - public static ShareAcknowledgeRequest parse(ByteBuffer buffer, short version) { - return new ShareAcknowledgeRequest( - new ShareAcknowledgeRequestData(new ByteBufferAccessor(buffer), version), - version - ); - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java deleted file mode 100644 index 5cab233dccac8..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java +++ 
/dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ShareAcknowledgeResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -/** - * Possible error codes. 
- * - {@link Errors#GROUP_AUTHORIZATION_FAILED} - * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} - * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} - * - {@link Errors#NOT_LEADER_OR_FOLLOWER} - * - {@link Errors#UNKNOWN_TOPIC_ID} - * - {@link Errors#INVALID_RECORD_STATE} - * - {@link Errors#KAFKA_STORAGE_ERROR} - * - {@link Errors#INVALID_REQUEST} - * - {@link Errors#UNKNOWN_SERVER_ERROR} - */ -public class ShareAcknowledgeResponse extends AbstractResponse { - - private final ShareAcknowledgeResponseData data; - - public ShareAcknowledgeResponse(ShareAcknowledgeResponseData data) { - super(ApiKeys.SHARE_ACKNOWLEDGE); - this.data = data; - } - - public Errors error() { - return Errors.forCode(data.errorCode()); - } - - @Override - public ShareAcknowledgeResponseData data() { - return data; - } - - @Override - public Map errorCounts() { - HashMap counts = new HashMap<>(); - updateErrorCounts(counts, Errors.forCode(data.errorCode())); - data.responses().forEach( - topic -> topic.partitions().forEach( - partition -> updateErrorCounts(counts, Errors.forCode(partition.errorCode())) - ) - ); - return counts; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - data.setThrottleTimeMs(throttleTimeMs); - } - - public static ShareAcknowledgeResponse parse(ByteBuffer buffer, short version) { - return new ShareAcknowledgeResponse( - new ShareAcknowledgeResponseData(new ByteBufferAccessor(buffer), version) - ); - } - - private static boolean matchingTopic(ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse previousTopic, TopicIdPartition currentTopic) { - if (previousTopic == null) - return false; - return previousTopic.topicId().equals(currentTopic.topicId()); - } - - public static ShareAcknowledgeResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { - return partitionResponse(topicIdPartition.topicPartition().partition(), 
error); - } - - public static ShareAcknowledgeResponseData.PartitionData partitionResponse(int partition, Errors error) { - return new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partition) - .setErrorCode(error.code()); - } - - public static ShareAcknowledgeResponse of(Errors error, - int throttleTimeMs, - LinkedHashMap responseData, - List nodeEndpoints) { - return new ShareAcknowledgeResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints)); - } - - public static ShareAcknowledgeResponseData toMessage(Errors error, int throttleTimeMs, - Iterator> partIterator, - List nodeEndpoints) { - Map topicResponseList = new LinkedHashMap<>(); - while (partIterator.hasNext()) { - Map.Entry entry = partIterator.next(); - ShareAcknowledgeResponseData.PartitionData partitionData = entry.getValue(); - // Since PartitionData alone doesn't know the partition ID, we set it here - partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); - // Checking if the topic is already present in the map - if (topicResponseList.containsKey(entry.getKey().topicId())) { - topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); - } else { - List partitionResponses = new ArrayList<>(); - partitionResponses.add(partitionData); - topicResponseList.put(entry.getKey().topicId(), new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() - .setTopicId(entry.getKey().topicId()) - .setPartitions(partitionResponses)); - } - } - ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); - // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list - nodeEndpoints.forEach(endpoint -> data.nodeEndpoints().add( - new ShareAcknowledgeResponseData.NodeEndpoint() - .setNodeId(endpoint.id()) - .setHost(endpoint.host()) - .setPort(endpoint.port()) - .setRack(endpoint.rack()))); - return data.setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code()) - 
.setResponses(new ArrayList<>(topicResponseList.values())); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java deleted file mode 100644 index 4e5bcc2237e43..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.Uuid; - -public class ShareFetchMetadata { - /** - * The first epoch. When used in a ShareFetch request, indicates that the client - * wants to create a session. - */ - public static final int INITIAL_EPOCH = 0; - - /** - * An invalid epoch. When used in a ShareFetch request, indicates that the client - * wants to close an existing session. - */ - public static final int FINAL_EPOCH = -1; - - /** - * - */ - public boolean isNewSession() { - return epoch == INITIAL_EPOCH; - } - - /** - * Returns true if this is a full share fetch request. - */ - public boolean isFull() { - return (this.epoch == INITIAL_EPOCH) || (this.epoch == FINAL_EPOCH); - } - - /** - * Returns the next epoch. 
- * - * @param prevEpoch The previous epoch. - * @return The next epoch. - */ - public static int nextEpoch(int prevEpoch) { - if (prevEpoch < 0) { - // The next epoch after FINAL_EPOCH is always FINAL_EPOCH itself. - return FINAL_EPOCH; - } else if (prevEpoch == Integer.MAX_VALUE) { - return 1; - } else { - return prevEpoch + 1; - } - } - - /** - * The member ID. - */ - private final Uuid memberId; - - /** - * The share session epoch. - */ - private final int epoch; - - public ShareFetchMetadata(Uuid memberId, int epoch) { - this.memberId = memberId; - this.epoch = epoch; - } - - public static ShareFetchMetadata initialEpoch(Uuid memberId) { - return new ShareFetchMetadata(memberId, INITIAL_EPOCH); - } - - public ShareFetchMetadata nextEpoch() { - return new ShareFetchMetadata(memberId, nextEpoch(epoch)); - } - - public ShareFetchMetadata nextCloseExistingAttemptNew() { - return new ShareFetchMetadata(memberId, INITIAL_EPOCH); - } - - public ShareFetchMetadata finalEpoch() { - return new ShareFetchMetadata(memberId, FINAL_EPOCH); - } - - public Uuid memberId() { - return memberId; - } - - public int epoch() { - return epoch; - } - - public boolean isFinalEpoch() { - return epoch == FINAL_EPOCH; - } - - public String toString() { - StringBuilder bld = new StringBuilder(); - bld.append("(memberId=").append(memberId).append(", "); - if (epoch == INITIAL_EPOCH) { - bld.append("epoch=INITIAL)"); - } else if (epoch == FINAL_EPOCH) { - bld.append("epoch=FINAL)"); - } else { - bld.append("epoch=").append(epoch).append(")"); - } - return bld.toString(); - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java deleted file mode 100644 index 385e802a691a9..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ShareFetchRequestData; -import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -public class ShareFetchRequest extends AbstractRequest { - - public static class Builder extends AbstractRequest.Builder { - - private final ShareFetchRequestData data; - - public Builder(ShareFetchRequestData data) { - this(data, false); - } - - public Builder(ShareFetchRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.SHARE_FETCH, enableUnstableLastVersion); - this.data = data; - } - - public static Builder forConsumer(String groupId, ShareFetchMetadata metadata, - int maxWait, int minBytes, int maxBytes, int fetchSize, - List send, 
List forget, - Map> acknowledgementsMap) { - ShareFetchRequestData data = new ShareFetchRequestData(); - data.setGroupId(groupId); - int ackOnlyPartitionMaxBytes = fetchSize; - boolean isClosingShareSession = false; - if (metadata != null) { - data.setMemberId(metadata.memberId().toString()); - data.setShareSessionEpoch(metadata.epoch()); - if (metadata.isFinalEpoch()) { - isClosingShareSession = true; - ackOnlyPartitionMaxBytes = 0; - } - } - data.setMaxWaitMs(maxWait); - data.setMinBytes(minBytes); - data.setMaxBytes(maxBytes); - - // Build a map of topics to fetch keyed by topic ID, and within each a map of partitions keyed by index - Map> fetchMap = new HashMap<>(); - - // First, start by adding the list of topic-partitions we are fetching - if (!isClosingShareSession) { - for (TopicIdPartition tip : send) { - Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); - ShareFetchRequestData.FetchPartition fetchPartition = new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tip.partition()) - .setPartitionMaxBytes(fetchSize); - partMap.put(tip.partition(), fetchPartition); - } - } - - // Next, add acknowledgements that we are piggybacking onto the fetch. 
Generally, the list of - // topic-partitions will be a subset, but if the assignment changes, there might be new entries to add - for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { - TopicIdPartition tip = acknowledgeEntry.getKey(); - Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); - ShareFetchRequestData.FetchPartition fetchPartition = partMap.get(tip.partition()); - if (fetchPartition == null) { - fetchPartition = new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tip.partition()) - .setPartitionMaxBytes(ackOnlyPartitionMaxBytes); - partMap.put(tip.partition(), fetchPartition); - } - fetchPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); - } - - // Build up the data to fetch - if (!fetchMap.isEmpty()) { - data.setTopics(new ArrayList<>()); - fetchMap.forEach((topicId, partMap) -> { - ShareFetchRequestData.FetchTopic fetchTopic = new ShareFetchRequestData.FetchTopic() - .setTopicId(topicId) - .setPartitions(new ArrayList<>()); - partMap.forEach((index, fetchPartition) -> fetchTopic.partitions().add(fetchPartition)); - data.topics().add(fetchTopic); - }); - } - - // And finally, forget the topic-partitions that are no longer in the session - if (!forget.isEmpty()) { - Map> forgetMap = new HashMap<>(); - for (TopicIdPartition tip : forget) { - List partList = forgetMap.computeIfAbsent(tip.topicId(), k -> new ArrayList<>()); - partList.add(tip.partition()); - } - data.setForgottenTopicsData(new ArrayList<>()); - forgetMap.forEach((topicId, partList) -> { - ShareFetchRequestData.ForgottenTopic forgetTopic = new ShareFetchRequestData.ForgottenTopic() - .setTopicId(topicId) - .setPartitions(new ArrayList<>()); - partList.forEach(index -> forgetTopic.partitions().add(index)); - data.forgottenTopicsData().add(forgetTopic); - }); - } - - return new Builder(data, true); - } - - public ShareFetchRequestData data() { - return data; - } - - @Override - public ShareFetchRequest build(short version) { - 
return new ShareFetchRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final ShareFetchRequestData data; - private volatile LinkedHashMap shareFetchData = null; - private volatile List toForget = null; - - public ShareFetchRequest(ShareFetchRequestData data, short version) { - super(ApiKeys.SHARE_FETCH, version); - this.data = data; - } - - @Override - public ShareFetchRequestData data() { - return data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - Errors error = Errors.forException(e); - return new ShareFetchResponse(new ShareFetchResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code())); - } - - public static ShareFetchRequest parse(ByteBuffer buffer, short version) { - return new ShareFetchRequest( - new ShareFetchRequestData(new ByteBufferAccessor(buffer), version), - version - ); - } - - public static final class SharePartitionData { - public final Uuid topicId; - public final int maxBytes; - - public SharePartitionData( - Uuid topicId, - int maxBytes - ) { - this.topicId = topicId; - this.maxBytes = maxBytes; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ShareFetchRequest.SharePartitionData that = (ShareFetchRequest.SharePartitionData) o; - return Objects.equals(topicId, that.topicId) && - maxBytes == that.maxBytes; - } - - @Override - public int hashCode() { - return Objects.hash(topicId, maxBytes); - } - - @Override - public String toString() { - return "SharePartitionData(" + - "topicId=" + topicId + - ", maxBytes=" + maxBytes + - ')'; - } - } - - public int minBytes() { - return data.minBytes(); - } - - public int maxBytes() { - return data.maxBytes(); - } - - public int maxWait() { - return data.maxWaitMs(); - } - - public Map shareFetchData(Map topicNames) { - if (shareFetchData == null) { - synchronized (this) 
{ - if (shareFetchData == null) { - // Assigning the lazy-initialized `shareFetchData` in the last step - // to avoid other threads accessing a half-initialized object. - final LinkedHashMap shareFetchDataTmp = new LinkedHashMap<>(); - data.topics().forEach(shareFetchTopic -> { - String name = topicNames.get(shareFetchTopic.topicId()); - shareFetchTopic.partitions().forEach(shareFetchPartition -> { - // Topic name may be null here if the topic name was unable to be resolved using the topicNames map. - shareFetchDataTmp.put(new TopicIdPartition(shareFetchTopic.topicId(), new TopicPartition(name, shareFetchPartition.partitionIndex())), - new ShareFetchRequest.SharePartitionData( - shareFetchTopic.topicId(), - shareFetchPartition.partitionMaxBytes() - ) - ); - }); - }); - shareFetchData = shareFetchDataTmp; - } - } - } - return shareFetchData; - } - - public List forgottenTopics(Map topicNames) { - if (toForget == null) { - synchronized (this) { - if (toForget == null) { - // Assigning the lazy-initialized `toForget` in the last step - // to avoid other threads accessing a half-initialized object. - final List toForgetTmp = new ArrayList<>(); - data.forgottenTopicsData().forEach(forgottenTopic -> { - String name = topicNames.get(forgottenTopic.topicId()); - // Topic name may be null here if the topic name was unable to be resolved using the topicNames map. 
- forgottenTopic.partitions().forEach(partitionId -> toForgetTmp.add(new TopicIdPartition(forgottenTopic.topicId(), new TopicPartition(name, partitionId)))); - }); - toForget = toForgetTmp; - } - } - } - return toForget; - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java deleted file mode 100644 index b33969e0efa41..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.ObjectSerializationCache; -import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.Records; - -import java.nio.ByteBuffer; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Iterator; -import java.util.Collections; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; - - -/** - * Possible error codes. - * - {@link Errors#GROUP_AUTHORIZATION_FAILED} - * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} - * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} - * - {@link Errors#NOT_LEADER_OR_FOLLOWER} - * - {@link Errors#UNKNOWN_TOPIC_ID} - * - {@link Errors#INVALID_RECORD_STATE} - * - {@link Errors#KAFKA_STORAGE_ERROR} - * - {@link Errors#CORRUPT_MESSAGE} - * - {@link Errors#INVALID_REQUEST} - * - {@link Errors#UNKNOWN_SERVER_ERROR} - */ -public class ShareFetchResponse extends AbstractResponse { - - private final ShareFetchResponseData data; - - private volatile LinkedHashMap responseData = null; - - public ShareFetchResponse(ShareFetchResponseData data) { - super(ApiKeys.SHARE_FETCH); - this.data = data; - } - - public Errors error() { - return Errors.forCode(data.errorCode()); - } - - @Override - public ShareFetchResponseData data() { - return data; - } - - @Override - public Map errorCounts() { - HashMap counts = new HashMap<>(); - updateErrorCounts(counts, Errors.forCode(data.errorCode())); - data.responses().forEach( - topic -> topic.partitions().forEach( - partition -> updateErrorCounts(counts, 
Errors.forCode(partition.errorCode())) - ) - ); - return counts; - } - - public LinkedHashMap responseData(Map topicNames) { - if (responseData == null) { - synchronized (this) { - // Assigning the lazy-initialized `responseData` in the last step - // to avoid other threads accessing a half-initialized object. - if (responseData == null) { - final LinkedHashMap responseDataTmp = new LinkedHashMap<>(); - data.responses().forEach(topicResponse -> { - String name = topicNames.get(topicResponse.topicId()); - if (name != null) { - topicResponse.partitions().forEach(partitionData -> responseDataTmp.put(new TopicIdPartition(topicResponse.topicId(), - new TopicPartition(name, partitionData.partitionIndex())), partitionData)); - } - }); - responseData = responseDataTmp; - } - } - } - return responseData; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - data.setThrottleTimeMs(throttleTimeMs); - } - - public static ShareFetchResponse parse(ByteBuffer buffer, short version) { - return new ShareFetchResponse( - new ShareFetchResponseData(new ByteBufferAccessor(buffer), version) - ); - } - - /** - * Returns `partition.records` as `Records` (instead of `BaseRecords`). If `records` is `null`, returns `MemoryRecords.EMPTY`. - * - *

If this response was deserialized after a share fetch, this method should never fail. An example where this would - * fail is a down-converted response (e.g. LazyDownConversionRecords) on the broker (before it's serialized and - * sent on the wire). - * - * @param partition partition data - * @return Records or empty record if the records in PartitionData is null. - */ - public static Records recordsOrFail(ShareFetchResponseData.PartitionData partition) { - if (partition.records() == null) return MemoryRecords.EMPTY; - if (partition.records() instanceof Records) return (Records) partition.records(); - throw new ClassCastException("The record type is " + partition.records().getClass().getSimpleName() + ", which is not a subtype of " + - Records.class.getSimpleName() + ". This method is only safe to call if the `ShareFetchResponse` was deserialized from bytes."); - } - - /** - * Convenience method to find the size of a response. - * - * @param version The version of the request - * @param partIterator The partition iterator. - * @return The response size in bytes. - */ - public static int sizeOf(short version, - Iterator> partIterator) { - // Since the throttleTimeMs and metadata field sizes are constant and fixed, we can - // use arbitrary values here without affecting the result. - ShareFetchResponseData data = toMessage(Errors.NONE, 0, partIterator, Collections.emptyList()); - ObjectSerializationCache cache = new ObjectSerializationCache(); - return 4 + data.size(cache, version); - } - - /** - * @return The size in bytes of the records. 0 is returned if records of input partition is null. - */ - public static int recordsSize(ShareFetchResponseData.PartitionData partition) { - return partition.records() == null ? 
0 : partition.records().sizeInBytes(); - } - - public static ShareFetchResponse of(Errors error, - int throttleTimeMs, - LinkedHashMap responseData, - List nodeEndpoints) { - return new ShareFetchResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints)); - } - - public static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs, - Iterator> partIterator, - List nodeEndpoints) { - Map topicResponseList = new LinkedHashMap<>(); - while (partIterator.hasNext()) { - Map.Entry entry = partIterator.next(); - ShareFetchResponseData.PartitionData partitionData = entry.getValue(); - // Since PartitionData alone doesn't know the partition ID, we set it here - partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); - // Checking if the topic is already present in the map - if (topicResponseList.containsKey(entry.getKey().topicId())) { - topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); - } else { - List partitionResponses = new ArrayList<>(); - partitionResponses.add(partitionData); - topicResponseList.put(entry.getKey().topicId(), new ShareFetchResponseData.ShareFetchableTopicResponse() - .setTopicId(entry.getKey().topicId()) - .setPartitions(partitionResponses)); - } - } - ShareFetchResponseData data = new ShareFetchResponseData(); - // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list - nodeEndpoints.forEach(endpoint -> data.nodeEndpoints().add( - new ShareFetchResponseData.NodeEndpoint() - .setNodeId(endpoint.id()) - .setHost(endpoint.host()) - .setPort(endpoint.port()) - .setRack(endpoint.rack()))); - return data.setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code()) - .setResponses(new ArrayList<>(topicResponseList.values())); - } - - public static ShareFetchResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { - return 
partitionResponse(topicIdPartition.topicPartition().partition(), error); - } - - public static ShareFetchResponseData.PartitionData partitionResponse(int partition, Errors error) { - return new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) - .setErrorCode(error.code()); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java deleted file mode 100644 index 25c02e4a83c5e..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.ShareGroupDescribeRequestData; -import org.apache.kafka.common.message.ShareGroupDescribeResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.List; -import java.util.stream.Collectors; - -public class ShareGroupDescribeRequest extends AbstractRequest { - - public static class Builder extends AbstractRequest.Builder { - - private final ShareGroupDescribeRequestData data; - - public Builder(ShareGroupDescribeRequestData data) { - this(data, false); - } - - public Builder(ShareGroupDescribeRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.SHARE_GROUP_DESCRIBE, enableUnstableLastVersion); - this.data = data; - } - - @Override - public ShareGroupDescribeRequest build(short version) { - return new ShareGroupDescribeRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final ShareGroupDescribeRequestData data; - - public ShareGroupDescribeRequest(ShareGroupDescribeRequestData data, short version) { - super(ApiKeys.SHARE_GROUP_DESCRIBE, version); - this.data = data; - } - - @Override - public ShareGroupDescribeResponse getErrorResponse(int throttleTimeMs, Throwable e) { - ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData() - .setThrottleTimeMs(throttleTimeMs); - // Set error for each group - short errorCode = Errors.forException(e).code(); - this.data.groupIds().forEach( - groupId -> data.groups().add( - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupId) - .setErrorCode(errorCode) - ) - ); - return new ShareGroupDescribeResponse(data); - } - - @Override - public ShareGroupDescribeRequestData data() { - return data; - } - - public static ShareGroupDescribeRequest parse(ByteBuffer buffer, 
short version) { - return new ShareGroupDescribeRequest( - new ShareGroupDescribeRequestData(new ByteBufferAccessor(buffer), version), - version - ); - } - - public static List getErrorDescribedGroupList( - List groupIds, - Errors error - ) { - return groupIds.stream() - .map(groupId -> new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupId) - .setErrorCode(error.code()) - ).collect(Collectors.toList()); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java deleted file mode 100644 index 95dd371eedfa7..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.ShareGroupDescribeResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; - -/** - * Possible error codes. - * - * - {@link Errors#GROUP_AUTHORIZATION_FAILED} - * - {@link Errors#NOT_COORDINATOR} - * - {@link Errors#COORDINATOR_NOT_AVAILABLE} - * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} - * - {@link Errors#INVALID_REQUEST} - * - {@link Errors#INVALID_GROUP_ID} - * - {@link Errors#GROUP_ID_NOT_FOUND} - */ -public class ShareGroupDescribeResponse extends AbstractResponse { - - private final ShareGroupDescribeResponseData data; - - public ShareGroupDescribeResponse(ShareGroupDescribeResponseData data) { - super(ApiKeys.SHARE_GROUP_DESCRIBE); - this.data = data; - } - - @Override - public ShareGroupDescribeResponseData data() { - return data; - } - - @Override - public Map errorCounts() { - HashMap counts = new HashMap<>(); - data.groups().forEach( - group -> updateErrorCounts(counts, Errors.forCode(group.errorCode())) - ); - return counts; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - data.setThrottleTimeMs(throttleTimeMs); - } - - public static ShareGroupDescribeResponse parse(ByteBuffer buffer, short version) { - return new ShareGroupDescribeResponse( - new ShareGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) - ); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java deleted file mode 100644 index 7e112ef29dd14..0000000000000 --- 
a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import java.nio.ByteBuffer; - -import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; -import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -public class ShareGroupHeartbeatRequest extends AbstractRequest { - /** - * A member epoch of -1 means that the member wants to leave the group. - */ - public static final int LEAVE_GROUP_MEMBER_EPOCH = -1; - - /** - * A member epoch of 0 means that the member wants to join the group. 
- */ - public static final int JOIN_GROUP_MEMBER_EPOCH = 0; - - public static class Builder extends AbstractRequest.Builder { - private final ShareGroupHeartbeatRequestData data; - - public Builder(ShareGroupHeartbeatRequestData data) { - this(data, true); - } - - public Builder(ShareGroupHeartbeatRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.SHARE_GROUP_HEARTBEAT, enableUnstableLastVersion); - this.data = data; - } - - @Override - public ShareGroupHeartbeatRequest build(short version) { - return new ShareGroupHeartbeatRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final ShareGroupHeartbeatRequestData data; - - public ShareGroupHeartbeatRequest(ShareGroupHeartbeatRequestData data, short version) { - super(ApiKeys.SHARE_GROUP_HEARTBEAT, version); - this.data = data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - return new ShareGroupHeartbeatResponse( - new ShareGroupHeartbeatResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setErrorCode(Errors.forException(e).code()) - ); - } - - @Override - public ShareGroupHeartbeatRequestData data() { - return data; - } - - public static ShareGroupHeartbeatRequest parse(ByteBuffer buffer, short version) { - return new ShareGroupHeartbeatRequest(new ShareGroupHeartbeatRequestData( - new ByteBufferAccessor(buffer), version), version); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java deleted file mode 100644 index de05d44aebecb..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Map; - -/** - * Possible error codes. 
- * - * - {@link Errors#GROUP_AUTHORIZATION_FAILED} - * - {@link Errors#NOT_COORDINATOR} - * - {@link Errors#COORDINATOR_NOT_AVAILABLE} - * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} - * - {@link Errors#INVALID_REQUEST} - * - {@link Errors#UNKNOWN_MEMBER_ID} - * - {@link Errors#GROUP_MAX_SIZE_REACHED} - */ -public class ShareGroupHeartbeatResponse extends AbstractResponse { - private final ShareGroupHeartbeatResponseData data; - - public ShareGroupHeartbeatResponse(ShareGroupHeartbeatResponseData data) { - super(ApiKeys.SHARE_GROUP_HEARTBEAT); - this.data = data; - } - - @Override - public ShareGroupHeartbeatResponseData data() { - return data; - } - - @Override - public Map errorCounts() { - return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - data.setThrottleTimeMs(throttleTimeMs); - } - - public static ShareGroupHeartbeatResponse parse(ByteBuffer buffer, short version) { - return new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData( - new ByteBufferAccessor(buffer), version)); - } -} diff --git a/clients/src/main/resources/common/message/FindCoordinatorRequest.json b/clients/src/main/resources/common/message/FindCoordinatorRequest.json index 43e6fe5014b26..42b2f4c891ad5 100644 --- a/clients/src/main/resources/common/message/FindCoordinatorRequest.json +++ b/clients/src/main/resources/common/message/FindCoordinatorRequest.json @@ -27,9 +27,7 @@ // Version 4 adds support for batching via CoordinatorKeys (KIP-699) // // Version 5 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - // - // Version 6 adds support for share groups (KIP-932). 
- "validVersions": "0-6", + "validVersions": "0-5", "deprecatedVersions": "0", "flexibleVersions": "3+", "fields": [ diff --git a/clients/src/main/resources/common/message/FindCoordinatorResponse.json b/clients/src/main/resources/common/message/FindCoordinatorResponse.json index be0479f908c96..860d655a252b2 100644 --- a/clients/src/main/resources/common/message/FindCoordinatorResponse.json +++ b/clients/src/main/resources/common/message/FindCoordinatorResponse.json @@ -26,9 +26,7 @@ // Version 4 adds support for batching via Coordinators (KIP-699) // // Version 5 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - // - // Version 6 adds support for share groups (KIP-932). - "validVersions": "0-6", + "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ListGroupsRequest.json b/clients/src/main/resources/common/message/ListGroupsRequest.json index a872165d516cf..32defaa203382 100644 --- a/clients/src/main/resources/common/message/ListGroupsRequest.json +++ b/clients/src/main/resources/common/message/ListGroupsRequest.json @@ -25,9 +25,7 @@ // Version 4 adds the StatesFilter field (KIP-518). // // Version 5 adds the TypesFilter field (KIP-848). - // - // Version 6 adds support for share groups (KIP-932). - "validVersions": "0-6", + "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "StatesFilter", "type": "[]string", "versions": "4+", diff --git a/clients/src/main/resources/common/message/ListGroupsResponse.json b/clients/src/main/resources/common/message/ListGroupsResponse.json index 77f1c89e34a38..fc4077c080f46 100644 --- a/clients/src/main/resources/common/message/ListGroupsResponse.json +++ b/clients/src/main/resources/common/message/ListGroupsResponse.json @@ -27,9 +27,7 @@ // Version 4 adds the GroupState field (KIP-518). // // Version 5 adds the GroupType field (KIP-848). 
- // - // Version 6 adds support for share groups (KIP-932). - "validVersions": "0-6", + "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json b/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json deleted file mode 100644 index db534cb4c1c13..0000000000000 --- a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json +++ /dev/null @@ -1,53 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 79, - "type": "request", - "listeners": ["broker"], - "name": "ShareAcknowledgeRequest", - "validVersions": "0", - "flexibleVersions": "0+", - // The ShareAcknowledgeRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, - "fields": [ - { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", - "about": "The group identifier." 
}, - { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", - "about": "The member ID." }, - { "name": "ShareSessionEpoch", "type": "int32", "versions": "0+", - "about": "The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests." }, - { "name": "Topics", "type": "[]AcknowledgeTopic", "versions": "0+", - "about": "The topics containing records to acknowledge.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]AcknowledgePartition", "versions": "0+", - "about": "The partitions containing records to acknowledge.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." }, - { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", - "about": "Record batches to acknowledge.", "fields": [ - { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "First offset of batch of records to acknowledge."}, - { "name": "LastOffset", "type": "int64", "versions": "0+", - "about": "Last offset (inclusive) of batch of records to acknowledge."}, - { "name": "AcknowledgeTypes", "type": "[]int8", "versions": "0+", - "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject."} - ]} - ]} - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json deleted file mode 100644 index 638ca10c64b3b..0000000000000 --- a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. 
-// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 79, - "type": "response", - "name": "ShareAcknowledgeResponse", - "validVersions": "0", - "flexibleVersions": "0+", - // Supported errors: - // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - TOPIC_AUTHORIZATION_FAILED (version 0+) - // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) - // - SHARE_SESSION_NOT_FOUND (version 0+) - // - INVALID_SHARE_SESSION_EPOCH (version 0+) - // - NOT_LEADER_OR_FOLLOWER (version 0+) - // - UNKNOWN_TOPIC_ID (version 0+) - // - INVALID_RECORD_STATE (version 0+) - // - KAFKA_STORAGE_ERROR (version 0+) - // - INVALID_REQUEST (version 0+) - // - UNKNOWN_SERVER_ERROR (version 0+) - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", "ignorable": true, - "about": "The top level response error code." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The top-level error message, or null if there was no error." 
}, - { "name": "Responses", "type": "[]ShareAcknowledgeTopicResponse", "versions": "0+", - "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The topic partitions.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The error message, or null if there was no error." }, - { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", "fields": [ - { "name": "LeaderId", "type": "int32", "versions": "0+", - "about": "The ID of the current leader or -1 if the leader is unknown." }, - { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch." } - ]} - ]} - ]}, - { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "0+", - "about": "Endpoints for all current leaders enumerated in PartitionData with error NOT_LEADER_OR_FOLLOWER.", "fields": [ - { "name": "NodeId", "type": "int32", "versions": "0+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, - { "name": "Host", "type": "string", "versions": "0+", - "about": "The node's hostname." }, - { "name": "Port", "type": "int32", "versions": "0+", - "about": "The node's port." }, - { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The rack of the node, or null if it has not been assigned to a rack." 
} - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareFetchRequest.json b/clients/src/main/resources/common/message/ShareFetchRequest.json deleted file mode 100644 index d0b59dcb26a80..0000000000000 --- a/clients/src/main/resources/common/message/ShareFetchRequest.json +++ /dev/null @@ -1,67 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 78, - "type": "request", - "listeners": ["broker"], - "name": "ShareFetchRequest", - "validVersions": "0", - "flexibleVersions": "0+", - // The ShareFetchRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, - "fields": [ - { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", - "about": "The group identifier." }, - { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", - "about": "The member ID." 
}, - { "name": "ShareSessionEpoch", "type": "int32", "versions": "0+", - "about": "The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests." }, - { "name": "MaxWaitMs", "type": "int32", "versions": "0+", - "about": "The maximum time in milliseconds to wait for the response." }, - { "name": "MinBytes", "type": "int32", "versions": "0+", - "about": "The minimum bytes to accumulate in the response." }, - { "name": "MaxBytes", "type": "int32", "versions": "0+", "default": "0x7fffffff", "ignorable": true, - "about": "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored." }, - { "name": "Topics", "type": "[]FetchTopic", "versions": "0+", - "about": "The topics to fetch.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]FetchPartition", "versions": "0+", - "about": "The partitions to fetch.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." }, - { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", - "about": "The maximum bytes to fetch from this partition. 0 when only acknowledgement with no fetching is required. See KIP-74 for cases where this limit may not be honored." 
}, - { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", - "about": "Record batches to acknowledge.", "fields": [ - { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "First offset of batch of records to acknowledge."}, - { "name": "LastOffset", "type": "int64", "versions": "0+", - "about": "Last offset (inclusive) of batch of records to acknowledge."}, - { "name": "AcknowledgeTypes", "type": "[]int8", "versions": "0+", - "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject."} - ]} - ]} - ]}, - { "name": "ForgottenTopicsData", "type": "[]ForgottenTopic", "versions": "0+", "ignorable": false, - "about": "The partitions to remove from this share session.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]int32", "versions": "0+", - "about": "The partitions indexes to forget." } - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareFetchResponse.json b/clients/src/main/resources/common/message/ShareFetchResponse.json deleted file mode 100644 index 5338e1208a7bc..0000000000000 --- a/clients/src/main/resources/common/message/ShareFetchResponse.json +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 78, - "type": "response", - "name": "ShareFetchResponse", - "validVersions": "0", - "flexibleVersions": "0+", - // Supported errors for ErrorCode and AcknowledgeErrorCode: - // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - TOPIC_AUTHORIZATION_FAILED (version 0+) - // - SHARE_SESSION_NOT_FOUND (version 0+) - // - INVALID_SHARE_SESSION_EPOCH (version 0+) - // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) - // - NOT_LEADER_OR_FOLLOWER (version 0+) - // - UNKNOWN_TOPIC_ID (version 0+) - // - INVALID_RECORD_STATE (version 0+) - only for AcknowledgeErrorCode - // - KAFKA_STORAGE_ERROR (version 0+) - // - CORRUPT_MESSAGE (version 0+) - // - INVALID_REQUEST (version 0+) - // - UNKNOWN_SERVER_ERROR (version 0+) - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", "ignorable": true, - "about": "The top-level response error code." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The top-level error message, or null if there was no error." 
}, - { "name": "Responses", "type": "[]ShareFetchableTopicResponse", "versions": "0+", - "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The topic partitions.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The fetch error code, or 0 if there was no fetch error." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The fetch error message, or null if there was no fetch error." }, - { "name": "AcknowledgeErrorCode", "type": "int16", "versions": "0+", - "about": "The acknowledge error code, or 0 if there was no acknowledge error." }, - { "name": "AcknowledgeErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The acknowledge error message, or null if there was no acknowledge error." }, - { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", "fields": [ - { "name": "LeaderId", "type": "int32", "versions": "0+", - "about": "The ID of the current leader or -1 if the leader is unknown." }, - { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch." 
} - ]}, - { "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0+", "about": "The record data."}, - { "name": "AcquiredRecords", "type": "[]AcquiredRecords", "versions": "0+", "about": "The acquired records.", "fields": [ - {"name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The earliest offset in this batch of acquired records."}, - {"name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this batch of acquired records."}, - {"name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count of this batch of acquired records."} - ]} - ]} - ]}, - { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "0+", - "about": "Endpoints for all current leaders enumerated in PartitionData with error NOT_LEADER_OR_FOLLOWER.", "fields": [ - { "name": "NodeId", "type": "int32", "versions": "0+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, - { "name": "Host", "type": "string", "versions": "0+", - "about": "The node's hostname." }, - { "name": "Port", "type": "int32", "versions": "0+", - "about": "The node's port." }, - { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The rack of the node, or null if it has not been assigned to a rack." } - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json deleted file mode 100644 index c95790c9b198f..0000000000000 --- a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json +++ /dev/null @@ -1,33 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. 
-// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 77, - "type": "request", - "listeners": ["broker"], - "name": "ShareGroupDescribeRequest", - "validVersions": "0", - "flexibleVersions": "0+", - // The ShareGroupDescribeRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, - "fields": [ - { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", - "about": "The ids of the groups to describe" }, - { "name": "IncludeAuthorizedOperations", "type": "bool", "versions": "0+", - "about": "Whether to include authorized operations." } - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json b/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json deleted file mode 100644 index c093b788bfc2f..0000000000000 --- a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. 
-// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 77, - "type": "response", - "name": "ShareGroupDescribeResponse", - "validVersions": "0", - "flexibleVersions": "0+", - // Supported errors: - // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - NOT_COORDINATOR (version 0+) - // - COORDINATOR_NOT_AVAILABLE (version 0+) - // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) - // - INVALID_REQUEST (version 0+) - // - INVALID_GROUP_ID (version 0+) - // - GROUP_ID_NOT_FOUND (version 0+) - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "Groups", "type": "[]DescribedGroup", "versions": "0+", - "about": "Each described group.", - "fields": [ - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The describe error, or 0 if there was no error." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The top-level error message, or null if there was no error." }, - { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", - "about": "The group ID string." }, - { "name": "GroupState", "type": "string", "versions": "0+", - "about": "The group state string, or the empty string." 
}, - { "name": "GroupEpoch", "type": "int32", "versions": "0+", - "about": "The group epoch." }, - { "name": "AssignmentEpoch", "type": "int32", "versions": "0+", - "about": "The assignment epoch." }, - { "name": "AssignorName", "type": "string", "versions": "0+", - "about": "The selected assignor." }, - { "name": "Members", "type": "[]Member", "versions": "0+", - "about": "The members.", - "fields": [ - { "name": "MemberId", "type": "string", "versions": "0+", - "about": "The member ID." }, - { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The member rack ID." }, - { "name": "MemberEpoch", "type": "int32", "versions": "0+", - "about": "The current member epoch." }, - { "name": "ClientId", "type": "string", "versions": "0+", - "about": "The client ID." }, - { "name": "ClientHost", "type": "string", "versions": "0+", - "about": "The client host." }, - { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "entityType": "topicName", - "about": "The subscribed topic names." }, - { "name": "Assignment", "type": "Assignment", "versions": "0+", - "about": "The current assignment." } - ]}, - { "name": "AuthorizedOperations", "type": "int32", "versions": "0+", "default": "-2147483648", - "about": "32-bit bitfield to represent authorized operations for this group." } - ] - } - ], - "commonStructs": [ - { "name": "TopicPartitions", "versions": "0+", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic ID." }, - { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name." }, - { "name": "Partitions", "type": "[]int32", "versions": "0+", - "about": "The partitions." } - ]}, - { "name": "Assignment", "versions": "0+", "fields": [ - { "name": "TopicPartitions", "type": "[]TopicPartitions", "versions": "0+", - "about": "The assigned topic-partitions to the member." 
} - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json deleted file mode 100644 index 7d28c116454d3..0000000000000 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 76, - "type": "request", - "listeners": ["broker"], - "name": "ShareGroupHeartbeatRequest", - "validVersions": "0", - "flexibleVersions": "0+", - // The ShareGroupHeartbeatRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, - "fields": [ - { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", - "about": "The group identifier." }, - { "name": "MemberId", "type": "string", "versions": "0+", - "about": "The member ID generated by the coordinator. The member ID must be kept during the entire lifetime of the member." 
}, - { "name": "MemberEpoch", "type": "int32", "versions": "0+", - "about": "The current member epoch; 0 to join the group; -1 to leave the group." }, - { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise." }, - { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "null if it didn't change since the last heartbeat; the subscribed topic names otherwise." } - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json deleted file mode 100644 index e692839f29bf9..0000000000000 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json +++ /dev/null @@ -1,57 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -{ - "apiKey": 76, - "type": "response", - "name": "ShareGroupHeartbeatResponse", - "validVersions": "0", - "flexibleVersions": "0+", - // Supported errors: - // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - NOT_COORDINATOR (version 0+) - // - COORDINATOR_NOT_AVAILABLE (version 0+) - // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) - // - INVALID_REQUEST (version 0+) - // - UNKNOWN_MEMBER_ID (version 0+) - // - GROUP_MAX_SIZE_REACHED (version 0+) - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top-level error code, or 0 if there was no error" }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The top-level error message, or null if there was no error." }, - { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The member ID generated by the coordinator. Only provided when the member joins with MemberEpoch == 0." }, - { "name": "MemberEpoch", "type": "int32", "versions": "0+", - "about": "The member epoch." }, - { "name": "HeartbeatIntervalMs", "type": "int32", "versions": "0+", - "about": "The heartbeat interval in milliseconds." }, - { "name": "Assignment", "type": "Assignment", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "null if not provided; the assignment otherwise.", "fields": [ - { "name": "TopicPartitions", "type": "[]TopicPartitions", "versions": "0+", - "about": "The partitions assigned to the member." } - ]} - ], - "commonStructs": [ - { "name": "TopicPartitions", "versions": "0+", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic ID." 
}, - { "name": "Partitions", "type": "[]int32", "versions": "0+", - "about": "The partitions." } - ]} - ] -} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index 82487bd418429..512a7cea76681 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.ElectionType; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; -import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; @@ -211,14 +210,6 @@ import org.apache.kafka.common.message.SaslAuthenticateResponseData; import org.apache.kafka.common.message.SaslHandshakeRequestData; import org.apache.kafka.common.message.SaslHandshakeResponseData; -import org.apache.kafka.common.message.ShareAcknowledgeRequestData; -import org.apache.kafka.common.message.ShareAcknowledgeResponseData; -import org.apache.kafka.common.message.ShareFetchRequestData; -import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.message.ShareGroupDescribeRequestData; -import org.apache.kafka.common.message.ShareGroupDescribeResponseData; -import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; -import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaTopicState; import org.apache.kafka.common.message.StopReplicaResponseData; @@ -1010,10 +1001,6 @@ public void testErrorCountsIncludesNone() { assertEquals(1, 
createTxnOffsetCommitResponse().errorCounts().get(Errors.NONE)); assertEquals(1, createUpdateMetadataResponse().errorCounts().get(Errors.NONE)); assertEquals(1, createWriteTxnMarkersResponse().errorCounts().get(Errors.NONE)); - assertEquals(1, createShareGroupHeartbeatResponse().errorCounts().get(Errors.NONE)); - assertEquals(1, createShareGroupDescribeResponse().errorCounts().get(Errors.NONE)); - assertEquals(2, createShareFetchResponse().errorCounts().get(Errors.NONE)); - assertEquals(2, createShareAcknowledgeResponse().errorCounts().get(Errors.NONE)); } private AbstractRequest getRequest(ApiKeys apikey, short version) { @@ -1094,10 +1081,6 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsRequest(version); case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesRequest(version); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsRequest(version); - case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatRequest(version); - case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeRequest(version); - case SHARE_FETCH: return createShareFetchRequest(version); - case SHARE_ACKNOWLEDGE: return createShareAcknowledgeRequest(version); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1180,10 +1163,6 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsResponse(); case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesResponse(); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsResponse(); - case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatResponse(); - case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeResponse(); - case SHARE_FETCH: return createShareFetchResponse(); - case SHARE_ACKNOWLEDGE: return createShareAcknowledgeResponse(); default: throw new 
IllegalArgumentException("Unknown API key " + apikey); } } @@ -1351,114 +1330,6 @@ private ConsumerGroupHeartbeatResponse createConsumerGroupHeartbeatResponse() { return new ConsumerGroupHeartbeatResponse(data); } - private ShareGroupHeartbeatRequest createShareGroupHeartbeatRequest(short version) { - ShareGroupHeartbeatRequestData data = new ShareGroupHeartbeatRequestData() - .setGroupId("group") - .setMemberId("memberid") - .setMemberEpoch(10) - .setRackId("rackid") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")); - return new ShareGroupHeartbeatRequest.Builder(data).build(version); - } - - private ShareGroupHeartbeatResponse createShareGroupHeartbeatResponse() { - ShareGroupHeartbeatResponseData data = new ShareGroupHeartbeatResponseData() - .setErrorCode(Errors.NONE.code()) - .setThrottleTimeMs(1000) - .setMemberId("memberid") - .setMemberEpoch(11) - .setAssignment(new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Arrays.asList(0, 1, 2)), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Arrays.asList(3, 4, 5)) - )) - ); - return new ShareGroupHeartbeatResponse(data); - } - - private ShareGroupDescribeRequest createShareGroupDescribeRequest(short version) { - ShareGroupDescribeRequestData data = new ShareGroupDescribeRequestData() - .setGroupIds(Collections.singletonList("group")) - .setIncludeAuthorizedOperations(false); - return new ShareGroupDescribeRequest.Builder(data).build(version); - } - - private ShareGroupDescribeResponse createShareGroupDescribeResponse() { - ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData() - .setGroups(Collections.singletonList( - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId("group") - .setErrorCode((short) 0) - .setErrorMessage(Errors.forCode((short) 0).message()) - 
.setGroupState(ShareGroupState.EMPTY.toString()) - .setMembers(new ArrayList<>(0)) - )) - .setThrottleTimeMs(1000); - return new ShareGroupDescribeResponse(data); - } - - private ShareFetchRequest createShareFetchRequest(short version) { - ShareFetchRequestData data = new ShareFetchRequestData() - .setGroupId("group") - .setMemberId(Uuid.randomUuid().toString()) - .setTopics(singletonList(new ShareFetchRequestData.FetchTopic() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))); - return new ShareFetchRequest.Builder(data).build(version); - } - - private ShareFetchResponse createShareFetchResponse() { - ShareFetchResponseData data = new ShareFetchResponseData(); - MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("blah".getBytes())); - ShareFetchResponseData.PartitionData partition = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(singletonList(new ShareFetchResponseData.AcquiredRecords() - .setFirstOffset(0) - .setLastOffset(0) - .setDeliveryCount((short) 1))); - ShareFetchResponseData.ShareFetchableTopicResponse response = new ShareFetchResponseData.ShareFetchableTopicResponse() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(partition)); - - data.setResponses(singletonList(response)); - data.setThrottleTimeMs(345); - data.setErrorCode(Errors.NONE.code()); - return new ShareFetchResponse(data); - } - - private ShareAcknowledgeRequest createShareAcknowledgeRequest(short version) { - ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData() - .setMemberId(Uuid.randomUuid().toString()) - .setTopics(singletonList(new ShareAcknowledgeRequestData.AcknowledgeTopic() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareAcknowledgeRequestData.AcknowledgePartition() - .setPartitionIndex(0) - 
.setAcknowledgementBatches(singletonList(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(0) - .setAcknowledgeTypes(Collections.singletonList((byte) 0)))))))); - return new ShareAcknowledgeRequest.Builder(data).build(version); - } - - private ShareAcknowledgeResponse createShareAcknowledgeResponse() { - ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); - data.setResponses(singletonList(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()))))); - data.setThrottleTimeMs(345); - data.setErrorCode(Errors.NONE.code()); - return new ShareAcknowledgeResponse(data); - } - private ControllerRegistrationRequest createControllerRegistrationRequest(short version) { ControllerRegistrationRequestData data = new ControllerRegistrationRequestData(). setControllerId(3). 
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java index 37673ee05577d..9a47a0e7530bb 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java @@ -190,15 +190,15 @@ private HttpResponse httpRequest(HttpClient client, String url, String me "Unexpected status code when handling forwarded request: " + responseCode); } } catch (IOException | InterruptedException | TimeoutException | ExecutionException e) { - log.error("IO error forwarding REST request to {} :", url, e); + log.error("IO error forwarding REST request: ", e); throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "IO Error trying to forward REST request: " + e.getMessage(), e); } catch (ConnectRestException e) { // catching any explicitly thrown ConnectRestException-s to preserve its status code // and to avoid getting it overridden by the more generic catch (Throwable) clause down below - log.error("Error forwarding REST request to {} :", url, e); + log.error("Error forwarding REST request", e); throw e; } catch (Throwable t) { - log.error("Error forwarding REST request to {} :", url, t); + log.error("Error forwarding REST request", t); throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "Error trying to forward REST request: " + t.getMessage(), t); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java index 9d338936dbbf0..da8e235e42411 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java +++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java @@ -47,7 +47,7 @@ public Map config() { return config; } - @JsonProperty("initial_state") + @JsonProperty public InitialState initialState() { return initialState; } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java index 3ec037734f116..6ebac341032a3 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java @@ -61,7 +61,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -160,7 +159,7 @@ public class KafkaConfigBackingStoreMockitoTest { new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2)) ); - private static final Struct TARGET_STATE_STARTED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V0).put("state", "STARTED"); + private static final Struct TARGET_STATE_PAUSED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) .put("state", "PAUSED") .put("state.v2", "PAUSED"); @@ -1185,147 +1184,6 @@ public void testRestoreRestartRequestInconsistentState() { verify(configLog).stop(); } - @Test - public void testPutTaskConfigsZeroTasks() throws Exception { - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - verify(configLog).start(); - - // Records to be read by consumer as it reads to the end of the log - doAnswer(expectReadToEnd(new LinkedHashMap<>())). 
- doAnswer(expectReadToEnd(Collections.singletonMap(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) - .when(configLog).readToEnd(); - - expectConvertWriteRead( - COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0), - "tasks", 0); // We have 0 tasks - - // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - - - // Null before writing - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(-1, configState.offset()); - - // Writing task configs should block until all the writes have been performed and the root record update - // has completed - List> taskConfigs = Collections.emptyList(); - configStorage.putTaskConfigs("connector1", taskConfigs); - - // Validate root config by listing all connectors and tasks - configState = configStorage.snapshot(); - assertEquals(1, configState.offset()); - String connectorName = CONNECTOR_IDS.get(0); - assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); - assertEquals(Collections.emptyList(), configState.tasks(connectorName)); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); - - // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Collections.emptyList()); - - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testBackgroundUpdateTargetState() throws Exception { - // verify that we handle target state changes correctly when they come up through the log - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, 
TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserializedOnStartup = new LinkedHashMap<>(); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - logOffset = 5; - - expectStart(existingRecords, deserializedOnStartup); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - verify(configLog).start(); - - // Should see a single connector with initial state started - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet()); - assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); - - LinkedHashMap serializedAfterStartup = new LinkedHashMap<>(); - serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); - doAnswer(expectReadToEnd(serializedAfterStartup)).when(configLog).readToEnd(); - - Map deserializedAfterStartup = new HashMap<>(); - deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED); - deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED); - 
expectRead(serializedAfterStartup, deserializedAfterStartup); - - // Should see two connectors now, one paused and one stopped - configStorage.refresh(0, TimeUnit.SECONDS); - verify(configUpdateListener).onConnectorTargetStateChange(CONNECTOR_IDS.get(0)); - configState = configStorage.snapshot(); - - assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet()); - assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0))); - assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1))); - - configStorage.stop(); - verify(configStorage).stop(); - } - - @Test - public void testSameTargetState() throws Exception { - // verify that we handle target state changes correctly when they come up through the log - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - logOffset = 5; - - expectStart(existingRecords, deserialized); - - when(configLog.partitionCount()).thenReturn(1); - - 
configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - verify(configLog).start(); - - ClusterConfigState configState = configStorage.snapshot(); - expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED); - // Should see a single connector with initial state paused - assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); - - expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED); - // on resume update listener shouldn't be called - verify(configUpdateListener, never()).onConnectorTargetStateChange(anyString()); - - configStorage.stop(); - verify(configStorage).stop(); - } - - @Test public void testPutLogLevel() throws Exception { final String logger1 = "org.apache.zookeeper"; @@ -1435,12 +1293,6 @@ private void expectRead(LinkedHashMap serializedValues, } } - private void expectRead(final String key, final byte[] serializedValue, Struct deserializedValue) { - LinkedHashMap serializedData = new LinkedHashMap<>(); - serializedData.put(key, serializedValue); - expectRead(serializedData, Collections.singletonMap(key, deserializedValue)); - } - // This map needs to maintain ordering private Answer> expectReadToEnd(final Map serializedConfigs) { return invocation -> { @@ -1463,11 +1315,4 @@ private Map structToMap(Struct struct) { for (Field field : struct.schema().fields()) result.put(field.name(), struct.get(field)); return result; } - - private void addConnector(String connectorName, Map connectorConfig, List> taskConfigs) { - for (int i = 0; i < taskConfigs.size(); i++) - configStorage.taskConfigs.put(new ConnectorTaskId(connectorName, i), taskConfigs.get(i)); - configStorage.connectorConfigs.put(connectorName, connectorConfig); - configStorage.connectorTaskCounts.put(connectorName, taskConfigs.size()); - } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java 
b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java index 2e7b388413c55..ae5f82cd3eeb2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java @@ -28,6 +28,7 @@ import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaAndValue; import org.apache.kafka.connect.data.Struct; +import org.apache.kafka.connect.runtime.TargetState; import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.distributed.DistributedConfig; import org.apache.kafka.connect.util.Callback; @@ -51,11 +52,13 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.INCLUDE_TASKS_FIELD_NAME; @@ -427,6 +430,167 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception { PowerMock.verifyAll(); } + @Test + public void testPutTaskConfigsZeroTasks() throws Exception { + expectConfigure(); + expectStart(Collections.emptyList(), Collections.emptyMap()); + + // Task configs should read to end, write to the log, read to end, write root. 
+ expectReadToEnd(new LinkedHashMap<>()); + expectConvertWriteRead( + COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0), + "tasks", 0); // We have 0 tasks + // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks + configUpdateListener.onTaskConfigUpdate(Collections.emptyList()); + EasyMock.expectLastCall(); + + // Records to be read by consumer as it reads to the end of the log + LinkedHashMap serializedConfigs = new LinkedHashMap<>(); + serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + expectReadToEnd(serializedConfigs); + + expectPartitionCount(1); + expectStop(); + + PowerMock.replayAll(); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + configStorage.start(); + + // Bootstrap as if we had already added the connector, but no tasks had been added yet + whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); + + // Null before writing + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(-1, configState.offset()); + + // Writing task configs should block until all the writes have been performed and the root record update + // has completed + List> taskConfigs = Collections.emptyList(); + configStorage.putTaskConfigs("connector1", taskConfigs); + + // Validate root config by listing all connectors and tasks + configState = configStorage.snapshot(); + assertEquals(1, configState.offset()); + String connectorName = CONNECTOR_IDS.get(0); + assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); + assertEquals(Collections.emptyList(), configState.tasks(connectorName)); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + + configStorage.stop(); + + PowerMock.verifyAll(); + } + + @Test + public void testBackgroundUpdateTargetState() throws Exception { + // verify that we handle target state 
changes correctly when they come up through the log + + expectConfigure(); + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserializedOnStartup = new LinkedHashMap<>(); + deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + logOffset = 5; + + expectStart(existingRecords, deserializedOnStartup); + + LinkedHashMap serializedAfterStartup = new LinkedHashMap<>(); + serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); + + Map deserializedAfterStartup = new HashMap<>(); + deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED); + deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED); + + expectRead(serializedAfterStartup, deserializedAfterStartup); + + configUpdateListener.onConnectorTargetStateChange(CONNECTOR_IDS.get(0)); + EasyMock.expectLastCall(); + + expectPartitionCount(1); + expectStop(); + + PowerMock.replayAll(); + + 
configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + configStorage.start(); + + // Should see a single connector with initial state started + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet()); + assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); + + // Should see two connectors now, one paused and one stopped + configStorage.refresh(0, TimeUnit.SECONDS); + configState = configStorage.snapshot(); + assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet()); + assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0))); + assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1))); + + configStorage.stop(); + + PowerMock.verifyAll(); + } + + @Test + public void testSameTargetState() throws Exception { + // verify that we handle target state changes correctly when they come up through the log + + expectConfigure(); + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + 
deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + logOffset = 5; + + expectStart(existingRecords, deserialized); + + // on resume update listener shouldn't be called + configUpdateListener.onConnectorTargetStateChange(EasyMock.anyString()); + EasyMock.expectLastCall().andStubThrow(new AssertionError("unexpected call to onConnectorTargetStateChange")); + + expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED); + + expectPartitionCount(1); + expectStop(); + + PowerMock.replayAll(); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + configStorage.start(); + + // Should see a single connector with initial state paused + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); + + configStorage.refresh(0, TimeUnit.SECONDS); + + configStorage.stop(); + + PowerMock.verifyAll(); + } + private void expectConfigure() throws Exception { PowerMock.expectPrivate(configStorage, "createKafkaBasedLog", EasyMock.capture(capturedTopic), EasyMock.capture(capturedProducerProps), @@ -472,6 +636,12 @@ private void expectRead(LinkedHashMap serializedValues, } } + private void expectRead(final String key, final byte[] serializedValue, Struct deserializedValue) { + LinkedHashMap serializedData = new LinkedHashMap<>(); + serializedData.put(key, serializedValue); + expectRead(serializedData, Collections.singletonMap(key, deserializedValue)); + } + // Expect a conversion & write to the underlying log, followed by a subsequent read when the data is consumed back // from the log. 
Validate the data that is captured when the conversion is performed matches the specified data // (by checking a single field's value) diff --git a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java index 1d422461678f5..6ffd741f4fc64 100644 --- a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java +++ b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java @@ -179,7 +179,7 @@ public KafkaApis build() { if (metrics == null) throw new RuntimeException("You must set metrics"); if (quotas == null) throw new RuntimeException("You must set quotas"); if (fetchManager == null) throw new RuntimeException("You must set fetchManager"); - if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().enableRemoteStorageSystem()); + if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled()); if (apiVersionManager == null) throw new RuntimeException("You must set apiVersionManager"); return new KafkaApis(requestChannel, diff --git a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java index 5e8cf2dcdc64c..82aa75909abba 100644 --- a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java @@ -185,7 +185,7 @@ public ReplicaManager build() { if (metadataCache == null) throw new RuntimeException("You must set metadataCache"); if (logDirFailureChannel == null) throw new RuntimeException("You must set logDirFailureChannel"); if (alterPartitionManager == null) throw new RuntimeException("You must set alterIsrManager"); - if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().enableRemoteStorageSystem()); + if (brokerTopicStats == null) brokerTopicStats = new 
BrokerTopicStats(config.isRemoteLogStorageSystemEnabled()); // Initialize metrics in the end just before passing it to ReplicaManager to ensure ReplicaManager closes the // metrics correctly. There might be a resource leak if it is initialized and an exception occurs between // its initialization and creation of ReplicaManager. diff --git a/core/src/main/scala/kafka/log/LogCleaner.scala b/core/src/main/scala/kafka/log/LogCleaner.scala index 1265e979373cd..0b166c62535e9 100644 --- a/core/src/main/scala/kafka/log/LogCleaner.scala +++ b/core/src/main/scala/kafka/log/LogCleaner.scala @@ -158,21 +158,14 @@ class LogCleaner(initialConfig: CleanerConfig, } } - /** - * Stop the background cleaner threads - */ - private[this] def shutdownCleaners(): Unit = { - info("Shutting down the log cleaner.") - cleaners.foreach(_.shutdown()) - cleaners.clear() - } - /** * Stop the background cleaner threads */ def shutdown(): Unit = { + info("Shutting down the log cleaner.") try { - shutdownCleaners() + cleaners.foreach(_.shutdown()) + cleaners.clear() } finally { removeMetrics() } @@ -227,8 +220,8 @@ class LogCleaner(initialConfig: CleanerConfig, info(s"Updating logCleanerIoMaxBytesPerSecond: $maxIoBytesPerSecond") throttler.updateDesiredRatePerSec(maxIoBytesPerSecond) } - // call shutdownCleaners() instead of shutdown to avoid unnecessary deletion of metrics - shutdownCleaners() + + shutdown() startup() } diff --git a/core/src/main/scala/kafka/log/LogManager.scala b/core/src/main/scala/kafka/log/LogManager.scala index d7599e569ab25..3bc6533117cba 100755 --- a/core/src/main/scala/kafka/log/LogManager.scala +++ b/core/src/main/scala/kafka/log/LogManager.scala @@ -1562,7 +1562,7 @@ object LogManager { keepPartitionMetadataFile: Boolean): LogManager = { val defaultProps = config.extractLogConfigMap - LogConfig.validateBrokerLogConfigValues(defaultProps, config.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validateBrokerLogConfigValues(defaultProps, 
config.isRemoteLogStorageSystemEnabled) val defaultLogConfig = new LogConfig(defaultProps) val cleanerConfig = LogCleaner.cleanerConfig(config) diff --git a/core/src/main/scala/kafka/network/RequestConvertToJson.scala b/core/src/main/scala/kafka/network/RequestConvertToJson.scala index 0900b94ef9f4f..54986f52c85a3 100644 --- a/core/src/main/scala/kafka/network/RequestConvertToJson.scala +++ b/core/src/main/scala/kafka/network/RequestConvertToJson.scala @@ -95,10 +95,6 @@ object RequestConvertToJson { case req: RenewDelegationTokenRequest => RenewDelegationTokenRequestDataJsonConverter.write(req.data, request.version) case req: SaslAuthenticateRequest => SaslAuthenticateRequestDataJsonConverter.write(req.data, request.version) case req: SaslHandshakeRequest => SaslHandshakeRequestDataJsonConverter.write(req.data, request.version) - case req: ShareAcknowledgeRequest => ShareAcknowledgeRequestDataJsonConverter.write(req.data, request.version) - case req: ShareFetchRequest => ShareFetchRequestDataJsonConverter.write(req.data, request.version) - case req: ShareGroupDescribeRequest => ShareGroupDescribeRequestDataJsonConverter.write(req.data, request.version) - case req: ShareGroupHeartbeatRequest => ShareGroupHeartbeatRequestDataJsonConverter.write(req.data, request.version) case req: StopReplicaRequest => StopReplicaRequestDataJsonConverter.write(req.data, request.version) case req: SyncGroupRequest => SyncGroupRequestDataJsonConverter.write(req.data, request.version) case req: TxnOffsetCommitRequest => TxnOffsetCommitRequestDataJsonConverter.write(req.data, request.version) @@ -182,10 +178,6 @@ object RequestConvertToJson { case res: RenewDelegationTokenResponse => RenewDelegationTokenResponseDataJsonConverter.write(res.data, version) case res: SaslAuthenticateResponse => SaslAuthenticateResponseDataJsonConverter.write(res.data, version) case res: SaslHandshakeResponse => SaslHandshakeResponseDataJsonConverter.write(res.data, version) - case res: 
ShareAcknowledgeResponse => ShareAcknowledgeResponseDataJsonConverter.write(res.data, version) - case res: ShareFetchResponse => ShareFetchResponseDataJsonConverter.write(res.data, version) - case res: ShareGroupDescribeResponse => ShareGroupDescribeResponseDataJsonConverter.write(res.data, version) - case res: ShareGroupHeartbeatResponse => ShareGroupHeartbeatResponseDataJsonConverter.write(res.data, version) case res: StopReplicaResponse => StopReplicaResponseDataJsonConverter.write(res.data, version) case res: SyncGroupResponse => SyncGroupResponseDataJsonConverter.write(res.data, version) case res: TxnOffsetCommitResponse => TxnOffsetCommitResponseDataJsonConverter.write(res.data, version) diff --git a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala index 51bc16fb09d17..5f3fdc81887ef 100644 --- a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala +++ b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala @@ -264,11 +264,11 @@ class BrokerLifecycleManager( new OfflineDirBrokerFailureEvent(directory)) } - def resendBrokerRegistrationUnlessZkMode(): Unit = { - eventQueue.append(new ResendBrokerRegistrationUnlessZkModeEvent()) + def handleKraftJBODMetadataVersionUpdate(): Unit = { + eventQueue.append(new KraftJBODMetadataVersionUpdateEvent()) } - private class ResendBrokerRegistrationUnlessZkModeEvent extends EventQueue.Event { + private class KraftJBODMetadataVersionUpdateEvent extends EventQueue.Event { override def run(): Unit = { if (!isZkBroker) { registered = false diff --git a/core/src/main/scala/kafka/server/BrokerServer.scala b/core/src/main/scala/kafka/server/BrokerServer.scala index 5e299fc0e02a8..112a03c50a9a4 100644 --- a/core/src/main/scala/kafka/server/BrokerServer.scala +++ b/core/src/main/scala/kafka/server/BrokerServer.scala @@ -37,7 +37,7 @@ import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{ClusterResource, 
TopicPartition, Uuid} import org.apache.kafka.coordinator.group.metrics.{GroupCoordinatorMetrics, GroupCoordinatorRuntimeMetrics} import org.apache.kafka.coordinator.group.{CoordinatorRecord, GroupCoordinator, GroupCoordinatorConfig, GroupCoordinatorService, CoordinatorRecordSerde} -import org.apache.kafka.image.publisher.{BrokerRegistrationTracker, MetadataPublisher} +import org.apache.kafka.image.publisher.MetadataPublisher import org.apache.kafka.metadata.{BrokerState, ListenerInfo, VersionRange} import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.{AssignmentsManager, ClientMetricsManager, NodeToControllerChannelManager} @@ -139,8 +139,6 @@ class BrokerServer( var brokerMetadataPublisher: BrokerMetadataPublisher = _ - var brokerRegistrationTracker: BrokerRegistrationTracker = _ - val brokerFeatures: BrokerFeatures = BrokerFeatures.createDefault(config.unstableFeatureVersionsEnabled) def kafkaYammerMetrics: KafkaYammerMetrics = KafkaYammerMetrics.INSTANCE @@ -186,7 +184,7 @@ class BrokerServer( kafkaScheduler.startup() /* register broker metrics */ - brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.enableRemoteStorageSystem()) + brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled) quotaManagers = QuotaFactory.instantiate(config, metrics, time, s"broker-${config.nodeId}-") @@ -484,10 +482,6 @@ class BrokerServer( lifecycleManager ) metadataPublishers.add(brokerMetadataPublisher) - brokerRegistrationTracker = new BrokerRegistrationTracker(config.brokerId, - logManager.directoryIdsSet.toList.asJava, - () => lifecycleManager.resendBrokerRegistrationUnlessZkMode()) - metadataPublishers.add(brokerRegistrationTracker) // Register parts of the broker that can be reconfigured via dynamic configs. This needs to // be done before we publish the dynamic configs, so that we don't miss anything. 
diff --git a/core/src/main/scala/kafka/server/ConfigHandler.scala b/core/src/main/scala/kafka/server/ConfigHandler.scala index ed9260b21947b..1d5702e76e49d 100644 --- a/core/src/main/scala/kafka/server/ConfigHandler.scala +++ b/core/src/main/scala/kafka/server/ConfigHandler.scala @@ -70,7 +70,7 @@ class TopicConfigHandler(private val replicaManager: ReplicaManager, val logs = logManager.logsByTopic(topic) val wasRemoteLogEnabledBeforeUpdate = logs.exists(_.remoteLogEnabled()) - logManager.updateTopicConfig(topic, props, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + logManager.updateTopicConfig(topic, props, kafkaConfig.isRemoteLogStorageSystemEnabled) maybeBootstrapRemoteLogComponents(topic, logs, wasRemoteLogEnabledBeforeUpdate) } diff --git a/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala b/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala index f957b65ddd105..15eb1eff04aa3 100644 --- a/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala +++ b/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala @@ -107,8 +107,7 @@ class ControllerConfigurationValidator(kafkaConfig: KafkaConfig) extends Configu throw new InvalidConfigurationException("Null value not supported for topic configs: " + nullTopicConfigs.mkString(",")) } - LogConfig.validate(properties, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(properties, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) case BROKER => validateBrokerName(resource.name()) case CLIENT_METRICS => val properties = new Properties() diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 94a7b349af927..822310838298c 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -17,7 +17,7 @@ package 
kafka.server -import java.util +import java.{lang, util} import java.util.concurrent.TimeUnit import java.util.{Collections, Properties} import kafka.cluster.EndPoint @@ -1205,6 +1205,8 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami def usesTopicId: Boolean = usesSelfManagedQuorum || interBrokerProtocolVersion.isTopicIdsSupported + + val isRemoteLogStorageSystemEnabled: lang.Boolean = getBoolean(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP) def logLocalRetentionBytes: java.lang.Long = getLong(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_BYTES_PROP) def logLocalRetentionMs: java.lang.Long = getLong(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_MS_PROP) diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 738adab0fb0c1..933a5df536a5f 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -276,7 +276,7 @@ class KafkaServer( createCurrentControllerIdMetric() /* register broker metrics */ - _brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.enableRemoteStorageSystem()) + _brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled) quotaManagers = QuotaFactory.instantiate(config, metrics, time, threadNamePrefix.getOrElse("")) KafkaBroker.notifyClusterListeners(clusterId, kafkaMetricsReporters ++ metrics.reporters.asScala) diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index a2a070bcd0331..aa56269a2f40d 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -33,7 +33,6 @@ import kafka.zk.KafkaZkClient import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult -import 
org.apache.kafka.common.message.DescribeLogDirsResponseData.DescribeLogDirsTopic import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState import org.apache.kafka.common.message.LeaderAndIsrResponseData.{LeaderAndIsrPartitionError, LeaderAndIsrTopicError} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic @@ -68,7 +67,7 @@ import java.util import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.locks.Lock import java.util.concurrent.{CompletableFuture, Future, RejectedExecutionException, TimeUnit} -import java.util.{Collections, Optional, OptionalInt, OptionalLong} +import java.util.{Optional, OptionalInt, OptionalLong} import scala.collection.{Map, Seq, Set, mutable} import scala.compat.java8.OptionConverters._ import scala.jdk.CollectionConverters._ @@ -1250,9 +1249,9 @@ class ReplicaManager(val config: KafkaConfig, val fileStore = Files.getFileStore(file) val totalBytes = adjustForLargeFileSystems(fileStore.getTotalSpace) val usableBytes = adjustForLargeFileSystems(fileStore.getUsableSpace) - val topicInfos = logsByDir.get(absolutePath) match { + logsByDir.get(absolutePath) match { case Some(logs) => - logs.groupBy(_.topicPartition.topic).map { case (topic, logs) => + val topicInfos = logs.groupBy(_.topicPartition.topic).map{case (topic, logs) => new DescribeLogDirsResponseData.DescribeLogDirsTopic().setName(topic).setPartitions( logs.filter { log => partitions.contains(log.topicPartition) @@ -1263,19 +1262,17 @@ class ReplicaManager(val config: KafkaConfig, .setOffsetLag(getLogEndOffsetLag(log.topicPartition, log.logEndOffset, log.isFuture)) .setIsFutureKey(log.isFuture) }.toList.asJava) - }.filterNot(_.partitions().isEmpty).toList.asJava + }.toList.asJava + + new DescribeLogDirsResponseData.DescribeLogDirsResult().setLogDir(absolutePath) + .setErrorCode(Errors.NONE.code).setTopics(topicInfos) + .setTotalBytes(totalBytes).setUsableBytes(usableBytes) case None => - 
Collections.emptyList[DescribeLogDirsTopic]() + new DescribeLogDirsResponseData.DescribeLogDirsResult().setLogDir(absolutePath) + .setErrorCode(Errors.NONE.code) + .setTotalBytes(totalBytes).setUsableBytes(usableBytes) } - val describeLogDirsResult = new DescribeLogDirsResponseData.DescribeLogDirsResult() - .setLogDir(absolutePath).setTopics(topicInfos) - .setErrorCode(Errors.NONE.code) - .setTotalBytes(totalBytes).setUsableBytes(usableBytes) - if (!topicInfos.isEmpty) - describeLogDirsResult.setTopics(topicInfos) - describeLogDirsResult - } catch { case e: KafkaStorageException => warn("Unable to describe replica dirs for %s".format(absolutePath), e) diff --git a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala index ee7bfa2157ee7..048a665757b74 100644 --- a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala @@ -29,6 +29,7 @@ import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.image.loader.LoaderManifest import org.apache.kafka.image.publisher.MetadataPublisher import org.apache.kafka.image.{MetadataDelta, MetadataImage, TopicDelta} +import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.fault.FaultHandler import java.util.concurrent.CompletableFuture @@ -128,6 +129,21 @@ class BrokerMetadataPublisher( debug(s"Publishing metadata at offset $highestOffsetAndEpoch with $metadataVersionLogMsg.") } + Option(delta.featuresDelta()).foreach { featuresDelta => + featuresDelta.metadataVersionChange().ifPresent{ metadataVersion => + info(s"Updating metadata.version to ${metadataVersion.featureLevel()} at offset $highestOffsetAndEpoch.") + val currentMetadataVersion = delta.image().features().metadataVersion() + if (currentMetadataVersion.isLessThan(MetadataVersion.IBP_3_7_IV2) && 
metadataVersion.isAtLeast(MetadataVersion.IBP_3_7_IV2)) { + info( + s"""Resending BrokerRegistration with existing incarnation-id to inform the + |controller about log directories in the broker following metadata update: + |previousMetadataVersion: ${delta.image().features().metadataVersion()} + |newMetadataVersion: $metadataVersion""".stripMargin.linesIterator.mkString(" ").trim) + brokerLifecycleManager.handleKraftJBODMetadataVersionUpdate() + } + } + } + // Apply topic deltas. Option(delta.topicsDelta()).foreach { topicsDelta => try { diff --git a/core/src/main/scala/kafka/zk/AdminZkClient.scala b/core/src/main/scala/kafka/zk/AdminZkClient.scala index 604e03c7ed436..efecfe854bbf2 100644 --- a/core/src/main/scala/kafka/zk/AdminZkClient.scala +++ b/core/src/main/scala/kafka/zk/AdminZkClient.scala @@ -163,7 +163,7 @@ class AdminZkClient(zkClient: KafkaZkClient, LogConfig.validate(config, kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.remoteLogManagerConfig.enableRemoteStorageSystem())) + kafkaConfig.exists(_.isRemoteLogStorageSystemEnabled)) } private def writeTopicPartitionAssignment(topic: String, replicaAssignment: Map[Int, ReplicaAssignment], @@ -481,7 +481,7 @@ class AdminZkClient(zkClient: KafkaZkClient, // remove the topic overrides LogConfig.validate(configs, kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.remoteLogManagerConfig.enableRemoteStorageSystem())) + kafkaConfig.exists(_.isRemoteLogStorageSystemEnabled)) } /** diff --git a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java index 50b581fdf4ee5..0ba5d63a8da8a 100644 --- a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java +++ b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java @@ -222,7 +222,7 @@ void setUp() throws Exception { props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, 
"true"); props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, "100"); remoteLogManagerConfig = createRLMConfig(props); - brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig().enableRemoteStorageSystem()); + brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled()); remoteLogManager = new RemoteLogManager(remoteLogManagerConfig, brokerId, logDir, clusterId, time, tp -> Optional.of(mockLog), diff --git a/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java b/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java index c0944080547d6..7a1ae920a6f44 100644 --- a/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java +++ b/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java @@ -17,64 +17,29 @@ package kafka.test.junit; -import kafka.test.ClusterConfig; import kafka.test.annotation.ClusterTemplate; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.extension.ExtensionContext; - -import java.lang.reflect.Method; -import java.util.Collections; -import java.util.List; - import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class ClusterTestExtensionsUnitTest { - - static List cfgEmpty() { - return Collections.emptyList(); - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - private ExtensionContext buildExtensionContext(String methodName) throws Exception { - ExtensionContext extensionContext = mock(ExtensionContext.class); - Class clazz = ClusterTestExtensionsUnitTest.class; - Method method = clazz.getDeclaredMethod(methodName); - when(extensionContext.getRequiredTestClass()).thenReturn(clazz); - when(extensionContext.getRequiredTestMethod()).thenReturn(method); - return extensionContext; - } - @Test - void testProcessClusterTemplate() throws Exception { + void testProcessClusterTemplate() { 
ClusterTestExtensions ext = new ClusterTestExtensions(); - ExtensionContext context = buildExtensionContext("cfgEmpty"); + ExtensionContext context = mock(ExtensionContext.class); ClusterTemplate annot = mock(ClusterTemplate.class); - when(annot.value()).thenReturn("").thenReturn(" ").thenReturn("cfgEmpty"); - - Assertions.assertEquals( - "ClusterTemplate value can't be empty string.", - Assertions.assertThrows(IllegalStateException.class, () -> - ext.processClusterTemplate(context, annot) - ).getMessage() - ); - + when(annot.value()).thenReturn("").thenReturn(" "); - Assertions.assertEquals( - "ClusterTemplate value can't be empty string.", - Assertions.assertThrows(IllegalStateException.class, () -> - ext.processClusterTemplate(context, annot) - ).getMessage() + Assertions.assertThrows(IllegalStateException.class, () -> + ext.processClusterTemplate(context, annot) ); - Assertions.assertEquals( - "ClusterConfig generator method should provide at least one config", - Assertions.assertThrows(IllegalStateException.class, () -> - ext.processClusterTemplate(context, annot) - ).getMessage() + Assertions.assertThrows(IllegalStateException.class, () -> + ext.processClusterTemplate(context, annot) ); } } diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala index b61eb28530ca9..99b1e35e4eed9 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala @@ -27,7 +27,7 @@ import org.apache.kafka.common.errors.CorruptRecordException import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.transaction.TransactionLogConfigs -import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} +import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, 
CleanerConfig, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetMap, ProducerStateManager, ProducerStateManagerConfig} import org.apache.kafka.storage.internals.utils.Throttler @@ -80,6 +80,7 @@ class LogCleanerTest extends Logging { logs = new Pool[TopicPartition, UnifiedLog](), logDirFailureChannel = new LogDirFailureChannel(1), time = time) + val metricsToVerify = new java.util.HashMap[String, java.util.List[java.util.Map[String, String]]]() logCleaner.cleanerManager.gaugeMetricNameWithTag.asScala.foreach { metricNameAndTags => val tags = new java.util.ArrayList[java.util.Map[String, String]]() @@ -119,27 +120,6 @@ class LogCleanerTest extends Logging { } } - @Test - def testMetricsActiveAfterReconfiguration(): Unit = { - val logCleaner = new LogCleaner(new CleanerConfig(true), - logDirs = Array(TestUtils.tempDir()), - logs = new Pool[TopicPartition, UnifiedLog](), - logDirFailureChannel = new LogDirFailureChannel(1), - time = time) - - try { - logCleaner.startup() - var nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) - assertEquals(0, nonexistent.size, s"$nonexistent should be existent") - - logCleaner.reconfigure(new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")), - new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))) - - nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) - assertEquals(0, nonexistent.size, s"$nonexistent should be existent") - } finally logCleaner.shutdown() - } - /** * Test simple log cleaning */ diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala index 2670d6e6f7736..ed91c936edc10 100644 --- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala @@ -297,7 
+297,7 @@ class LogConfigTest { props.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, localRetentionMs.toString) props.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, localRetentionBytes.toString) assertThrows(classOf[ConfigException], - () => LogConfig.validate(props, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(props, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) } @Test @@ -309,17 +309,17 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "delete,compact") assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact,delete") assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, 
kafkaConfig.isRemoteLogStorageSystemEnabled)) } @ParameterizedTest(name = "testEnableRemoteLogStorage with sysRemoteStorageEnabled: {0}") @@ -332,10 +332,10 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") if (sysRemoteStorageEnabled) { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } else { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains("Tiered Storage functionality is disabled in the broker")) } } @@ -355,10 +355,10 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_MS_CONFIG, "500") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) } else { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } @@ -377,10 +377,10 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_BYTES_CONFIG, "128") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, 
kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } @@ -395,10 +395,10 @@ class LogConfigTest { if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala index 3a69669d349e5..a7415b5d50a2e 100644 --- a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala @@ -95,7 +95,7 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio @ClusterTemplate("testApiVersionsRequestIncludesUnreleasedApisTemplate") @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true"), + new ClusterConfigProperty(key = 
"unstable.api.versions.enable", value = "false"), new ClusterConfigProperty(key = "unstable.feature.versions.enable", value = "true"), )) def testApiVersionsRequestIncludesUnreleasedApis(): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala index b0162dc635842..34f9d139a03cc 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala @@ -285,7 +285,7 @@ class BrokerLifecycleManagerTest { assertEquals(1000L, manager.brokerEpoch) // Trigger JBOD MV update - manager.resendBrokerRegistrationUnlessZkMode() + manager.handleKraftJBODMetadataVersionUpdate() // Accept new registration, response sets epoch to 1200 nextRegistrationRequest(1200L) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 151ffb9e1847d..6b655ea7837eb 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -4095,7 +4095,7 @@ class ReplicaManagerTest { val config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props) val remoteLogManagerConfig = new RemoteLogManagerConfig(config) val mockLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.enableRemoteStorageSystem()) + val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled) val remoteLogManager = new RemoteLogManager( remoteLogManagerConfig, 0, @@ -4195,7 +4195,7 @@ class ReplicaManagerTest { val config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props) val remoteLogManagerConfig = new RemoteLogManagerConfig(config) val dummyLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new 
BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.enableRemoteStorageSystem()) + val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled) val remoteLogManager = new RemoteLogManager( remoteLogManagerConfig, 0, @@ -6450,39 +6450,6 @@ class ReplicaManagerTest { assertEquals(Errors.NONE.code, response.errorCode) assertTrue(response.totalBytes > 0) assertTrue(response.usableBytes >= 0) - assertFalse(response.topics().isEmpty) - response.topics().forEach(t => assertFalse(t.partitions().isEmpty)) - } - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testDescribeLogDirsWithoutAnyPartitionTopic(): Unit = { - val noneTopic = "none-topic" - val topicPartition = 0 - val topicId = Uuid.randomUuid() - val followerBrokerId = 0 - val leaderBrokerId = 1 - val leaderEpoch = 1 - val leaderEpochIncrement = 2 - val countDownLatch = new CountDownLatch(1) - val offsetFromLeader = 5 - - // Prepare the mocked components for the test - val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time), - topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, - expectTruncation = false, localLogOffset = Some(10), offsetFromLeader = offsetFromLeader, topicId = Some(topicId)) - - try { - val responses = replicaManager.describeLogDirs(Set(new TopicPartition(noneTopic, topicPartition))) - assertEquals(mockLogMgr.liveLogDirs.size, responses.size) - responses.foreach { response => - assertEquals(Errors.NONE.code, response.errorCode) - assertTrue(response.totalBytes > 0) - assertTrue(response.usableBytes >= 0) - assertTrue(response.topics().isEmpty) } } finally { replicaManager.shutdown(checkpointHW = false) diff --git a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala index 97efd9bcf4cc0..456d075f91655 100644 --- 
a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala @@ -704,10 +704,10 @@ class RequestQuotaTest extends BaseRequestTest { new ConsumerGroupDescribeRequest.Builder(new ConsumerGroupDescribeRequestData(), true) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => - new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData()) + new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData(), true) case ApiKeys.PUSH_TELEMETRY => - new PushTelemetryRequest.Builder(new PushTelemetryRequestData()) + new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true) case ApiKeys.ASSIGN_REPLICAS_TO_DIRS => new AssignReplicasToDirsRequest.Builder(new AssignReplicasToDirsRequestData()) @@ -718,18 +718,6 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => new DescribeTopicPartitionsRequest.Builder(new DescribeTopicPartitionsRequestData()) - case ApiKeys.SHARE_GROUP_HEARTBEAT => - new ShareGroupHeartbeatRequest.Builder(new ShareGroupHeartbeatRequestData(), true) - - case ApiKeys.SHARE_GROUP_DESCRIBE => - new ShareGroupDescribeRequest.Builder(new ShareGroupDescribeRequestData(), true) - - case ApiKeys.SHARE_FETCH => - new ShareFetchRequest.Builder(new ShareFetchRequestData(), true) - - case ApiKeys.SHARE_ACKNOWLEDGE => - new ShareAcknowledgeRequest.Builder(new ShareAcknowledgeRequestData(), true) - case _ => throw new IllegalArgumentException("Unsupported API key " + apiKey) } diff --git a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala index 26f4fb3daee8c..c2926c3b67db9 100644 --- a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala +++ b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala @@ -30,6 +30,7 @@ import 
org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry, NewTopic} import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.config.ConfigResource.Type.BROKER +import org.apache.kafka.common.metadata.FeatureLevelRecord import org.apache.kafka.common.utils.Exit import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataImageTest, MetadataProvenance} @@ -42,7 +43,7 @@ import org.junit.jupiter.api.Assertions.{assertEquals, assertNotNull, assertTrue import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.ArgumentMatchers.any import org.mockito.Mockito -import org.mockito.Mockito.{doThrow, mock, verify} +import org.mockito.Mockito.{clearInvocations, doThrow, mock, times, verify, verifyNoInteractions} import org.mockito.invocation.InvocationOnMock import org.mockito.stubbing.Answer @@ -220,4 +221,102 @@ class BrokerMetadataPublisherTest { verify(groupCoordinator).onNewMetadataImage(image, delta) } + + @Test + def testMetadataVersionUpdateToIBP_3_7_IV2OrAboveTriggersBrokerReRegistration(): Unit = { + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, "")) + val metadataCache = new KRaftMetadataCache(0) + val logManager = mock(classOf[LogManager]) + val replicaManager = mock(classOf[ReplicaManager]) + val groupCoordinator = mock(classOf[GroupCoordinator]) + val faultHandler = mock(classOf[FaultHandler]) + val brokerLifecycleManager = mock(classOf[BrokerLifecycleManager]) + + val metadataPublisher = new BrokerMetadataPublisher( + config, + metadataCache, + logManager, + replicaManager, + groupCoordinator, + mock(classOf[TransactionCoordinator]), + mock(classOf[DynamicConfigPublisher]), + mock(classOf[DynamicClientQuotaPublisher]), + mock(classOf[ScramPublisher]), + mock(classOf[DelegationTokenPublisher]), + mock(classOf[AclPublisher]), + faultHandler, + faultHandler, + 
brokerLifecycleManager, + ) + + var image = MetadataImage.EMPTY + var delta = new MetadataDelta.Builder() + .setImage(image) + .build() + + // We first upgrade metadata version to 3_6_IV2 + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(MetadataVersion.IBP_3_6_IV2.featureLevel())) + var newImage = delta.apply(new MetadataProvenance(100, 4, 2000)) + + metadataPublisher.onMetadataUpdate(delta, newImage, + LogDeltaManifest.newBuilder() + .provenance(MetadataProvenance.EMPTY) + .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) + .numBatches(1) + .elapsedNs(100) + .numBytes(42) + .build()) + + // This should NOT trigger broker reregistration + verifyNoInteractions(brokerLifecycleManager) + + // We then upgrade to IBP_3_7_IV2 + image = newImage + delta = new MetadataDelta.Builder() + .setImage(image) + .build() + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(MetadataVersion.IBP_3_7_IV2.featureLevel())) + newImage = delta.apply(new MetadataProvenance(100, 4, 2000)) + + metadataPublisher.onMetadataUpdate(delta, newImage, + LogDeltaManifest.newBuilder() + .provenance(MetadataProvenance.EMPTY) + .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) + .numBatches(1) + .elapsedNs(100) + .numBytes(42) + .build()) + + // This SHOULD trigger a broker registration + verify(brokerLifecycleManager, times(1)).handleKraftJBODMetadataVersionUpdate() + clearInvocations(brokerLifecycleManager) + + // Finally upgrade to IBP_3_8_IV0 + image = newImage + delta = new MetadataDelta.Builder() + .setImage(image) + .build() + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). 
+ setFeatureLevel(MetadataVersion.IBP_3_8_IV0.featureLevel())) + newImage = delta.apply(new MetadataProvenance(200, 4, 3000)) + + metadataPublisher.onMetadataUpdate(delta, newImage, + LogDeltaManifest.newBuilder() + .provenance(MetadataProvenance.EMPTY) + .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) + .numBatches(1) + .elapsedNs(100) + .numBytes(42) + .build()) + + // This should NOT trigger broker reregistration + verify(brokerLifecycleManager, times(0)).handleKraftJBODMetadataVersionUpdate() + + metadataPublisher.close() + } } diff --git a/docs/security.html b/docs/security.html index e3495f4b5188b..7eb0c2cb346f8 100644 --- a/docs/security.html +++ b/docs/security.html @@ -2267,42 +2267,6 @@

classicGroupMaxSize) { log.info("Cannot downgrade consumer group {} to classic group because its group size is greater than classic group max size.", consumerGroup.groupId()); - return false; } return true; } @@ -1906,28 +1904,24 @@ private Assignment updateTargetAssignment( .withInvertedTargetAssignment(group.invertedTargetAssignment()) .withTopicsImage(metadataImage.topics()) .addOrUpdateMember(updatedMember.memberId(), updatedMember); - + TargetAssignmentBuilder.TargetAssignmentResult assignmentResult; + // A new static member is replacing an older one with the same subscriptions. + // We just need to remove the older member and add the newer one. The new member should + // reuse the target assignment of the older member. if (staticMemberReplaced) { - // A new static member is replacing an older one with the same subscriptions. - // We just need to remove the older member and add the newer one. The new member should - // reuse the target assignment of the older member. - assignmentResultBuilder.removeMember(member.memberId()); + assignmentResult = assignmentResultBuilder + .removeMember(member.memberId()) + .build(); + } else { + assignmentResult = assignmentResultBuilder + .build(); } - TargetAssignmentBuilder.TargetAssignmentResult assignmentResult = - assignmentResultBuilder.build(); - log.info("[GroupId {}] Computed a new target assignment for epoch {} with '{}' assignor: {}.", group.groupId(), groupEpoch, preferredServerAssignor, assignmentResult.targetAssignment()); records.addAll(assignmentResult.records()); - - MemberAssignment newMemberAssignment = assignmentResult.targetAssignment().get(updatedMember.memberId()); - if (newMemberAssignment != null) { - return new Assignment(newMemberAssignment.targetPartitions()); - } else { - return Assignment.EMPTY; - } + return assignmentResult.targetAssignment().get(updatedMember.memberId()); } catch (PartitionAssignorException ex) { String msg = String.format("Failed to compute a new target assignment for epoch %d: 
%s", groupEpoch, ex.getMessage()); diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java index daea9938bf45d..57d6039fa0ba8 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java @@ -64,11 +64,11 @@ public static class TargetAssignmentResult { /** * The new target assignment for the group. */ - private final Map targetAssignment; + private final Map targetAssignment; TargetAssignmentResult( List records, - Map targetAssignment + Map targetAssignment ) { Objects.requireNonNull(records); Objects.requireNonNull(targetAssignment); @@ -86,7 +86,7 @@ public List records() { /** * @return The target assignment. */ - public Map targetAssignment() { + public Map targetAssignment() { return targetAssignment; } } @@ -347,26 +347,38 @@ public TargetAssignmentResult build() throws PartitionAssignorException { // Compute delta from previous to new target assignment and create the // relevant records. List records = new ArrayList<>(); + Map newTargetAssignment = new HashMap<>(); - for (String memberId : memberSpecs.keySet()) { + memberSpecs.keySet().forEach(memberId -> { Assignment oldMemberAssignment = targetAssignment.get(memberId); Assignment newMemberAssignment = newMemberAssignment(newGroupAssignment, memberId); - if (!newMemberAssignment.equals(oldMemberAssignment)) { - // If the member had no assignment or had a different assignment, we - // create a record for the new assignment. + newTargetAssignment.put(memberId, newMemberAssignment); + + if (oldMemberAssignment == null) { + // If the member had no assignment, we always create a record for it. 
records.add(newTargetAssignmentRecord( groupId, memberId, newMemberAssignment.partitions() )); + } else { + // If the member had an assignment, we only create a record if the + // new assignment is different. + if (!newMemberAssignment.equals(oldMemberAssignment)) { + records.add(newTargetAssignmentRecord( + groupId, + memberId, + newMemberAssignment.partitions() + )); + } } - } + }); // Bump the target assignment epoch. records.add(newTargetAssignmentEpochRecord(groupId, groupEpoch)); - return new TargetAssignmentResult(records, newGroupAssignment.members()); + return new TargetAssignmentResult(records, newTargetAssignment); } private Assignment newMemberAssignment( diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java index abf48fd64158a..3664a7a61d295 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java @@ -13166,49 +13166,6 @@ public void testClassicGroupLeaveToConsumerGroupWithoutValidLeaveGroupMember() { assertEquals(Collections.emptyList(), leaveResult.records()); } - @Test - public void testNoConversionWhenSizeExceedsClassicMaxGroupSize() throws Exception { - String groupId = "group-id"; - String nonClassicMemberId = "1"; - - List protocols = Collections.singletonList( - new ConsumerGroupMemberMetadataValue.ClassicProtocol() - .setName("range") - .setMetadata(new byte[0]) - ); - - ConsumerGroupMember member = new ConsumerGroupMember.Builder(nonClassicMemberId).build(); - ConsumerGroupMember classicMember1 = new ConsumerGroupMember.Builder("2") - .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) - .build(); - ConsumerGroupMember classicMember2 = new 
ConsumerGroupMember.Builder("3") - .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) - .build(); - - GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(1) - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroup( - new ConsumerGroupBuilder(groupId, 10) - .withMember(member) - .withMember(classicMember1) - .withMember(classicMember2) - ) - .build(); - - assertEquals(Group.GroupType.CONSUMER, context.groupMetadataManager.group(groupId).type()); - - context.consumerGroupHeartbeat( - new ConsumerGroupHeartbeatRequestData() - .setGroupId(groupId) - .setMemberId(nonClassicMemberId) - .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) - .setRebalanceTimeoutMs(5000) - ); - - assertEquals(Group.GroupType.CONSUMER, context.groupMetadataManager.group(groupId).type()); - } - private static void checkJoinGroupResponse( JoinGroupResponseData expectedResponse, JoinGroupResponseData actualResponse, diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java index e2e572b6bf9f1..d5ba038f31895 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java @@ -337,12 +337,12 @@ public void testAssignmentHasNotChanged() { 20 )), result.records()); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - 
expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -400,12 +400,12 @@ public void testAssignmentSwapped() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -474,16 +474,16 @@ public void testNewMember() { 20 ), result.records().get(3)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-3", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), mkTopicAssignment(barTopicId, 5, 6) ))); @@ -561,16 +561,16 @@ public void testUpdateMember() { 20 ), result.records().get(3)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( 
mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-3", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), mkTopicAssignment(barTopicId, 5, 6) ))); @@ -639,16 +639,16 @@ public void testPartialAssignmentUpdate() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 3, 4, 5) ))); - expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-3", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 6), mkTopicAssignment(barTopicId, 6) ))); @@ -713,12 +713,12 @@ public void testDeleteMember() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -788,17 +788,17 @@ public void 
testReplaceStaticMember() { 20 ), result.records().get(1)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3-a", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-3-a", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), mkTopicAssignment(barTopicId, 5, 6) ))); diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java index 8b9c5b19eae4f..0974c31d1b263 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java @@ -408,13 +408,6 @@ public ControllerResult registerBroker( setBrokerEpoch(brokerEpoch). setRack(request.rack()). 
setEndPoints(listenerInfo.toBrokerRegistrationRecord()); - - if (existing != null && request.incarnationId().equals(existing.incarnationId())) { - log.info("Amending registration of broker {}", request.brokerId()); - record.setFenced(existing.fenced()); - record.setInControlledShutdown(existing.inControlledShutdown()); - } - for (BrokerRegistrationRequestData.Feature feature : request.features()) { record.features().add(processRegistrationFeature(brokerId, finalizedFeatures, feature)); } diff --git a/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java b/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java deleted file mode 100644 index 51ac2bdfa4bd3..0000000000000 --- a/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.image.publisher; - -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.image.loader.LoaderManifest; -import org.apache.kafka.metadata.BrokerRegistration; -import org.apache.kafka.server.common.MetadataVersion; -import org.slf4j.Logger; - -import java.util.List; - -/** - * Tracks the registration of a specific broker, and executes a callback if it should be refreshed. - * - * This tracker handles cases where we might want to re-register the broker. The only such case - * right now is during the transition from non-JBOD mode, to JBOD mode. In other words, the - * transition from a MetadataVersion less than 3.7-IV2, to one greater than or equal to 3.7-IV2. - * In this case, the broker registration will start out containing no directories, and we need to - * resend the BrokerRegistrationRequest to fix that. - * - * As much as possible, the goal here is to keep things simple. We just compare the desired state - * with the actual state, and try to make changes only if necessary. - */ -public class BrokerRegistrationTracker implements MetadataPublisher { - private final Logger log; - private final int id; - private final Runnable refreshRegistrationCallback; - - /** - * Create the tracker. - * - * @param id The ID of this broker. - * @param targetDirectories The directories managed by this broker. - * @param refreshRegistrationCallback Callback to run if we need to refresh the registration. - */ - public BrokerRegistrationTracker( - int id, - List targetDirectories, - Runnable refreshRegistrationCallback - ) { - this.log = new LogContext("[BrokerRegistrationTracker id=" + id + "] "). 
- logger(BrokerRegistrationTracker.class); - this.id = id; - this.refreshRegistrationCallback = refreshRegistrationCallback; - } - - @Override - public String name() { - return "BrokerRegistrationTracker(id=" + id + ")"; - } - - @Override - public void onMetadataUpdate( - MetadataDelta delta, - MetadataImage newImage, - LoaderManifest manifest - ) { - boolean checkBrokerRegistration = false; - if (delta.featuresDelta() != null) { - if (delta.metadataVersionChanged().isPresent()) { - if (log.isTraceEnabled()) { - log.trace("Metadata version change is present: {}", - delta.metadataVersionChanged()); - } - checkBrokerRegistration = true; - } - } - if (delta.clusterDelta() != null) { - if (delta.clusterDelta().changedBrokers().get(id) != null) { - if (log.isTraceEnabled()) { - log.trace("Broker change is present: {}", - delta.clusterDelta().changedBrokers().get(id)); - } - checkBrokerRegistration = true; - } - } - if (checkBrokerRegistration) { - if (brokerRegistrationNeedsRefresh(newImage.features().metadataVersion(), - delta.clusterDelta().broker(id))) { - refreshRegistrationCallback.run(); - } - } - } - - /** - * Check if the current broker registration needs to be refreshed. - * - * @param metadataVersion The current metadata version. - * @param registration The current broker registration, or null if there is none. - * @return True only if we should refresh. - */ - boolean brokerRegistrationNeedsRefresh( - MetadataVersion metadataVersion, - BrokerRegistration registration - ) { - // If there is no existing registration, the BrokerLifecycleManager must still be sending it. - // So we don't need to do anything yet. - if (registration == null) { - log.debug("No current broker registration to check."); - return false; - } - // Check to see if the directory list has changed. Note that this check could certainly be - // triggered spuriously. 
For example, if the broker's directory list has been changed in the - // past, and we are in the process of replaying that change log, we will end up here. - // That's fine because resending the broker registration does not cause any problems. And, - // of course, as soon as a snapshot is made, we will no longer need to worry about those - // old metadata log entries being replayed on startup. - if (metadataVersion.isAtLeast(MetadataVersion.IBP_3_7_IV2) && - registration.directories().isEmpty()) { - log.info("Current directory set is empty, but MV supports JBOD. Resending " + - "broker registration."); - return true; - } - log.debug("Broker registration does not need to be resent."); - return false; - } -} diff --git a/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java deleted file mode 100644 index 855a96cd8aaf3..0000000000000 --- a/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.image.publisher; - -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.metadata.FeatureLevelRecord; -import org.apache.kafka.common.metadata.RegisterBrokerRecord; -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.image.MetadataProvenance; -import org.apache.kafka.image.loader.LogDeltaManifest; -import org.apache.kafka.raft.LeaderAndEpoch; -import org.apache.kafka.server.common.MetadataVersion; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.util.Arrays; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -@Timeout(value = 40) -public class BrokerRegistrationTrackerTest { - static final Uuid INCARNATION_ID = Uuid.fromString("jyjLbk31Tpa53pFrU9Y-Ng"); - - static final Uuid A = Uuid.fromString("Ahw3vXfnThqeZbb7HD1w6Q"); - - static final Uuid B = Uuid.fromString("BjOacT0OTNqIvUWIlKhahg"); - - static final Uuid C = Uuid.fromString("CVHi_iv2Rvy5_1rtPdasfg"); - - static class BrokerRegistrationTrackerTestContext { - AtomicInteger numCalls = new AtomicInteger(0); - BrokerRegistrationTracker tracker = new BrokerRegistrationTracker(1, - Arrays.asList(B, A), () -> numCalls.incrementAndGet()); - - MetadataImage image = MetadataImage.EMPTY; - - void onMetadataUpdate(MetadataDelta delta) { - MetadataProvenance provenance = new MetadataProvenance(0, 0, 0); - image = delta.apply(provenance); - LogDeltaManifest manifest = new LogDeltaManifest.Builder(). - provenance(provenance). - leaderAndEpoch(LeaderAndEpoch.UNKNOWN). - numBatches(1). - elapsedNs(1). - numBytes(1). - build(); - tracker.onMetadataUpdate(delta, image, manifest); - } - - MetadataDelta newDelta() { - return new MetadataDelta.Builder(). - setImage(image). 
- build(); - } - } - - @Test - public void testTrackerName() { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - assertEquals("BrokerRegistrationTracker(id=1)", ctx.tracker.name()); - } - - @Test - public void testMetadataVersionUpdateWithoutRegistrationDoesNothing() { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - MetadataDelta delta = ctx.newDelta(); - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(MetadataVersion.IBP_3_7_IV2.featureLevel())); - ctx.onMetadataUpdate(delta); - assertEquals(0, ctx.numCalls.get()); - } - - @Test - public void testBrokerUpdateWithoutNewMvDoesNothing() { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - MetadataDelta delta = ctx.newDelta(); - delta.replay(new RegisterBrokerRecord(). - setBrokerId(1). - setIncarnationId(INCARNATION_ID). - setLogDirs(Arrays.asList(A, B, C))); - ctx.onMetadataUpdate(delta); - assertEquals(0, ctx.numCalls.get()); - } - - @ParameterizedTest - @ValueSource(booleans = {false, true}) - public void testBrokerUpdateWithNewMv(boolean jbodMv) { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - MetadataDelta delta = ctx.newDelta(); - delta.replay(new RegisterBrokerRecord(). - setBrokerId(1). - setIncarnationId(INCARNATION_ID). - setLogDirs(Arrays.asList())); - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(jbodMv ? 
MetadataVersion.IBP_3_7_IV2.featureLevel() : - MetadataVersion.IBP_3_7_IV1.featureLevel())); - ctx.onMetadataUpdate(delta); - if (jbodMv) { - assertEquals(1, ctx.numCalls.get()); - } else { - assertEquals(0, ctx.numCalls.get()); - } - } - - @ParameterizedTest - @ValueSource(booleans = {false, true}) - public void testBrokerUpdateWithNewMvWithTwoDeltas(boolean jbodMv) { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - MetadataDelta delta = ctx.newDelta(); - delta.replay(new RegisterBrokerRecord(). - setBrokerId(1). - setIncarnationId(INCARNATION_ID). - setLogDirs(Arrays.asList())); - ctx.onMetadataUpdate(delta); - // No calls are made because MetadataVersion is 3.0-IV1 initially - assertEquals(0, ctx.numCalls.get()); - - delta = ctx.newDelta(); - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(jbodMv ? MetadataVersion.IBP_3_7_IV2.featureLevel() : - MetadataVersion.IBP_3_7_IV1.featureLevel())); - ctx.onMetadataUpdate(delta); - if (jbodMv) { - assertEquals(1, ctx.numCalls.get()); - } else { - assertEquals(0, ctx.numCalls.get()); - } - } -} diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java index d6cf615c781b3..6ea752886a992 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java @@ -100,16 +100,6 @@ public final class RemoteLogManagerConfig { "segments, fetch remote log indexes and clean up remote log segments."; public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10; - public static final String REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP = "remote.log.manager.copier.thread.pool.size"; - public static final String REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_DOC = 
"Size of the thread pool used in " + - "scheduling tasks to copy segments."; - public static final int DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE = 10; - - public static final String REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP = "remote.log.manager.expiration.thread.pool.size"; - public static final String REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_DOC = "Size of the thread pool used in" + - " scheduling tasks to clean up remote log segments."; - public static final int DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE = 10; - public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = "remote.log.manager.task.interval.ms"; public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = "Interval at which remote log manager runs the scheduled tasks like copy " + "segments, and clean up remote log segments."; @@ -251,18 +241,6 @@ public final class RemoteLogManagerConfig { atLeast(1), MEDIUM, REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC) - .defineInternal(REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, - INT, - DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE, - atLeast(1), - MEDIUM, - REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_DOC) - .defineInternal(REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, - INT, - DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE, - atLeast(1), - MEDIUM, - REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_DOC) .define(REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, LONG, DEFAULT_REMOTE_LOG_MANAGER_TASK_INTERVAL_MS, @@ -355,8 +333,6 @@ public final class RemoteLogManagerConfig { private final String remoteLogMetadataManagerClassPath; private final long remoteLogIndexFileCacheTotalSizeBytes; private final int remoteLogManagerThreadPoolSize; - private final int remoteLogManagerCopierThreadPoolSize; - private final int remoteLogManagerExpirationThreadPoolSize; private final long remoteLogManagerTaskIntervalMs; private final long remoteLogManagerTaskRetryBackoffMs; private final long 
remoteLogManagerTaskRetryBackoffMaxMs; @@ -385,8 +361,6 @@ public RemoteLogManagerConfig(AbstractConfig config) { config.getString(REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP), config.getLong(REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP), config.getInt(REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP), - config.getInt(REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP), - config.getInt(REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_PROP), @@ -419,8 +393,6 @@ public RemoteLogManagerConfig(boolean enableRemoteStorageSystem, String remoteLogMetadataManagerListenerName, long remoteLogIndexFileCacheTotalSizeBytes, int remoteLogManagerThreadPoolSize, - int remoteLogManagerCopierThreadPoolSize, - int remoteLogManagerExpirationThreadPoolSize, long remoteLogManagerTaskIntervalMs, long remoteLogManagerTaskRetryBackoffMs, long remoteLogManagerTaskRetryBackoffMaxMs, @@ -446,8 +418,6 @@ public RemoteLogManagerConfig(boolean enableRemoteStorageSystem, this.remoteLogMetadataManagerClassPath = remoteLogMetadataManagerClassPath; this.remoteLogIndexFileCacheTotalSizeBytes = remoteLogIndexFileCacheTotalSizeBytes; this.remoteLogManagerThreadPoolSize = remoteLogManagerThreadPoolSize; - this.remoteLogManagerCopierThreadPoolSize = remoteLogManagerCopierThreadPoolSize; - this.remoteLogManagerExpirationThreadPoolSize = remoteLogManagerExpirationThreadPoolSize; this.remoteLogManagerTaskIntervalMs = remoteLogManagerTaskIntervalMs; this.remoteLogManagerTaskRetryBackoffMs = remoteLogManagerTaskRetryBackoffMs; this.remoteLogManagerTaskRetryBackoffMaxMs = remoteLogManagerTaskRetryBackoffMaxMs; @@ -496,14 +466,6 @@ public int remoteLogManagerThreadPoolSize() { return remoteLogManagerThreadPoolSize; } - public int remoteLogManagerCopierThreadPoolSize() { - return remoteLogManagerCopierThreadPoolSize; - } - - 
public int remoteLogManagerExpirationThreadPoolSize() { - return remoteLogManagerExpirationThreadPoolSize; - } - public long remoteLogManagerTaskIntervalMs() { return remoteLogManagerTaskIntervalMs; } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java index 7af78e750a84f..a063fa8820a82 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java @@ -53,6 +53,12 @@ public void initialize(Set topicIdPartitions, initializeRemoteLogMetadataManager(topicIdPartitions, startConsumerThread, RemoteLogMetadataTopicPartitioner::new, remotePartitionMetadataStoreSupplier); } + public void initializeRemoteLogMetadataManager(Set topicIdPartitions, + boolean startConsumerThread, + Function remoteLogMetadataTopicPartitioner) { + initializeRemoteLogMetadataManager(topicIdPartitions, startConsumerThread, remoteLogMetadataTopicPartitioner, RemotePartitionMetadataStore::new); + } + public void initializeRemoteLogMetadataManager(Set topicIdPartitions, boolean startConsumerThread, Function remoteLogMetadataTopicPartitioner, @@ -64,7 +70,6 @@ public void initializeRemoteLogMetadataManager(Set topicIdPart .startConsumerThread(startConsumerThread) .remoteLogMetadataTopicPartitioner(remoteLogMetadataTopicPartitioner) .remotePartitionMetadataStore(remotePartitionMetadataStoreSupplier) - .overrideRemoteLogMetadataManagerProps(overrideRemoteLogMetadataManagerProps()) .build(); } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java 
b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java index 84b98dcb5be1d..c599259ed9416 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java @@ -16,11 +16,6 @@ */ package org.apache.kafka.server.log.remote.metadata.storage; -import kafka.test.ClusterInstance; -import kafka.test.annotation.ClusterTest; -import kafka.test.junit.ClusterTestExtensions; -import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; @@ -29,99 +24,139 @@ import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId; import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata; import org.apache.kafka.test.TestUtils; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import scala.collection.JavaConverters; +import scala.collection.Seq; +import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.LOG_DIR; -@ExtendWith(value = ClusterTestExtensions.class) -@Tag("integration") +@SuppressWarnings("deprecation") // Added for Scala 2.12 compatibility for usages of JavaConverters public class TopicBasedRemoteLogMetadataManagerRestartTest { private static final int SEG_SIZE = 1024 * 1024; private final Time 
time = new MockTime(1); private final String logDir = TestUtils.tempDirectory("_rlmm_segs_").getAbsolutePath(); - private final ClusterInstance clusterInstance; - TopicBasedRemoteLogMetadataManagerRestartTest(ClusterInstance clusterInstance) { // Constructor injections - this.clusterInstance = clusterInstance; + private TopicBasedRemoteLogMetadataManagerHarness remoteLogMetadataManagerHarness; + + @BeforeEach + public void setup() { + // Start the cluster and initialize TopicBasedRemoteLogMetadataManager. + remoteLogMetadataManagerHarness = new TopicBasedRemoteLogMetadataManagerHarness() { + protected Map overrideRemoteLogMetadataManagerProps() { + Map props = new HashMap<>(); + props.put(LOG_DIR, logDir); + return props; + } + }; + remoteLogMetadataManagerHarness.initialize(Collections.emptySet(), true); } - private TopicBasedRemoteLogMetadataManager createTopicBasedRemoteLogMetadataManager() { - return RemoteLogMetadataManagerTestUtils.builder() - .topicIdPartitions(Collections.emptySet()) - .bootstrapServers(clusterInstance.bootstrapServers()) - .startConsumerThread(true) - .remoteLogMetadataTopicPartitioner(RemoteLogMetadataTopicPartitioner::new) - .overrideRemoteLogMetadataManagerProps(Collections.singletonMap(LOG_DIR, logDir)) - .build(); + private void startTopicBasedRemoteLogMetadataManagerHarness(boolean startConsumerThread) { + remoteLogMetadataManagerHarness.initializeRemoteLogMetadataManager(Collections.emptySet(), startConsumerThread, RemoteLogMetadataTopicPartitioner::new); } - @ClusterTest(brokers = 3) + @AfterEach + public void teardown() throws IOException { + if (remoteLogMetadataManagerHarness != null) { + remoteLogMetadataManagerHarness.close(); + } + } + + private void stopTopicBasedRemoteLogMetadataManagerHarness() { + remoteLogMetadataManagerHarness.closeRemoteLogMetadataManager(); + } + + private TopicBasedRemoteLogMetadataManager topicBasedRlmm() { + return remoteLogMetadataManagerHarness.remoteLogMetadataManager(); + } + + @Test public 
void testRLMMAPIsAfterRestart() throws Exception { // Create topics. String leaderTopic = "new-leader"; + HashMap> assignedLeaderTopicReplicas = new HashMap<>(); + List leaderTopicReplicas = new ArrayList<>(); + // Set broker id 0 as the first entry which is taken as the leader. + leaderTopicReplicas.add(0); + leaderTopicReplicas.add(1); + leaderTopicReplicas.add(2); + assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas)); + remoteLogMetadataManagerHarness.createTopicWithAssignment( + leaderTopic, JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas), + remoteLogMetadataManagerHarness.listenerName()); + String followerTopic = "new-follower"; - try (Admin admin = clusterInstance.createAdminClient()) { - // Set broker id 0 as the first entry which is taken as the leader. - NewTopic newLeaderTopic = new NewTopic(leaderTopic, Collections.singletonMap(0, Arrays.asList(0, 1, 2))); - // Set broker id 1 as the first entry which is taken as the leader. - NewTopic newFollowerTopic = new NewTopic(followerTopic, Collections.singletonMap(0, Arrays.asList(1, 2, 0))); - admin.createTopics(Arrays.asList(newLeaderTopic, newFollowerTopic)).all().get(); - } - clusterInstance.waitForTopic(leaderTopic, 1); - clusterInstance.waitForTopic(followerTopic, 1); + HashMap> assignedFollowerTopicReplicas = new HashMap<>(); + List followerTopicReplicas = new ArrayList<>(); + // Set broker id 1 as the first entry which is taken as the leader. 
+ followerTopicReplicas.add(1); + followerTopicReplicas.add(2); + followerTopicReplicas.add(0); + assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas)); + remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic, + JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas), + remoteLogMetadataManagerHarness.listenerName()); final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0)); final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0)); + + // Register these partitions to RLMM. + topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); + + // Add segments for these partitions, but they are not available as they have not yet been subscribed. RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata( new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L)); + topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get(); + RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata( new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L)); + topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get(); - try (TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = createTopicBasedRemoteLogMetadataManager()) { - // Register these partitions to RemoteLogMetadataManager. - topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges( - Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); + // Stop TopicBasedRemoteLogMetadataManager only. 
+ stopTopicBasedRemoteLogMetadataManagerHarness(); - // Add segments for these partitions, but they are not available as they have not yet been subscribed. - topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(leaderSegmentMetadata).get(); - topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(followerSegmentMetadata).get(); - } + // Start TopicBasedRemoteLogMetadataManager + startTopicBasedRemoteLogMetadataManagerHarness(true); - try (TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = createTopicBasedRemoteLogMetadataManager()) { - // Register these partitions to RemoteLogMetadataManager, which loads the respective metadata snapshots. - topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges( - Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); - - // Check for the stored entries from the earlier run. - TestUtils.waitForCondition(() -> - TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), - topicBasedRemoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)), - "Remote log segment metadata not available"); - TestUtils.waitForCondition(() -> - TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), - topicBasedRemoteLogMetadataManager.listRemoteLogSegments(followerTopicIdPartition)), - "Remote log segment metadata not available"); - // Add one more segment - RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata( - new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), - 101, 200, -1L, 0, - time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L)); - topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get(); - - // Check that both the stored segment and recently added segment are available. 
- Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), - topicBasedRemoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition))); - } + // Register these partitions to RLMM, which loads the respective metadata snapshots. + topicBasedRlmm().onPartitionLeadershipChanges( + Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); + + // Check for the stored entries from the earlier run. + TestUtils.waitForCondition(() -> + TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), + topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)), + "Remote log segment metadata not available"); + TestUtils.waitForCondition(() -> + TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), + topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)), + "Remote log segment metadata not available"); + // Add one more segment + RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata( + new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 101, 200, -1L, 0, + time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L)); + topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get(); + + // Check that both the stored segment and recently added segment are available. 
+ Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), + topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition))); } -} +} \ No newline at end of file diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java index 4e3c2fc26cb66..45fd6669e7d4f 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java @@ -47,7 +47,7 @@ public void testValidConfigs(boolean useDefaultRemoteLogMetadataManagerClass) { RemoteLogManagerConfig expectedRemoteLogManagerConfig = new RemoteLogManagerConfig(true, "dummy.remote.storage.class", "dummy.remote.storage.class.path", remoteLogMetadataManagerClass, "dummy.remote.log.metadata.class.path", - "listener.name", 1024 * 1024L, 1, 1, 1, 60000L, 100L, 60000L, 0.3, 10, 100, 100, + "listener.name", 1024 * 1024L, 1, 60000L, 100L, 60000L, 0.3, 10, 100, 100, rsmPrefix, rsmProps, rlmmPrefix, rlmmProps, Long.MAX_VALUE, 11, 1, Long.MAX_VALUE, 11, 1); @@ -81,10 +81,6 @@ private Map extractProps(RemoteLogManagerConfig remoteLogManager remoteLogManagerConfig.remoteLogIndexFileCacheTotalSizeBytes()); props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP, remoteLogManagerConfig.remoteLogManagerThreadPoolSize()); - props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, - remoteLogManagerConfig.remoteLogManagerCopierThreadPoolSize()); - props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, - remoteLogManagerConfig.remoteLogManagerExpirationThreadPoolSize()); props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, remoteLogManagerConfig.remoteLogManagerTaskIntervalMs()); 
props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP, diff --git a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java index e5177ddaead2c..5ea53a1c38269 100644 --- a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java +++ b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java @@ -43,37 +43,11 @@ import static kafka.test.annotation.Type.CO_KRAFT; import static kafka.test.annotation.Type.KRAFT; -import static kafka.test.annotation.Type.ZK; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG; -/** - * The old test framework {@link kafka.api.BaseConsumerTest#getTestQuorumAndGroupProtocolParametersAll} test for the following cases: - *
    - *
  • (ZK / KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 2 cases
  • - *
  • (KRAFT server) with (group.coordinator.new.enable=true) with (classic group protocol) = 1 case
  • - *
  • (KRAFT server) with (group.coordinator.new.enable=true) with (consumer group protocol) = 1 case
  • - *
- *

- * The new test framework run seven cases for the following cases: - *

    - *
  • (ZK / KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 3 cases
  • - *
  • (KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=true) with (classic group protocol) = 2 cases
  • - *
  • (KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=true) with (consumer group protocol) = 2 cases
  • - *
- *

- * We can reduce the number of cases as same as the old test framework by using the following methods: - *

    - *
  • {@link #forConsumerGroupCoordinator} for the case of (consumer group protocol)
  • - *
  • (CO_KRAFT servers) with (group.coordinator.new.enable=true) with (classic / consumer group protocols) = 2 cases
  • - *
- *
    - *
  • {@link #forClassicGroupCoordinator} for the case of (classic group protocol)
  • - *
  • (ZK / KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 2 cases
  • - *
- */ class ConsumerGroupCommandTestUtils { private ConsumerGroupCommandTestUtils() { @@ -92,8 +66,8 @@ static List forConsumerGroupCoordinator() { serverProperties.put(GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer"); return Collections.singletonList(ClusterConfig.defaultBuilder() + .setTypes(Stream.of(KRAFT, CO_KRAFT).collect(Collectors.toSet())) .setFeatures(Collections.singletonMap(Features.GROUP_VERSION, GroupVersion.GV_1.featureLevel())) - .setTypes(Collections.singleton(CO_KRAFT)) .setServerProperties(serverProperties) .setTags(Collections.singletonList("consumerGroupCoordinator")) .build()); @@ -106,7 +80,6 @@ static List forClassicGroupCoordinator() { serverProperties.put(NEW_GROUP_COORDINATOR_ENABLE_CONFIG, "false"); return Collections.singletonList(ClusterConfig.defaultBuilder() - .setTypes(Stream.of(ZK, KRAFT).collect(Collectors.toSet())) .setServerProperties(serverProperties) .setTags(Collections.singletonList("classicGroupCoordinator")) .build()); From 97c29c64d0e956381c7f71412c0b079ccf1b8303 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 10 Jun 2024 12:52:18 -0500 Subject: [PATCH 20/22] Updated testOffsetFetchRequestStateToStringBase() Updated testOffsetFetchRequestStateToStringBase() --- .../clients/consumer/internals/CommitRequestManagerTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 1daab57807e70..c893ed11c54fe 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -159,7 +159,7 @@ public void testOffsetFetchRequestStateToStringBase() { String target = requestState.toStringBase() + ", memberInfo=" + memberInfo + - ", expirationTimeMs=" + 
(offsetFetchRequestState.expirationTimeMs().isPresent() ? offsetFetchRequestState.expirationTimeMs() : "undefined") + + ", expirationTimeMs=" + Optional.of(1000) + ", isExpired=" + offsetFetchRequestState.isExpired + ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + ", future=" + offsetFetchRequestState.future(); From df3e2aec6f29f25cdbe8e344cccd323268706d60 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 10 Jun 2024 12:59:41 -0500 Subject: [PATCH 21/22] Revert "Updated testOffsetFetchRequestStateToStringBase()" This reverts commit 97c29c64d0e956381c7f71412c0b079ccf1b8303. --- .../clients/consumer/internals/CommitRequestManagerTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index c893ed11c54fe..1daab57807e70 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -159,7 +159,7 @@ public void testOffsetFetchRequestStateToStringBase() { String target = requestState.toStringBase() + ", memberInfo=" + memberInfo + - ", expirationTimeMs=" + Optional.of(1000) + + ", expirationTimeMs=" + (offsetFetchRequestState.expirationTimeMs().isPresent() ? 
offsetFetchRequestState.expirationTimeMs() : "undefined") + ", isExpired=" + offsetFetchRequestState.isExpired + ", requestedPartitions=" + offsetFetchRequestState.requestedPartitions + ", future=" + offsetFetchRequestState.future(); From 74034dc9ba784c66e6c27dc2cf40b4e1eace63fe Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 10 Jun 2024 15:24:07 -0500 Subject: [PATCH 22/22] Revert "Merge branch 'trunk' into 16557" This reverts commit c34f473eb850f4c2f9cb86839ccadf7471e0fcf0, reversing changes made to df3e2aec6f29f25cdbe8e344cccd323268706d60. --- README.md | 7 +- checkstyle/import-control.xml | 6 +- checkstyle/suppressions.xml | 4 +- .../org/apache/kafka/clients/ClientUtils.java | 2 +- .../kafka/clients/InFlightRequests.java | 2 +- .../org/apache/kafka/clients/Metadata.java | 4 +- .../apache/kafka/clients/NetworkClient.java | 5 +- .../apache/kafka/clients/NodeApiVersions.java | 7 +- .../clients/admin/AddRaftVoterResult.java | 42 - .../org/apache/kafka/clients/admin/Admin.java | 64 +- .../admin/DescribeConsumerGroupsResult.java | 4 +- .../admin/DescribeDelegationTokenOptions.java | 4 +- .../clients/admin/DescribeLogDirsResult.java | 4 +- .../admin/DescribeReplicaLogDirsResult.java | 23 +- .../kafka/clients/admin/ForwardingAdmin.java | 10 - .../kafka/clients/admin/KafkaAdminClient.java | 155 +- .../kafka/clients/admin/ListTopicsResult.java | 4 +- .../admin/ListTransactionsOptions.java | 2 +- .../clients/admin/ListTransactionsResult.java | 6 +- .../apache/kafka/clients/admin/NewTopic.java | 14 +- .../clients/admin/RaftVoterEndpoint.java | 100 - .../clients/admin/RemoveRaftVoterOptions.java | 26 - .../clients/admin/RemoveRaftVoterResult.java | 42 - .../DescribeConsumerGroupsHandler.java | 6 +- .../internals/FenceProducersHandler.java | 12 +- .../internals/PartitionLeaderStrategy.java | 6 +- .../clients/consumer/AcknowledgeType.java | 58 - .../AcknowledgementCommitCallback.java | 53 - .../clients/consumer/ConsumerConfig.java | 5 +- 
.../consumer/ConsumerRebalanceListener.java | 2 +- .../clients/consumer/KafkaShareConsumer.java | 700 ------- .../kafka/clients/consumer/MockConsumer.java | 7 +- .../clients/consumer/MockShareConsumer.java | 170 -- .../kafka/clients/consumer/RangeAssignor.java | 6 +- .../kafka/clients/consumer/ShareConsumer.java | 115 -- .../clients/consumer/StickyAssignor.java | 3 +- .../internals/AsyncKafkaConsumer.java | 54 +- .../internals/CommitRequestManager.java | 133 +- .../internals/ConsumerDelegateCreator.java | 3 +- .../internals/ConsumerNetworkThread.java | 5 +- .../internals/CoordinatorRequestManager.java | 4 + .../internals/HeartbeatRequestManager.java | 2 +- .../consumer/internals/MemberState.java | 7 +- .../internals/MembershipManagerImpl.java | 4 +- .../internals/NetworkClientDelegate.java | 43 +- .../consumer/internals/RequestManagers.java | 3 +- .../internals/ShareConsumerDelegate.java | 37 - .../ShareConsumerDelegateCreator.java | 57 - .../consumer/internals/TimedRequestState.java | 71 - .../TopicMetadataRequestManager.java | 53 +- .../events/CompletableApplicationEvent.java | 1 - .../producer/internals/RecordAccumulator.java | 13 +- .../producer/internals/TxnPartitionMap.java | 2 +- .../java/org/apache/kafka/common/Cluster.java | 4 +- .../apache/kafka/common/ClusterResource.java | 2 +- .../common/compress/Lz4BlockInputStream.java | 5 + .../common/compress/Lz4BlockOutputStream.java | 4 + .../kafka/common/config/AbstractConfig.java | 4 +- .../common/config/internals/AllowedPaths.java | 2 +- .../common/metrics/KafkaMetricsContext.java | 2 +- .../apache/kafka/common/metrics/Metrics.java | 4 +- .../kafka/common/metrics/MetricsReporter.java | 4 +- .../kafka/common/metrics/stats/Histogram.java | 2 + .../network/PlaintextChannelBuilder.java | 3 +- .../apache/kafka/common/protocol/ApiKeys.java | 9 +- .../protocol/MessageSizeAccumulator.java | 11 + .../record/AbstractLegacyRecordBatch.java | 2 +- .../record/CompressionRatioEstimator.java | 7 + 
.../kafka/common/record/LegacyRecord.java | 8 + .../common/record/MemoryRecordsBuilder.java | 10 +- .../common/requests/AbstractRequest.java | 14 - .../common/requests/AbstractResponse.java | 14 - .../common/requests/AddRaftVoterRequest.java | 75 - .../common/requests/AddRaftVoterResponse.java | 65 - .../requests/AlterPartitionRequest.java | 12 +- .../requests/AlterPartitionResponse.java | 6 +- .../requests/ControlledShutdownResponse.java | 6 +- .../common/requests/DeleteAclsResponse.java | 4 +- .../common/requests/DeleteTopicsRequest.java | 4 +- .../requests/DescribeConfigsRequest.java | 8 +- .../requests/DescribeLogDirsResponse.java | 34 +- .../kafka/common/requests/FetchRequest.java | 26 +- .../requests/FetchSnapshotResponse.java | 2 +- .../common/requests/JoinGroupRequest.java | 2 +- .../common/requests/LeaderAndIsrRequest.java | 18 +- .../common/requests/LeaveGroupResponse.java | 6 +- .../common/requests/ListOffsetsRequest.java | 9 + .../common/requests/OffsetCommitRequest.java | 6 +- .../common/requests/OffsetCommitResponse.java | 8 +- .../common/requests/OffsetDeleteResponse.java | 14 +- .../common/requests/OffsetFetchRequest.java | 6 +- .../kafka/common/requests/ProduceRequest.java | 14 +- .../requests/RemoveRaftVoterRequest.java | 75 - .../requests/RemoveRaftVoterResponse.java | 65 - .../common/requests/StopReplicaRequest.java | 16 +- .../common/requests/SyncGroupRequest.java | 2 +- .../requests/TxnOffsetCommitRequest.java | 6 +- .../requests/TxnOffsetCommitResponse.java | 6 +- .../requests/UpdateMetadataRequest.java | 20 +- .../requests/UpdateRaftVoterRequest.java | 73 - .../requests/UpdateRaftVoterResponse.java | 65 - .../security/authenticator/LoginManager.java | 2 +- .../SaslClientAuthenticator.java | 3 +- .../secured/LoginAccessTokenValidator.java | 4 +- ...shingHttpsJwksVerificationKeyResolver.java | 18 +- .../ValidatorAccessTokenValidator.java | 4 +- ...thBearerUnsecuredLoginCallbackHandler.java | 2 +- ...arerUnsecuredValidatorCallbackHandler.java | 
3 +- .../unsecured/OAuthBearerValidationUtils.java | 2 +- .../scram/internals/ScramSaslServer.java | 3 +- .../security/ssl/DefaultSslEngineFactory.java | 5 +- .../security/ssl/SslPrincipalMapper.java | 2 +- .../common/utils/ByteBufferUnmapper.java | 3 +- .../org/apache/kafka/common/utils/Java.java | 2 +- .../common/utils/LoggingSignalHandler.java | 3 +- .../common/message/AddRaftVoterRequest.json | 40 - .../common/message/AddRaftVoterResponse.json | 30 - .../message/RemoveRaftVoterRequest.json | 30 - .../message/RemoveRaftVoterResponse.json | 30 - .../message/UpdateRaftVoterRequest.json | 46 - .../message/UpdateRaftVoterResponse.json | 28 - .../apache/kafka/clients/ClientUtilsTest.java | 2 +- .../apache/kafka/clients/MetadataTest.java | 16 +- .../kafka/clients/NetworkClientTest.java | 12 +- .../clients/admin/AdminClientTestUtils.java | 2 +- .../kafka/clients/admin/ConfigTest.java | 5 +- ...escribeUserScramCredentialsResultTest.java | 13 +- .../clients/admin/KafkaAdminClientTest.java | 153 +- .../kafka/clients/admin/MockAdminClient.java | 44 +- .../admin/internals/AdminApiDriverTest.java | 56 +- .../internals/AllBrokersStrategyTest.java | 6 +- .../internals/CoordinatorStrategyTest.java | 2 +- .../internals/FenceProducersHandlerTest.java | 23 +- .../clients/consumer/KafkaConsumerTest.java | 4 +- .../consumer/MockShareConsumerTest.java | 50 - .../internals/AbstractCoordinatorTest.java | 6 +- .../internals/AsyncKafkaConsumerTest.java | 73 +- .../internals/CommitRequestManagerTest.java | 158 +- .../internals/ConsumerCoordinatorTest.java | 27 +- .../internals/ConsumerInterceptorsTest.java | 2 +- .../internals/ConsumerNetworkThreadTest.java | 45 +- .../internals/ConsumerTestBuilder.java | 7 +- .../CoordinatorRequestManagerTest.java | 1 + .../internals/FetchRequestManagerTest.java | 12 +- .../internals/NetworkClientDelegateTest.java | 94 +- .../internals/TimedRequestStateTest.java | 96 - .../TopicMetadataRequestManagerTest.java | 1 - 
.../events/ApplicationEventProcessorTest.java | 3 + .../clients/producer/KafkaProducerTest.java | 16 +- .../producer/internals/BufferPoolTest.java | 2 +- .../internals/RecordAccumulatorTest.java | 14 +- .../internals/TransactionManagerTest.java | 4 +- .../apache/kafka/common/KafkaFutureTest.java | 6 +- .../kafka/common/config/ConfigDefTest.java | 24 +- .../provider/DirectoryConfigProviderTest.java | 4 +- .../kafka/common/metrics/JmxReporterTest.java | 6 +- .../kafka/common/metrics/MetricsTest.java | 10 +- .../kafka/common/metrics/SensorTest.java | 3 +- .../common/metrics/stats/FrequenciesTest.java | 3 +- .../common/network/KafkaChannelTest.java | 6 +- .../kafka/common/network/SelectorTest.java | 4 +- .../common/network/SslTransportLayerTest.java | 9 +- .../network/SslTransportTls12Tls13Test.java | 2 +- .../common/network/Tls12SelectorTest.java | 4 +- .../common/network/Tls13SelectorTest.java | 3 +- .../common/protocol/MessageUtilTest.java | 2 +- .../record/MemoryRecordsBuilderTest.java | 2 +- .../common/replica/ReplicaSelectorTest.java | 6 +- .../AlterReplicaLogDirsRequestTest.java | 6 +- .../AlterReplicaLogDirsResponseTest.java | 3 +- .../requests/DeleteAclsResponseTest.java | 16 +- .../requests/DeleteTopicsRequestTest.java | 4 +- .../common/requests/FetchRequestTest.java | 10 +- .../requests/LeaderAndIsrRequestTest.java | 6 +- .../requests/ListOffsetsRequestTest.java | 4 +- .../common/requests/RequestResponseTest.java | 225 +-- .../requests/UpdateMetadataRequestTest.java | 6 +- .../ClientAuthenticationFailureTest.java | 5 +- .../SaslAuthenticatorFailureDelayTest.java | 5 +- .../authenticator/SaslAuthenticatorTest.java | 149 +- .../security/kerberos/KerberosNameTest.java | 25 +- ...arerUnsecuredLoginCallbackHandlerTest.java | 2 +- ...UnsecuredValidatorCallbackHandlerTest.java | 3 +- .../OAuthBearerValidationUtilsTest.java | 6 +- .../scram/internals/ScramFormatterTest.java | 3 +- ...LoggingTrustManagerFactoryWrapperTest.java | 2 +- 
.../ssl/mock/TestTrustManagerFactory.java | 2 + .../common/utils/FlattenedIteratorTest.java | 30 +- .../ImplicitLinkedHashCollectionTest.java | 8 +- .../common/utils/MappedIteratorTest.java | 4 +- .../apache/kafka/common/utils/UtilsTest.java | 36 +- .../kafka/connect/data/ConnectSchema.java | 36 +- .../kafka/connect/data/ConnectSchemaTest.java | 140 -- .../apache/kafka/connect/data/StructTest.java | 33 + .../kafka/connect/runtime/AbstractHerder.java | 10 +- .../distributed/DistributedHerder.java | 4 +- .../runtime/standalone/StandaloneHerder.java | 4 +- .../KafkaConfigBackingStoreMockitoTest.java | 1318 ++++++++++++ .../storage/KafkaConfigBackingStoreTest.java | 1779 ++++------------- .../util/clusters/EmbeddedKafkaCluster.java | 16 +- .../MetadataVersionConfigValidator.java | 71 - .../java/kafka/server/SharePartition.java | 436 ---- .../kafka/server/SharePartitionManager.java | 228 --- .../server/builders/KafkaApisBuilder.java | 2 +- .../builders/ReplicaManagerBuilder.java | 2 +- .../group/GroupMetadataManager.scala | 19 +- core/src/main/scala/kafka/log/LogLoader.scala | 4 +- .../src/main/scala/kafka/log/LogManager.scala | 15 +- .../src/main/scala/kafka/log/UnifiedLog.scala | 43 +- .../metrics/LinuxIoMetricsCollector.scala | 101 + .../kafka/network/RequestConvertToJson.scala | 6 - .../main/scala/kafka/raft/RaftManager.scala | 23 +- .../scala/kafka/server/BrokerServer.scala | 16 +- .../scala/kafka/server/ConfigHandler.scala | 2 +- .../scala/kafka/server/ControllerApis.scala | 18 - .../ControllerConfigurationValidator.scala | 3 +- .../scala/kafka/server/ControllerServer.scala | 5 +- .../kafka/server/DelayedRemoteFetch.scala | 3 +- .../kafka/server/DynamicBrokerConfig.scala | 3 +- .../main/scala/kafka/server/KafkaApis.scala | 30 - .../main/scala/kafka/server/KafkaBroker.scala | 5 +- .../main/scala/kafka/server/KafkaConfig.scala | 22 +- .../scala/kafka/server/KafkaRaftServer.scala | 1 - .../main/scala/kafka/server/KafkaServer.scala | 7 +- 
.../NodeToControllerChannelManager.scala | 2 +- .../scala/kafka/server/ReplicaManager.scala | 29 +- .../scala/kafka/server/SharedServer.scala | 3 - .../checkpoints/OffsetCheckpointFile.scala | 2 +- .../metadata/BrokerMetadataPublisher.scala | 3 +- .../main/scala/kafka/tools/StorageTool.scala | 162 +- .../scala/kafka/tools/TestRaftServer.scala | 1 - .../main/scala/kafka/zk/AdminZkClient.scala | 4 +- .../log/remote/RemoteLogManagerTest.java | 126 +- .../server/LogManagerIntegrationTest.java | 137 -- .../MetadataVersionConfigValidatorTest.java | 100 - .../java/kafka/server/SharePartitionTest.java | 66 - ...ribeTopicPartitionsRequestHandlerTest.java | 2 + .../java/kafka/test/ClusterConfigTest.java | 16 - .../kafka/test/ClusterTestExtensionsTest.java | 2 +- .../kafka/test/annotation/ClusterTest.java | 2 +- .../kafka/testkit/KafkaClusterTestKit.java | 26 +- core/src/test/resources/log4j.properties | 1 + .../kafka/api/AbstractConsumerTest.scala | 7 +- .../api/PlaintextAdminIntegrationTest.scala | 15 - .../kafka/api/PlaintextConsumerPollTest.scala | 13 - .../PlaintextConsumerSubscriptionTest.scala | 24 +- .../kafka/api/PlaintextConsumerTest.scala | 2 +- .../kafka/server/DelayedRemoteFetchTest.scala | 21 +- .../kafka/server/QuorumTestHarness.scala | 14 +- .../kafka/zk/ZkMigrationIntegrationTest.scala | 2 +- .../metrics/LinuxIoMetricsCollectorTest.scala | 82 + .../scala/unit/kafka/KafkaConfigTest.scala | 4 +- .../kafka/cluster/PartitionLockTest.scala | 3 +- .../unit/kafka/cluster/PartitionTest.scala | 3 +- .../UncleanLeaderElectionTest.scala | 224 +-- .../kafka/log/LogCleanerManagerTest.scala | 3 +- .../scala/unit/kafka/log/LogCleanerTest.scala | 5 +- .../scala/unit/kafka/log/LogConfigTest.scala | 44 +- .../scala/unit/kafka/log/LogLoaderTest.scala | 25 +- .../scala/unit/kafka/log/LogManagerTest.scala | 7 - .../scala/unit/kafka/log/LogSegmentTest.scala | 16 +- .../unit/kafka/metrics/MetricsTest.scala | 5 +- .../unit/kafka/raft/RaftManagerTest.scala | 1 - 
.../kafka/server/ApiVersionsRequestTest.scala | 4 +- .../server/BrokerLifecycleManagerTest.scala | 36 +- .../ConsumerGroupDescribeRequestsTest.scala | 15 +- .../server/DynamicBrokerConfigTest.scala | 33 - .../unit/kafka/server/FetchRequestTest.scala | 6 +- .../GroupCoordinatorBaseRequestTest.scala | 5 +- .../unit/kafka/server/KafkaApisTest.scala | 30 +- .../unit/kafka/server/KafkaConfigTest.scala | 20 +- .../kafka/server/ListOffsetsRequestTest.scala | 4 +- .../unit/kafka/server/LogRecoveryTest.scala | 6 +- .../server/OffsetCommitRequestTest.scala | 2 +- .../OffsetsForLeaderEpochRequestTest.scala | 2 +- .../kafka/server/ReplicaManagerTest.scala | 126 +- .../kafka/server/ReplicationQuotasTest.scala | 2 +- .../unit/kafka/server/RequestQuotaTest.scala | 25 +- .../InMemoryLeaderEpochCheckpointTest.scala | 58 + ...CheckpointFileWithFailureHandlerTest.scala | 18 +- .../epoch/LeaderEpochFileCacheTest.scala | 64 +- .../BrokerMetadataPublisherTest.scala | 5 +- .../kafka/tools/DumpLogSegmentsTest.scala | 4 +- .../unit/kafka/tools/StorageToolTest.scala | 46 +- .../unit/kafka/utils/SchedulerTest.scala | 3 +- .../scala/unit/kafka/utils/TestUtils.scala | 33 +- docker/README.md | 41 +- docs/ops.html | 24 +- .../developer-guide/config-streams.html | 5 +- docs/streams/developer-guide/dsl-api.html | 8 +- .../developer-guide/write-streams.html | 14 +- gradle/dependencies.gradle | 14 +- gradle/wrapper/gradle-wrapper.properties | 4 +- gradlew | 2 +- .../group/api/assignor/GroupSpec.java | 62 - .../api/assignor/MemberSubscription.java | 43 - .../apache/kafka/coordinator/group/Group.java | 5 +- .../group/GroupCoordinatorConfig.java | 2 +- .../group/GroupMetadataManager.java | 39 +- .../group/OffsetMetadataManager.java | 32 +- .../AbstractUniformAssignmentBuilder.java | 68 + .../group/assignor/AssignmentMemberSpec.java | 122 ++ .../ConsumerGroupPartitionAssignor.java | 6 +- ...a => GeneralUniformAssignmentBuilder.java} | 101 +- .../group}/assignor/GroupAssignment.java | 8 +- 
.../group/assignor/GroupSpec.java} | 24 +- .../{consumer => assignor}/GroupSpecImpl.java | 65 +- .../MemberAssignment.java} | 29 +- ...=> OptimizedUniformAssignmentBuilder.java} | 32 +- .../group}/assignor/PartitionAssignor.java | 6 +- .../assignor/PartitionAssignorException.java | 2 +- .../group/assignor/RangeAssignor.java | 39 +- .../assignor/SubscribedTopicDescriber.java | 2 +- .../group}/assignor/SubscriptionType.java | 5 +- .../group/assignor/UniformAssignor.java | 27 +- .../group/classic/ClassicGroup.java | 4 +- .../group/consumer/Assignment.java | 4 +- .../group/consumer/ConsumerGroup.java | 61 +- .../MemberSubscriptionAndAssignmentImpl.java | 93 - ...Impl.java => SubscribedTopicMetadata.java} | 10 +- .../consumer/TargetAssignmentBuilder.java | 42 +- .../coordinator/group/consumer/TopicIds.java | 7 - .../group/runtime/EventAccumulator.java | 35 +- .../runtime/MultiThreadedEventProcessor.java | 8 +- .../coordinator/group/AssignmentTestUtil.java | 19 +- .../group/GroupCoordinatorConfigTest.java | 2 +- .../group/GroupMetadataManagerTest.java | 101 +- .../GroupMetadataManagerTestContext.java | 2 +- .../group/MockPartitionAssignor.java | 12 +- .../group/NoOpPartitionAssignor.java | 18 +- .../group/OffsetMetadataManagerTest.java | 75 +- ... 
GeneralUniformAssignmentBuilderTest.java} | 244 ++- .../group/assignor/GroupSpecImplTest.java | 49 +- ...OptimizedUniformAssignmentBuilderTest.java | 190 +- .../group/assignor/RangeAssignorTest.java | 393 ++-- .../group/classic/ClassicGroupTest.java | 28 +- .../group/consumer/ConsumerGroupTest.java | 73 +- .../consumer/SubscribedTopicMetadataTest.java | 10 +- .../consumer/TargetAssignmentBuilderTest.java | 101 +- .../group/runtime/EventAccumulatorTest.java | 18 +- .../MultiThreadedEventProcessorTest.java | 4 +- .../jmh/assignor/AssignorBenchmarkUtils.java | 6 +- .../assignor/ClientSideAssignorBenchmark.java | 8 +- .../assignor/ServerSideAssignorBenchmark.java | 73 +- .../TargetAssignmentBuilderBenchmark.java | 31 +- .../controller/ClusterControlManager.java | 2 +- .../controller/PartitionChangeBuilder.java | 5 +- .../kafka/controller/QuorumController.java | 3 +- .../controller/ReplicationControlManager.java | 2 +- .../errors/ControllerExceptions.java | 6 +- .../errors/EventHandlerExceptionInfo.java | 3 +- .../kafka/image/LocalReplicaChanges.java | 24 +- .../publisher/BrokerRegistrationTracker.java | 131 -- .../kafka/metadata/BrokerRegistration.java | 38 +- .../metadata/ControllerRegistration.java | 28 +- .../metadata/FinalizedControllerFeatures.java | 10 +- .../kafka/metadata/PartitionRegistration.java | 55 +- .../controller/AclControlManagerTest.java | 6 +- .../ClientQuotaControlManagerTest.java | 22 +- .../controller/ClusterControlManagerTest.java | 6 +- .../ConfigurationControlManagerTest.java | 13 +- .../controller/FeatureControlManagerTest.java | 12 +- .../controller/OffsetControlManagerTest.java | 16 +- .../PartitionChangeBuilderTest.java | 40 +- .../PartitionReassignmentReplicasTest.java | 2 +- .../QuorumControllerIntegrationTestUtils.java | 3 +- .../controller/QuorumControllerTest.java | 42 +- .../controller/QuorumControllerTestEnv.java | 1 + .../ReplicationControlManagerTest.java | 226 +-- .../kafka/image/ClientQuotasImageTest.java | 4 +- 
.../apache/kafka/image/ClusterImageTest.java | 19 +- .../kafka/image/ImageDowngradeTest.java | 91 +- .../apache/kafka/image/TopicsImageTest.java | 8 +- .../image/loader/MetadataLoaderTest.java | 68 +- .../node/ClusterImageBrokersNodeTest.java | 2 +- .../node/ClusterImageControllersNodeTest.java | 3 +- .../BrokerRegistrationTrackerTest.java | 150 -- .../publisher/SnapshotGeneratorTest.java | 5 +- .../metadata/BrokerRegistrationTest.java | 14 +- .../metadata/DelegationTokenDataTest.java | 3 +- .../kafka/metadata/ListenerInfoTest.java | 25 +- .../metadata/PartitionRegistrationTest.java | 10 +- .../kafka/metadata/RecordTestUtils.java | 2 +- .../apache/kafka/metadata/ReplicasTest.java | 8 +- .../StandardAclRecordIteratorTest.java | 3 +- .../bootstrap/BootstrapMetadataTest.java | 2 +- .../migration/KRaftMigrationDriverTest.java | 6 +- .../placement/StripedReplicaPlacerTest.java | 6 +- .../placement/TopicAssignmentTest.java | 7 +- .../MetaPropertiesEnsembleTest.java | 4 +- .../apache/kafka/metalog/LocalLogManager.java | 3 +- .../org/apache/kafka/raft/ElectionState.java | 18 +- .../kafka/raft/FileQuorumStateStore.java | 2 +- .../org/apache/kafka/raft/FollowerState.java | 36 +- .../kafka/raft/KafkaNetworkChannel.java | 39 +- .../apache/kafka/raft/KafkaRaftClient.java | 235 +-- .../org/apache/kafka/raft/LeaderState.java | 45 +- .../org/apache/kafka/raft/NetworkChannel.java | 9 +- .../org/apache/kafka/raft/QuorumConfig.java | 68 +- .../org/apache/kafka/raft/QuorumState.java | 62 +- .../org/apache/kafka/raft/RaftRequest.java | 45 +- .../org/apache/kafka/raft/RaftResponse.java | 39 +- .../java/org/apache/kafka/raft/RaftUtil.java | 3 - .../org/apache/kafka/raft/RequestManager.java | 329 +-- .../kafka/raft/internals/LogHistory.java | 6 +- .../kafka/raft/internals/ReplicaKey.java | 4 +- .../apache/kafka/raft/internals/VoterSet.java | 81 +- .../apache/kafka/raft/CandidateStateTest.java | 33 +- .../apache/kafka/raft/FollowerStateTest.java | 11 +- 
.../kafka/raft/KafkaNetworkChannelTest.java | 167 +- .../raft/KafkaRaftClientSnapshotTest.java | 135 +- .../kafka/raft/KafkaRaftClientTest.java | 482 +---- .../apache/kafka/raft/LeaderStateTest.java | 315 +-- .../org/apache/kafka/raft/MockLogTest.java | 4 +- .../apache/kafka/raft/MockNetworkChannel.java | 26 +- .../apache/kafka/raft/QuorumStateTest.java | 661 +++--- .../kafka/raft/RaftClientTestContext.java | 74 +- .../kafka/raft/RaftEventSimulationTest.java | 116 +- .../apache/kafka/raft/RequestManagerTest.java | 238 +-- .../raft/internals/BatchAccumulatorTest.java | 2 +- .../raft/internals/BatchBuilderTest.java | 5 +- .../KRaftControlRecordStateMachineTest.java | 28 +- .../raft/internals/KafkaRaftMetricsTest.java | 57 +- .../raft/internals/RecordsIteratorTest.java | 4 +- .../raft/internals/VoterSetHistoryTest.java | 18 +- .../kafka/raft/internals/VoterSetTest.java | 78 +- .../snapshot/RecordsSnapshotWriterTest.java | 7 +- .../kafka/deferred/DeferredEventQueue.java | 1 - .../apache/kafka/queue/KafkaEventQueue.java | 13 +- .../security/EncryptingPasswordEncoder.java | 9 +- .../kafka/security/GcmParamsEncoder.java | 3 +- .../kafka/security/IvParamsEncoder.java | 3 +- .../kafka/server/common/CheckpointFile.java | 8 +- .../kafka/server/common/GroupVersion.java | 2 +- .../kafka/server/common/MetadataVersion.java | 5 +- .../common/MetadataVersionValidator.java | 5 +- .../kafka/server/config/QuotaConfigs.java | 1 - .../config/ServerTopicConfigSynonyms.java | 7 +- .../fault/ProcessTerminatingFaultHandler.java | 4 +- .../PCollectionsImmutableMap.java | 1 - .../PCollectionsImmutableNavigableSet.java | 1 - .../PCollectionsImmutableSet.java | 1 - .../server/metrics/KafkaMetricsGroup.java | 15 +- .../server/metrics/KafkaYammerMetrics.java | 5 +- .../server/network/EndpointReadyFutures.java | 1 - .../kafka/server/util/CommandLineUtils.java | 7 +- .../apache/kafka/server/util/FutureUtils.java | 1 - .../server/util/InterBrokerSendThread.java | 21 +- 
.../org/apache/kafka/server/util/Json.java | 3 +- .../kafka/server/util/KafkaScheduler.java | 1 - .../kafka/server/util/ShutdownableThread.java | 1 - .../server/util/ThroughputThrottler.java | 1 + .../apache/kafka/server/util/TopicFilter.java | 1 - .../kafka/timeline/SnapshotRegistry.java | 7 +- .../deferred/DeferredEventQueueTest.java | 9 +- .../kafka/metadata/AssignmentsHelperTest.java | 1 - .../kafka/queue/KafkaEventQueueTest.java | 22 +- .../kafka/security/PasswordEncoderTest.java | 5 +- .../server/common/MetadataVersionTest.java | 6 +- .../common/MetadataVersionValidatorTest.java | 4 +- .../ProcessTerminatingFaultHandlerTest.java | 5 +- .../PCollectionsImmutableMapTest.java | 1 - ...PCollectionsImmutableNavigableSetTest.java | 1 - .../PCollectionsImmutableSetTest.java | 1 - .../metrics/KafkaYammerMetricsTest.java | 1 - .../network/EndpointReadyFuturesTest.java | 10 +- .../record/BrokerCompressionTypeTest.java | 1 - .../server/util/CommandLineUtilsTest.java | 7 +- .../kafka/server/util/DeadlineTest.java | 1 - .../kafka/server/util/FutureUtilsTest.java | 1 - .../util/InterBrokerSendThreadTest.java | 43 +- .../apache/kafka/server/util/JsonTest.java | 8 +- .../server/util/ShutdownableThreadTest.java | 1 - .../kafka/server/util/TopicFilterTest.java | 3 +- .../util/timer/SystemTimerReaperTest.java | 1 - .../server/util/timer/TimerTaskListTest.java | 1 - .../kafka/server/util/timer/TimerTest.java | 1 - .../kafka/timeline/BaseHashTableTest.java | 11 +- .../kafka/timeline/SnapshotRegistryTest.java | 11 +- .../timeline/SnapshottableHashTableTest.java | 11 +- .../kafka/timeline/TimelineHashMapTest.java | 9 +- .../kafka/timeline/TimelineHashSetTest.java | 5 +- .../kafka/timeline/TimelineIntegerTest.java | 5 +- .../kafka/timeline/TimelineLongTest.java | 5 +- .../kafka/timeline/TimelineObjectTest.java | 5 +- .../kafka/server/AssignmentsManager.java | 8 +- .../server/config/ReplicationConfigs.java | 3 +- .../metrics/LinuxIoMetricsCollector.java | 112 -- 
.../kafka/server/AssignmentsManagerTest.java | 58 +- .../metrics/LinuxIoMetricsCollectorTest.java | 92 - settings.gradle | 1 - .../org/apache/kafka/shell/MetadataShell.java | 2 +- .../shell/command/CatCommandHandler.java | 3 +- .../kafka/shell/command/CdCommandHandler.java | 3 +- .../command/ErroneousCommandHandler.java | 3 +- .../shell/command/ExitCommandHandler.java | 3 +- .../shell/command/FindCommandHandler.java | 3 +- .../shell/command/HelpCommandHandler.java | 3 +- .../kafka/shell/command/LsCommandHandler.java | 12 +- .../shell/command/ManCommandHandler.java | 3 +- .../shell/command/NoOpCommandHandler.java | 3 +- .../shell/command/PwdCommandHandler.java | 3 +- .../shell/command/TreeCommandHandler.java | 3 +- .../apache/kafka/shell/glob/GlobVisitor.java | 3 +- .../kafka/shell/command/CommandTest.java | 18 +- .../kafka/shell/glob/GlobVisitorTest.java | 13 +- .../remote/metadata/storage/ConsumerTask.java | 15 +- .../storage/RemoteLogManagerConfig.java | 558 +++--- .../CheckpointFileWithFailureHandler.java | 22 +- .../InMemoryLeaderEpochCheckpoint.java | 63 + .../checkpoint/LeaderEpochCheckpoint.java | 22 +- .../checkpoint/LeaderEpochCheckpointFile.java | 11 +- .../internals/epoch/LeaderEpochFileCache.java | 168 +- .../metadata/storage/ConsumerTaskTest.java | 62 +- .../RemoteLogMetadataFormatterTest.java | 18 +- .../RemoteLogMetadataManagerTestUtils.java | 23 +- .../storage/RemoteLogMetadataSerdeTest.java | 38 +- .../RemoteLogMetadataTransformTest.java | 40 +- .../RemoteLogSegmentLifecycleManager.java | 63 + .../RemoteLogSegmentLifecycleTest.java | 562 ++++-- ...sedRemoteLogMetadataManagerConfigTest.java | 53 +- ...cBasedRemoteLogMetadataManagerHarness.java | 95 + ...adataManagerMultipleSubscriptionsTest.java | 41 +- ...edRemoteLogMetadataManagerRestartTest.java | 181 +- ...opicBasedRemoteLogMetadataManagerTest.java | 56 +- ...eLogMetadataManagerWrapperWithHarness.java | 105 + .../storage/RemoteLogManagerConfigTest.java | 107 +- 
.../storage/RemoteLogMetadataManagerTest.java | 83 +- .../storage/TieredStorageTestContext.java | 3 +- .../apache/kafka/streams/StreamsConfig.java | 19 +- .../streams/errors/ErrorHandlerContext.java | 133 -- ...AndContinueProcessingExceptionHandler.java | 46 - .../LogAndFailProcessingExceptionHandler.java | 46 - .../errors/ProcessingExceptionHandler.java | 56 - .../internals/DefaultErrorHandlerContext.java | 90 - .../internals/WrappingNullableUtils.java | 13 +- .../assignment/ApplicationState.java | 2 +- .../assignment/AssignmentConfigs.java | 104 +- .../assignment/TaskAssignmentUtils.java | 171 +- .../processor/assignment/TaskAssignor.java | 13 +- .../assignors/StickyTaskAssignor.java | 86 +- .../internals/InternalTopologyBuilder.java | 41 +- .../processor/internals/PartitionGrouper.java | 45 +- .../internals/RecordCollectorImpl.java | 3 +- .../streams/processor/internals/SinkNode.java | 15 +- .../processor/internals/SourceNode.java | 15 +- .../internals/StreamsPartitionAssignor.java | 168 +- .../processor/internals/StreamsProducer.java | 6 - .../processor/internals/TopologyMetadata.java | 4 - .../assignment/AssignorConfiguration.java | 98 +- .../ClientTagAwareStandbyTaskAssignor.java | 14 +- .../assignment/DefaultApplicationState.java | 2 +- .../DefaultStandbyTaskAssignor.java | 5 +- .../assignment/DefaultTaskTopicPartition.java | 15 +- .../assignment/FallbackPriorTaskAssignor.java | 2 +- .../HighAvailabilityTaskAssignor.java | 24 +- ...sState.java => KafkaStreamsStateImpl.java} | 22 +- .../RackAwareGraphConstructorFactory.java | 4 +- .../assignment/RackAwareTaskAssignor.java | 10 +- .../internals/assignment/RackUtils.java | 3 +- .../assignment/StandbyTaskAssignor.java | 4 +- .../StandbyTaskAssignorFactory.java | 5 +- .../assignment/StickyTaskAssignor.java | 18 +- .../internals/assignment/TaskAssignor.java | 2 +- .../state/internals/MeteredKeyValueStore.java | 15 +- .../state/internals/MeteredSessionStore.java | 15 +- .../MeteredTimestampedKeyValueStore.java | 2 
+- .../MeteredVersionedKeyValueStore.java | 18 +- .../state/internals/MeteredWindowStore.java | 13 +- .../internals/StoreSerdeInitializer.java | 76 - .../kafka/streams/StreamsConfigTest.java | 27 - .../TaskAssignorIntegrationTest.java | 14 +- .../InternalTopologyBuilderTest.java | 24 - .../internals/ProcessorNodeTest.java | 8 +- .../internals/RecordCollectorTest.java | 67 +- .../processor/internals/SinkNodeTest.java | 68 +- .../processor/internals/SourceNodeTest.java | 83 +- .../internals/StreamsAssignmentScaleTest.java | 41 +- .../StreamsPartitionAssignorTest.java | 207 +- .../assignment/AssignmentTestUtils.java | 2 +- .../assignment/AssignorConfigurationTest.java | 3 +- ...ClientTagAwareStandbyTaskAssignorTest.java | 2 +- .../CustomStickyTaskAssignorTest.java | 799 -------- .../FallbackPriorTaskAssignorTest.java | 3 +- .../HighAvailabilityTaskAssignorTest.java | 6 +- .../assignment/KafkaStreamsStateTest.java | 4 +- .../RackAwareGraphConstructorFactoryTest.java | 2 +- .../assignment/RackAwareTaskAssignorTest.java | 2 +- .../StandbyTaskAssignorFactoryTest.java | 13 +- .../assignment/StickyTaskAssignorTest.java | 16 +- .../assignment/TaskAssignmentUtilsTest.java | 348 ---- .../TaskAssignorConvergenceTest.java | 2 +- .../internals/StoreSerdeInitializerTest.java | 157 -- .../ReassignPartitionsIntegrationTest.java | 930 +++++++++ 594 files changed, 9238 insertions(+), 16226 deletions(-) delete mode 100644 clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterResult.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterResult.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgeType.java delete mode 100644 
clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegate.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java delete mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/internals/TimedRequestState.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterResponse.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java delete mode 100644 clients/src/main/resources/common/message/AddRaftVoterRequest.json delete mode 100644 clients/src/main/resources/common/message/AddRaftVoterResponse.json delete mode 100644 clients/src/main/resources/common/message/RemoveRaftVoterRequest.json delete mode 100644 clients/src/main/resources/common/message/RemoveRaftVoterResponse.json delete mode 100644 clients/src/main/resources/common/message/UpdateRaftVoterRequest.json delete mode 100644 clients/src/main/resources/common/message/UpdateRaftVoterResponse.json delete mode 100644 clients/src/test/java/org/apache/kafka/clients/consumer/MockShareConsumerTest.java delete mode 100644 
clients/src/test/java/org/apache/kafka/clients/consumer/internals/TimedRequestStateTest.java delete mode 100644 core/src/main/java/kafka/server/MetadataVersionConfigValidator.java delete mode 100644 core/src/main/java/kafka/server/SharePartition.java delete mode 100644 core/src/main/java/kafka/server/SharePartitionManager.java create mode 100644 core/src/main/scala/kafka/metrics/LinuxIoMetricsCollector.scala delete mode 100644 core/src/test/java/kafka/server/LogManagerIntegrationTest.java delete mode 100644 core/src/test/java/kafka/server/MetadataVersionConfigValidatorTest.java delete mode 100644 core/src/test/java/kafka/server/SharePartitionTest.java create mode 100644 core/src/test/scala/kafka/metrics/LinuxIoMetricsCollectorTest.scala create mode 100644 core/src/test/scala/unit/kafka/server/checkpoints/InMemoryLeaderEpochCheckpointTest.scala delete mode 100644 group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupSpec.java delete mode 100644 group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberSubscription.java create mode 100644 group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/AbstractUniformAssignmentBuilder.java create mode 100644 group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/AssignmentMemberSpec.java rename group-coordinator/{group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api => src/main/java/org/apache/kafka/coordinator/group}/assignor/ConsumerGroupPartitionAssignor.java (85%) rename group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/{UniformHeterogeneousAssignmentBuilder.java => GeneralUniformAssignmentBuilder.java} (90%) rename group-coordinator/{group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api => src/main/java/org/apache/kafka/coordinator/group}/assignor/GroupAssignment.java (89%) rename 
group-coordinator/{group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberAssignment.java => src/main/java/org/apache/kafka/coordinator/group/assignor/GroupSpec.java} (61%) rename group-coordinator/src/main/java/org/apache/kafka/coordinator/group/{consumer => assignor}/GroupSpecImpl.java (54%) rename group-coordinator/src/main/java/org/apache/kafka/coordinator/group/{consumer/MemberAssignmentImpl.java => assignor/MemberAssignment.java} (61%) rename group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/{UniformHomogeneousAssignmentBuilder.java => OptimizedUniformAssignmentBuilder.java} (90%) rename group-coordinator/{group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api => src/main/java/org/apache/kafka/coordinator/group}/assignor/PartitionAssignor.java (90%) rename group-coordinator/{group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api => src/main/java/org/apache/kafka/coordinator/group}/assignor/PartitionAssignorException.java (95%) rename group-coordinator/{group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api => src/main/java/org/apache/kafka/coordinator/group}/assignor/SubscribedTopicDescriber.java (97%) rename group-coordinator/{group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api => src/main/java/org/apache/kafka/coordinator/group}/assignor/SubscriptionType.java (90%) delete mode 100644 group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/MemberSubscriptionAndAssignmentImpl.java rename group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/{SubscribedTopicDescriberImpl.java => SubscribedTopicMetadata.java} (89%) rename group-coordinator/src/test/java/org/apache/kafka/coordinator/group/assignor/{UniformHeterogeneousAssignmentBuilderTest.java => GeneralUniformAssignmentBuilderTest.java} (73%) delete mode 100644 
metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java delete mode 100644 metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java delete mode 100644 server/src/main/java/org/apache/kafka/server/metrics/LinuxIoMetricsCollector.java delete mode 100644 server/src/test/java/org/apache/kafka/server/metrics/LinuxIoMetricsCollectorTest.java create mode 100644 storage/src/main/java/org/apache/kafka/storage/internals/checkpoint/InMemoryLeaderEpochCheckpoint.java rename clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterOptions.java => storage/src/main/java/org/apache/kafka/storage/internals/checkpoint/LeaderEpochCheckpoint.java (61%) create mode 100644 storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogSegmentLifecycleManager.java create mode 100644 storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerWrapperWithHarness.java delete mode 100644 streams/src/main/java/org/apache/kafka/streams/errors/ErrorHandlerContext.java delete mode 100644 streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.java delete mode 100644 streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.java delete mode 100644 streams/src/main/java/org/apache/kafka/streams/errors/ProcessingExceptionHandler.java delete mode 100644 streams/src/main/java/org/apache/kafka/streams/errors/internals/DefaultErrorHandlerContext.java rename streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/{DefaultKafkaStreamsState.java => KafkaStreamsStateImpl.java} (87%) delete mode 100644 streams/src/main/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializer.java delete mode 100644 streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/CustomStickyTaskAssignorTest.java delete mode 100644 
streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/TaskAssignmentUtilsTest.java delete mode 100644 streams/src/test/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializerTest.java create mode 100644 tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsIntegrationTest.java diff --git a/README.md b/README.md index 8419e7cf1ad9c..27ce0dc0bce64 100644 --- a/README.md +++ b/README.md @@ -227,16 +227,11 @@ There are two code quality analysis tools that we regularly run, spotbugs and ch Checkstyle enforces a consistent coding style in Kafka. You can run checkstyle using: - ./gradlew checkstyleMain checkstyleTest spotlessCheck + ./gradlew checkstyleMain checkstyleTest The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the subproject build directories. They are also printed to the console. The build will fail if Checkstyle fails. -#### Spotless #### -The import order is a part of static check. please call `spotlessApply` (require JDK 11+) to optimize the imports of Java codes before filing pull request. - - ./gradlew spotlessApply - #### Spotbugs #### Spotbugs uses static analysis to look for bugs in the code. 
You can run spotbugs using: diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 7e7e61b097793..ab6177961f51e 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -30,6 +30,8 @@ + + @@ -439,7 +441,6 @@ - @@ -620,6 +621,9 @@ + + + diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml index ed50889ef6d96..fc6995dadfe7e 100644 --- a/checkstyle/suppressions.xml +++ b/checkstyle/suppressions.xml @@ -200,7 +200,7 @@ files="StreamThread.java"/> + files="(KafkaStreams|KStreamImpl|KTableImpl).java"/> @@ -209,7 +209,7 @@ files="StreamsMetricsImpl.java"/> + files="(KafkaStreams|StreamsPartitionAssignor|StreamThread|TaskManager|GlobalStateManagerImpl|KStreamImplJoin|TopologyConfig|KTableKTableOuterJoin).java"/> diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 4ebd600cbd854..6b6b56059c84d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -124,7 +124,7 @@ static List resolve(String host, HostResolver hostResolver) throws InetAddress[] addresses = hostResolver.resolve(host); List result = filterPreferredAddresses(addresses); if (log.isDebugEnabled()) - log.debug("Resolved host {} as {}", host, result.stream().map(InetAddress::getHostAddress).collect(Collectors.joining(","))); + log.debug("Resolved host {} as {}", host, result.stream().map(i -> i.getHostAddress()).collect(Collectors.joining(","))); return result; } diff --git a/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java b/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java index dc3d9f4d7458b..2bd70206fbe0f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java +++ b/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java @@ -148,7 +148,7 @@ public Iterable clearAll(String node) { 
} else { final Deque clearedRequests = requests.remove(node); inFlightRequestCount.getAndAdd(-clearedRequests.size()); - return clearedRequests::descendingIterator; + return () -> clearedRequests.descendingIterator(); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/Metadata.java b/clients/src/main/java/org/apache/kafka/clients/Metadata.java index 0aec2287c5d4b..30cad44a4bc97 100644 --- a/clients/src/main/java/org/apache/kafka/clients/Metadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/Metadata.java @@ -425,8 +425,8 @@ public synchronized Set updatePartitionLeadership(Map existingTopicIds = this.metadataSnapshot.topicIds(); Map topicIdsForUpdatedTopics = updatedTopics.stream() - .filter(existingTopicIds::containsKey) - .collect(Collectors.toMap(e -> e, existingTopicIds::get)); + .filter(e -> existingTopicIds.containsKey(e)) + .collect(Collectors.toMap(e -> e, e -> existingTopicIds.get(e))); if (log.isDebugEnabled()) { updatePartitionMetadata.forEach( diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 1fce5eead7bf3..3a7af6617e773 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -821,8 +821,9 @@ private void processDisconnection(List responses, break; case AUTHENTICATE: log.warn("Connection to node {} ({}) terminated during authentication. 
This may happen " + - "due to any of the following reasons: (1) Firewall blocking Kafka TLS " + - "traffic (eg it may only allow HTTPS traffic), (2) Transient network issue.", + "due to any of the following reasons: (1) Authentication failed due to invalid " + + "credentials with brokers older than 1.0.0, (2) Firewall blocking Kafka TLS " + + "traffic (eg it may only allow HTTPS traffic), (3) Transient network issue.", nodeId, disconnectState.remoteAddress()); break; case NOT_CONNECTED: diff --git a/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java b/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java index 12a7d019921b5..838718652f37c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java +++ b/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java @@ -174,9 +174,10 @@ public String toString(boolean lineBreaks) { // which may happen when the remote is too old. for (ApiKeys apiKey : ApiKeys.clientApis()) { if (!apiKeysText.containsKey(apiKey.id)) { - String bld = apiKey.name + "(" + - apiKey.id + "): " + "UNSUPPORTED"; - apiKeysText.put(apiKey.id, bld); + StringBuilder bld = new StringBuilder(); + bld.append(apiKey.name).append("("). + append(apiKey.id).append("): ").append("UNSUPPORTED"); + apiKeysText.put(apiKey.id, bld.toString()); } } String separator = lineBreaks ? ",\n\t" : ", "; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterResult.java deleted file mode 100644 index d42204c5e4e79..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterResult.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.admin; - -import org.apache.kafka.common.KafkaFuture; -import org.apache.kafka.common.annotation.InterfaceStability; - -/** - * The result of {@link org.apache.kafka.clients.admin.Admin#addRaftVoter(int, org.apache.kafka.common.Uuid, java.util.Set, org.apache.kafka.clients.admin.AddRaftVoterOptions)}. - * - * The API of this class is evolving, see {@link Admin} for details. - */ -@InterfaceStability.Stable -public class AddRaftVoterResult { - private final KafkaFuture result; - - AddRaftVoterResult(KafkaFuture result) { - this.result = result; - } - - /** - * Returns a future that completes when the voter has been added. - */ - public KafkaFuture all() { - return result; - } - -} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java index 8d62069b279f3..d936ec80ffe81 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java @@ -901,7 +901,7 @@ default DescribeConsumerGroupsResult describeConsumerGroups(Collection g * List the consumer groups available in the cluster. * * @param options The options to use when listing the consumer groups. - * @return The ListConsumerGroupsResult. + * @return The ListGroupsResult. 
*/ ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options); @@ -911,7 +911,7 @@ default DescribeConsumerGroupsResult describeConsumerGroups(Collection g * This is a convenience method for {@link #listConsumerGroups(ListConsumerGroupsOptions)} with default options. * See the overload for more details. * - * @return The ListConsumerGroupsResult. + * @return The ListGroupsResult. */ default ListConsumerGroupsResult listConsumerGroups() { return listConsumerGroups(new ListConsumerGroupsOptions()); @@ -921,7 +921,7 @@ default ListConsumerGroupsResult listConsumerGroups() { * List the consumer group offsets available in the cluster. * * @param options The options to use when listing the consumer group offsets. - * @return The ListConsumerGroupOffsetsResult + * @return The ListGroupOffsetsResult */ default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options) { @SuppressWarnings("deprecation") @@ -939,7 +939,7 @@ default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId, * This is a convenience method for {@link #listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)} * to list offsets of all partitions of one group with default options. * - * @return The ListConsumerGroupOffsetsResult. + * @return The ListGroupOffsetsResult. */ default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId) { return listConsumerGroupOffsets(groupId, new ListConsumerGroupOffsetsOptions()); @@ -1711,62 +1711,6 @@ default ListClientMetricsResourcesResult listClientMetricsResources() { */ Uuid clientInstanceId(Duration timeout); - /** - * Add a new voter node to the KRaft metadata quorum. - * - * @param voterId The node ID of the voter. - * @param voterDirectoryId The directory ID of the voter. - * @param endpoints The endpoints that the new voter has. 
- */ - default AddRaftVoterResult addRaftVoter( - int voterId, - Uuid voterDirectoryId, - Set endpoints - ) { - return addRaftVoter(voterId, voterDirectoryId, endpoints, new AddRaftVoterOptions()); - } - - /** - * Add a new voter node to the KRaft metadata quorum. - * - * @param voterId The node ID of the voter. - * @param voterDirectoryId The directory ID of the voter. - * @param endpoints The endpoints that the new voter has. - * @param options The options to use when adding the new voter node. - */ - AddRaftVoterResult addRaftVoter( - int voterId, - Uuid voterDirectoryId, - Set endpoints, - AddRaftVoterOptions options - ); - - /** - * Remove a voter node from the KRaft metadata quorum. - * - * @param voterId The node ID of the voter. - * @param voterDirectoryId The directory ID of the voter. - */ - default RemoveRaftVoterResult removeRaftVoter( - int voterId, - Uuid voterDirectoryId - ) { - return removeRaftVoter(voterId, voterDirectoryId, new RemoveRaftVoterOptions()); - } - - /** - * Remove a voter node from the KRaft metadata quorum. - * - * @param voterId The node ID of the voter. - * @param voterDirectoryId The directory ID of the voter. - * @param options The options to use when removing the voter node. 
- */ - RemoveRaftVoterResult removeRaftVoter( - int voterId, - Uuid voterDirectoryId, - RemoveRaftVoterOptions options - ); - /** * Get the metrics kept by the adminClient */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java index ebab1507e6cc2..8e3c2d2822615 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java @@ -44,7 +44,9 @@ public DescribeConsumerGroupsResult(final Map> describedGroups() { - return new HashMap<>(futures); + Map> describedGroups = new HashMap<>(); + futures.forEach((key, future) -> describedGroups.put(key, future)); + return describedGroups; } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.java index fabd91b58f782..ef9f105850a5f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.java @@ -32,9 +32,9 @@ public class DescribeDelegationTokenOptions extends AbstractOptions owners; /** - * If owners is null, all the user owned tokens and tokens where user have Describe permission + * if owners is null, all the user owned tokens and tokens where user have Describe permission * will be returned. 
- * @param owners The owners that we want to describe delegation tokens for + * @param owners The owners to describe delegation tokens for * @return this instance */ public DescribeDelegationTokenOptions owners(List owners) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java index d6f1114d61f4a..c57867aad7c8d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java @@ -52,7 +52,7 @@ public Map> return descriptions().entrySet().stream() .collect(Collectors.toMap( Map.Entry::getKey, - entry -> entry.getValue().thenApply(this::convertMapValues))); + entry -> entry.getValue().thenApply(map -> convertMapValues(map)))); } @SuppressWarnings("deprecation") @@ -88,7 +88,7 @@ public Map>> descriptions() @Deprecated public KafkaFuture>> all() { return allDescriptions().thenApply(map -> map.entrySet().stream().collect(Collectors.toMap( - Map.Entry::getKey, + entry -> entry.getKey(), entry -> convertMapValues(entry.getValue()) ))); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java index 3e7457bda5946..54bd9c142b0b0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java @@ -51,18 +51,21 @@ public Map> values() { * Return a future which succeeds if log directory information of all replicas are available */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) - .thenApply(v -> { - Map replicaLogDirInfos = new HashMap<>(); - for (Map.Entry> entry : futures.entrySet()) { - try { - replicaLogDirInfos.put(entry.getKey(), entry.getValue().get()); - } catch
(InterruptedException | ExecutionException e) { - // This should be unreachable, because allOf ensured that all the futures completed successfully. - throw new RuntimeException(e); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). + thenApply(new KafkaFuture.BaseFunction>() { + @Override + public Map apply(Void v) { + Map replicaLogDirInfos = new HashMap<>(); + for (Map.Entry> entry : futures.entrySet()) { + try { + replicaLogDirInfos.put(entry.getKey(), entry.getValue().get()); + } catch (InterruptedException | ExecutionException e) { + // This should be unreachable, because allOf ensured that all the futures completed successfully. + throw new RuntimeException(e); + } } + return replicaLogDirInfos; } - return replicaLogDirInfos; }); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java b/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java index ad15c22498f7c..9fc809dbddd81 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java @@ -288,16 +288,6 @@ public Uuid clientInstanceId(Duration timeout) { return delegate.clientInstanceId(timeout); } - @Override - public AddRaftVoterResult addRaftVoter(int voterId, Uuid voterDirectoryId, Set endpoints, AddRaftVoterOptions options) { - return delegate.addRaftVoter(voterId, voterDirectoryId, endpoints, options); - } - - @Override - public RemoveRaftVoterResult removeRaftVoter(int voterId, Uuid voterDirectoryId, RemoveRaftVoterOptions options) { - return delegate.removeRaftVoter(voterId, voterDirectoryId, options); - } - @Override public Map metrics() { return delegate.metrics(); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index c59cccf67c4b8..71d39900cd5a8 100644 --- 
a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -97,7 +97,6 @@ import org.apache.kafka.common.errors.UnsupportedSaslMechanismException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.internals.KafkaFutureImpl; -import org.apache.kafka.common.message.AddRaftVoterRequestData; import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData; import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic; import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData; @@ -155,7 +154,6 @@ import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.message.ListPartitionReassignmentsRequestData; import org.apache.kafka.common.message.MetadataRequestData; -import org.apache.kafka.common.message.RemoveRaftVoterRequestData; import org.apache.kafka.common.message.RenewDelegationTokenRequestData; import org.apache.kafka.common.message.UnregisterBrokerRequestData; import org.apache.kafka.common.message.UpdateFeaturesRequestData; @@ -172,8 +170,6 @@ import org.apache.kafka.common.quota.ClientQuotaFilter; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.AbstractResponse; -import org.apache.kafka.common.requests.AddRaftVoterRequest; -import org.apache.kafka.common.requests.AddRaftVoterResponse; import org.apache.kafka.common.requests.AlterClientQuotasRequest; import org.apache.kafka.common.requests.AlterClientQuotasResponse; import org.apache.kafka.common.requests.AlterConfigsRequest; @@ -236,7 +232,6 @@ import org.apache.kafka.common.requests.ListPartitionReassignmentsResponse; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.MetadataResponse; -import org.apache.kafka.common.requests.RemoveRaftVoterRequest; import 
org.apache.kafka.common.requests.RenewDelegationTokenRequest; import org.apache.kafka.common.requests.RenewDelegationTokenResponse; import org.apache.kafka.common.requests.UnregisterBrokerRequest; @@ -276,6 +271,7 @@ import java.util.OptionalLong; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -2128,7 +2124,7 @@ else if (topics instanceof TopicNameCollection) throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for describeTopics."); } - private Call generateDescribeTopicsCallWithMetadataApi( + Call generateDescribeTopicsCallWithMetadataApi( List topicNamesList, Map> topicFutures, DescribeTopicsOptions options, @@ -2191,7 +2187,7 @@ void handleFailure(Throwable throwable) { }; } - private Call generateDescribeTopicsCallWithDescribeTopicPartitionsApi( + Call generateDescribeTopicsCallWithDescribeTopicPartitionsApi( List topicNamesList, Map> topicFutures, Map nodes, @@ -2199,9 +2195,9 @@ private Call generateDescribeTopicsCallWithDescribeTopicPartitionsApi( long now ) { final Map topicsRequests = new LinkedHashMap<>(); - topicNamesList.stream().sorted().forEach(topic -> - topicsRequests.put(topic, new TopicRequest().setName(topic)) - ); + topicNamesList.stream().sorted().forEach(topic -> { + topicsRequests.put(topic, new TopicRequest().setName(topic)); + }); return new Call("describeTopicPartitions", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { TopicDescription partiallyFinishedTopicDescription = null; @@ -2318,27 +2314,27 @@ private Map> handleDescribeTopicsByNamesWi } if (topicNamesList.isEmpty()) { - return Collections.unmodifiableMap(topicFutures); + return new HashMap<>(topicFutures); } // First, we need to retrieve the node info. 
DescribeClusterResult clusterResult = describeCluster(); - clusterResult.nodes().whenComplete( - (nodes, exception) -> { - if (exception != null) { - completeAllExceptionally(topicFutures.values(), exception); - return; - } + Map nodes; + try { + nodes = clusterResult.nodes().get().stream().collect(Collectors.toMap(Node::id, node -> node)); + } catch (InterruptedException | ExecutionException e) { + completeAllExceptionally(topicFutures.values(), e.getCause()); + return new HashMap<>(topicFutures); + } - final long now = time.milliseconds(); - Map nodeIdMap = nodes.stream().collect(Collectors.toMap(Node::id, node -> node)); - runnable.call( - generateDescribeTopicsCallWithDescribeTopicPartitionsApi(topicNamesList, topicFutures, nodeIdMap, options, now), - now - ); - }); + final long now = time.milliseconds(); + + runnable.call( + generateDescribeTopicsCallWithDescribeTopicPartitionsApi(topicNamesList, topicFutures, nodes, options, now), + now + ); - return Collections.unmodifiableMap(topicFutures); + return new HashMap<>(topicFutures); } private Map> handleDescribeTopicsByIds(Collection topicIds, DescribeTopicsOptions options) { @@ -3047,7 +3043,7 @@ public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) { public void handleResponse(AbstractResponse abstractResponse) { DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse; Map descriptions = logDirDescriptions(response); - if (!descriptions.isEmpty()) { + if (descriptions.size() > 0) { future.complete(descriptions); } else { // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None @@ -3559,10 +3555,10 @@ private void maybeAddConsumerGroup(ListGroupsResponseData.ListedGroup group) { String protocolType = group.protocolType(); if (protocolType.equals(ConsumerProtocol.PROTOCOL_TYPE) || protocolType.isEmpty()) { final String groupId = group.groupId(); - final Optional state = group.groupState().isEmpty() + final Optional state = 
group.groupState().equals("") ? Optional.empty() : Optional.of(ConsumerGroupState.parse(group.groupState())); - final Optional type = group.groupType().isEmpty() + final Optional type = group.groupType().equals("") ? Optional.empty() : Optional.of(GroupType.parse(group.groupType())); final ConsumerGroupListing groupListing = new ConsumerGroupListing( @@ -4214,9 +4210,9 @@ public void handleResponse(AbstractResponse abstractResponse) { * Be sure to do this after the NOT_CONTROLLER error check above * so that all errors are consistent in that case. */ - userIllegalAlterationExceptions.entrySet().stream().forEach(entry -> - futures.get(entry.getKey()).completeExceptionally(entry.getValue()) - ); + userIllegalAlterationExceptions.entrySet().stream().forEach(entry -> { + futures.get(entry.getKey()).completeExceptionally(entry.getValue()); + }); response.data().results().forEach(result -> { KafkaFutureImpl future = futures.get(result.user()); if (future == null) { @@ -4573,7 +4569,7 @@ public ListTransactionsResult listTransactions(ListTransactionsOptions options) public FenceProducersResult fenceProducers(Collection transactionalIds, FenceProducersOptions options) { AdminApiFuture.SimpleAdminApiFuture future = FenceProducersHandler.newFuture(transactionalIds); - FenceProducersHandler handler = new FenceProducersHandler(options, logContext, requestTimeoutMs); + FenceProducersHandler handler = new FenceProducersHandler(logContext); invokeDriver(handler, future, options.timeoutMs); return new FenceProducersResult(future.all()); } @@ -4608,101 +4604,6 @@ void handleFailure(Throwable throwable) { return new ListClientMetricsResourcesResult(future); } - @Override - public AddRaftVoterResult addRaftVoter( - int voterId, - Uuid voterDirectoryId, - Set endpoints, - AddRaftVoterOptions options - ) { - NodeProvider provider = new LeastLoadedBrokerOrActiveKController(); - - final KafkaFutureImpl future = new KafkaFutureImpl<>(); - final long now = time.milliseconds(); - final Call 
call = new Call( - "addRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { - - @Override - AddRaftVoterRequest.Builder createRequest(int timeoutMs) { - AddRaftVoterRequestData.ListenerCollection listeners = - new AddRaftVoterRequestData.ListenerCollection(); - endpoints.forEach(endpoint -> - listeners.add(new AddRaftVoterRequestData.Listener(). - setName(endpoint.name()). - setHost(endpoint.host()). - setPort(endpoint.port()))); - return new AddRaftVoterRequest.Builder( - new AddRaftVoterRequestData(). - setVoterId(voterId) . - setVoterDirectoryId(voterDirectoryId). - setListeners(listeners)); - } - - @Override - void handleResponse(AbstractResponse response) { - AddRaftVoterResponse addResponse = (AddRaftVoterResponse) response; - if (addResponse.data().errorCode() != Errors.NONE.code()) { - ApiError error = new ApiError( - addResponse.data().errorCode(), - addResponse.data().errorMessage()); - future.completeExceptionally(error.exception()); - } else { - future.complete(null); - } - } - - @Override - void handleFailure(Throwable throwable) { - future.completeExceptionally(throwable); - } - }; - runnable.call(call, now); - return new AddRaftVoterResult(future); - } - - @Override - public RemoveRaftVoterResult removeRaftVoter( - int voterId, - Uuid voterDirectoryId, - RemoveRaftVoterOptions options - ) { - NodeProvider provider = new LeastLoadedBrokerOrActiveKController(); - - final KafkaFutureImpl future = new KafkaFutureImpl<>(); - final long now = time.milliseconds(); - final Call call = new Call( - "removeRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { - - @Override - RemoveRaftVoterRequest.Builder createRequest(int timeoutMs) { - return new RemoveRaftVoterRequest.Builder( - new RemoveRaftVoterRequestData(). - setVoterId(voterId) . 
- setVoterDirectoryId(voterDirectoryId)); - } - - @Override - void handleResponse(AbstractResponse response) { - AddRaftVoterResponse addResponse = (AddRaftVoterResponse) response; - if (addResponse.data().errorCode() != Errors.NONE.code()) { - ApiError error = new ApiError( - addResponse.data().errorCode(), - addResponse.data().errorMessage()); - future.completeExceptionally(error.exception()); - } else { - future.complete(null); - } - } - - @Override - void handleFailure(Throwable throwable) { - future.completeExceptionally(throwable); - } - }; - runnable.call(call, now); - return new RemoveRaftVoterResult(future); - } - @Override public Uuid clientInstanceId(Duration timeout) { if (timeout.isNegative()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListTopicsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListTopicsResult.java index 15ea042eb1f16..21540732d30d2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListTopicsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListTopicsResult.java @@ -48,13 +48,13 @@ public KafkaFuture> namesToListings() { * Return a future which yields a collection of TopicListing objects. */ public KafkaFuture> listings() { - return future.thenApply(Map::values); + return future.thenApply(namesToDescriptions -> namesToDescriptions.values()); } /** * Return a future which yields a collection of topic names. 
*/ public KafkaFuture> names() { - return future.thenApply(Map::keySet); + return future.thenApply(namesToListings -> namesToListings.keySet()); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java index ba950566d25b8..49cf484f5d625 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java @@ -78,7 +78,7 @@ public ListTransactionsOptions filterOnDuration(long durationMs) { /** * Returns the set of states to be filtered or empty if no states have been specified. * - * @return the current set of filtered states (empty means that no states are filtered and + * @return the current set of filtered states (empty means that no states are filtered and * all transactions will be returned) */ public Set filteredStates() { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsResult.java index c22f8b7791baf..c9670dba55245 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsResult.java @@ -103,7 +103,7 @@ public KafkaFuture>> allByBrokerId() } Set remainingResponses = new HashSet<>(map.keySet()); - map.forEach((brokerId, future) -> + map.forEach((brokerId, future) -> { future.whenComplete((listings, brokerException) -> { if (brokerException != null) { allFuture.completeExceptionally(brokerException); @@ -115,8 +115,8 @@ public KafkaFuture>> allByBrokerId() allFuture.complete(allListingsMap); } } - }) - ); + }); + }); }); return allFuture; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java b/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java index
0151e6f61793f..2f335d02f2f2b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java @@ -147,12 +147,14 @@ CreatableTopic convertToCreatableTopic() { @Override public String toString() { - return "(name=" + name + - ", numPartitions=" + numPartitions.map(String::valueOf).orElse("default") + - ", replicationFactor=" + replicationFactor.map(String::valueOf).orElse("default") + - ", replicasAssignments=" + replicasAssignments + - ", configs=" + configs + - ")"; + StringBuilder bld = new StringBuilder(); + bld.append("(name=").append(name). + append(", numPartitions=").append(numPartitions.map(String::valueOf).orElse("default")). + append(", replicationFactor=").append(replicationFactor.map(String::valueOf).orElse("default")). + append(", replicasAssignments=").append(replicasAssignments). + append(", configs=").append(configs). + append(")"); + return bld.toString(); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java b/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java deleted file mode 100644 index ec9cbfd1fb058..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.admin; - -import org.apache.kafka.common.annotation.InterfaceStability; - -import java.util.Locale; -import java.util.Objects; - -/** - * An endpoint for a raft quorum voter. - */ -@InterfaceStability.Stable -public class RaftVoterEndpoint { - private final String name; - private final String host; - private final int port; - - static String requireNonNullAllCapsNonEmpty(String input) { - if (input == null) { - throw new IllegalArgumentException("Null argument not allowed."); - } - if (!input.trim().equals(input)) { - throw new IllegalArgumentException("Leading or trailing whitespace is not allowed."); - } - if (input.isEmpty()) { - throw new IllegalArgumentException("Empty string is not allowed."); - } - if (!input.toUpperCase(Locale.ROOT).equals(input)) { - throw new IllegalArgumentException("String must be UPPERCASE."); - } - return input; - } - - /** - * Create an endpoint for a metadata quorum voter. - * - * @param name The human-readable name for this endpoint. For example, CONTROLLER. - * @param host The DNS hostname for this endpoint. - * @param port The network port for this endpoint. 
- */ - public RaftVoterEndpoint( - String name, - String host, - int port - ) { - this.name = requireNonNullAllCapsNonEmpty(name); - this.host = Objects.requireNonNull(host); - this.port = port; - } - - public String name() { - return name; - } - - public String host() { - return host; - } - - public int port() { - return port; - } - - @Override - public boolean equals(Object o) { - if (o == null || (!o.getClass().equals(getClass()))) return false; - RaftVoterEndpoint other = (RaftVoterEndpoint) o; - return name.equals(other.name) && - host.equals(other.host) && - port == other.port; - } - - @Override - public int hashCode() { - return Objects.hash(name, host, port); - } - - @Override - public String toString() { - return "RaftVoterEndpoint" + - "(name=" + name + - ", host=" + host + - ", port=" + port + - ")"; - } -} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.java deleted file mode 100644 index afffdcc2d86b8..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.clients.admin; - -import org.apache.kafka.common.annotation.InterfaceStability; - -/** - * Options for {@link Admin#removeRaftVoter}. - */ -@InterfaceStability.Stable -public class RemoveRaftVoterOptions extends AbstractOptions { -} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterResult.java deleted file mode 100644 index 8e8e99ddbfc21..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterResult.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.admin; - -import org.apache.kafka.common.KafkaFuture; -import org.apache.kafka.common.annotation.InterfaceStability; - -/** - * The result of {@link org.apache.kafka.clients.admin.Admin#removeRaftVoter(int, org.apache.kafka.common.Uuid, org.apache.kafka.clients.admin.RemoveRaftVoterOptions)}. - * - * The API of this class is evolving, see {@link Admin} for details. 
- */ -@InterfaceStability.Stable -public class RemoveRaftVoterResult { - private final KafkaFuture result; - - RemoveRaftVoterResult(KafkaFuture result) { - this.result = result; - } - - /** - * Returns a future that completes when the voter has been removed. - */ - public KafkaFuture all() { - return result; - } - -} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java index 184608ee95f91..fcb657fff8f92 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java @@ -212,7 +212,7 @@ private ApiResult handledConsumerGroup final Set authorizedOperations = validAclOperations(describedGroup.authorizedOperations()); final List memberDescriptions = new ArrayList<>(describedGroup.members().size()); - describedGroup.members().forEach(groupMember -> + describedGroup.members().forEach(groupMember -> { memberDescriptions.add(new MemberDescription( groupMember.memberId(), Optional.ofNullable(groupMember.instanceId()), @@ -220,8 +220,8 @@ private ApiResult handledConsumerGroup groupMember.clientHost(), new MemberAssignment(convertAssignment(groupMember.assignment())), Optional.of(new MemberAssignment(convertAssignment(groupMember.targetAssignment()))) - )) - ); + )); + }); final ConsumerGroupDescription consumerGroupDescription = new ConsumerGroupDescription( diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/FenceProducersHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/FenceProducersHandler.java index 9a12bc1959609..23572dd4419ca 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/FenceProducersHandler.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/admin/internals/FenceProducersHandler.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.clients.admin.internals; -import org.apache.kafka.clients.admin.FenceProducersOptions; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; @@ -39,16 +38,12 @@ public class FenceProducersHandler extends AdminApiHandler.Unbatched { private final Logger log; private final AdminApiLookupStrategy lookupStrategy; - private final int txnTimeoutMs; public FenceProducersHandler( - FenceProducersOptions options, - LogContext logContext, - int requestTimeoutMs + LogContext logContext ) { this.log = logContext.logger(FenceProducersHandler.class); this.lookupStrategy = new CoordinatorStrategy(FindCoordinatorRequest.CoordinatorType.TRANSACTION, logContext); - this.txnTimeoutMs = options.timeoutMs() != null ? options.timeoutMs() : requestTimeoutMs; } public static AdminApiFuture.SimpleAdminApiFuture newFuture( @@ -87,8 +82,9 @@ InitProducerIdRequest.Builder buildSingleRequest(int brokerId, CoordinatorKey ke .setProducerEpoch(ProducerIdAndEpoch.NONE.epoch) .setProducerId(ProducerIdAndEpoch.NONE.producerId) .setTransactionalId(key.idValue) - // This timeout is used by the coordinator to append the record with the new producer epoch to the transaction log. 
- .setTransactionTimeoutMs(txnTimeoutMs); + // Set transaction timeout to 1 since it's only being initialized to fence out older producers with the same transactional ID, + // and shouldn't be used for any actual record writes + .setTransactionTimeoutMs(1); return new InitProducerIdRequest.Builder(data); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java index 2a2192a58a9a1..fe8e48e705dc0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java @@ -121,9 +121,9 @@ private void failAllPartitionsForTopic( Map failed, Function exceptionGenerator ) { - partitions.stream().filter(tp -> tp.topic().equals(topic)).forEach(tp -> - failed.put(tp, exceptionGenerator.apply(tp)) - ); + partitions.stream().filter(tp -> tp.topic().equals(topic)).forEach(tp -> { + failed.put(tp, exceptionGenerator.apply(tp)); + }); } private void handlePartitionError( diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgeType.java b/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgeType.java deleted file mode 100644 index 14b5415c2a407..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgeType.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.consumer; - -import org.apache.kafka.common.annotation.InterfaceStability; - -import java.util.Locale; - -@InterfaceStability.Evolving -public enum AcknowledgeType { - /** The record was consumed successfully. */ - ACCEPT((byte) 1), - - /** The record was not consumed successfully. Release it for another delivery attempt. */ - RELEASE((byte) 2), - - /** The record was not consumed successfully. Reject it and do not release it for another delivery attempt. */ - REJECT((byte) 3); - - public final byte id; - - AcknowledgeType(byte id) { - this.id = id; - } - - @Override - public String toString() { - return super.toString().toLowerCase(Locale.ROOT); - } - - - public static AcknowledgeType forId(byte id) { - switch (id) { - case 1: - return ACCEPT; - case 2: - return RELEASE; - case 3: - return REJECT; - default: - throw new IllegalArgumentException("Unknown acknowledge type id: " + id); - } - } -} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java b/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java deleted file mode 100644 index f37fbe0575079..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.consumer; - -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.annotation.InterfaceStability; -import org.apache.kafka.common.errors.AuthorizationException; -import org.apache.kafka.common.errors.InterruptException; -import org.apache.kafka.common.errors.InvalidRecordStateException; -import org.apache.kafka.common.errors.WakeupException; - -import java.util.Map; -import java.util.Set; - -/** - * A callback interface that the user can implement to trigger custom actions when an acknowledgement completes. - * The callback may be executed in any thread calling {@link ShareConsumer#poll(java.time.Duration)}. - */ -@InterfaceStability.Evolving -public interface AcknowledgementCommitCallback { - - /** - * A callback method the user can implement to provide asynchronous handling of acknowledgement completion. - * This method will be called when the acknowledgement request sent to the server has been completed. - * - * @param offsets A map of the offsets that this callback applies to. - * - * @param exception The exception thrown during processing of the request, or null if the acknowledgement completed successfully. - *

    - *
  • {@link InvalidRecordStateException} if the record state is invalid - *
  • {@link AuthorizationException} if not authorized to the topic of group - *
  • {@link WakeupException} if {@link KafkaShareConsumer#wakeup()} is called before or while this function is called - *
  • {@link InterruptException} if the calling thread is interrupted before or while this function is called - *
  • {@link KafkaException} for any other unrecoverable errors - *
- */ - void onComplete(Map> offsets, Exception exception); -} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 76bfe7e91a149..7ec147e9e3ced 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -198,10 +198,7 @@ public class ConsumerConfig extends AbstractConfig { * fetch.max.wait.ms */ public static final String FETCH_MAX_WAIT_MS_CONFIG = "fetch.max.wait.ms"; - private static final String FETCH_MAX_WAIT_MS_DOC = "The maximum amount of time the server will block before " + - "answering the fetch request there isn't sufficient data to immediately satisfy the requirement given by " + - "fetch.min.bytes. This config is used only for local log fetch. To tune the remote fetch maximum wait " + - "time, please refer to 'remote.fetch.max.wait.ms' broker config"; + private static final String FETCH_MAX_WAIT_MS_DOC = "The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by fetch.min.bytes."; public static final int DEFAULT_FETCH_MAX_WAIT_MS = 500; /** metadata.max.age.ms */ diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java index 8884c0393d608..2f43b603fc8ff 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java @@ -152,7 +152,7 @@ public interface ConsumerRebalanceListener { * {@link #onPartitionsRevoked(Collection)} callback before any instance executes its * {@link #onPartitionsAssigned(Collection)} callback. 
During exceptional scenarios, partitions may be migrated * without the old owner being notified (i.e. their {@link #onPartitionsRevoked(Collection)} callback not triggered), - * and later when the old owner consumer realized this event, the {@link #onPartitionsLost(Collection)} callback + * and later when the old owner consumer realized this event, the {@link #onPartitionsLost(Collection)} callback * will be triggered by the consumer then. *

* It is common for the assignment callback to use the consumer instance in order to query offsets. It is possible diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java deleted file mode 100644 index 2fc721a84ff7f..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java +++ /dev/null @@ -1,700 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.clients.consumer; - -import org.apache.kafka.clients.KafkaClient; -import org.apache.kafka.clients.consumer.internals.ConsumerMetadata; -import org.apache.kafka.clients.consumer.internals.ShareConsumerDelegate; -import org.apache.kafka.clients.consumer.internals.ShareConsumerDelegateCreator; -import org.apache.kafka.clients.consumer.internals.SubscriptionState; -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.Metric; -import org.apache.kafka.common.MetricName; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.annotation.InterfaceStability; -import org.apache.kafka.common.errors.AuthenticationException; -import org.apache.kafka.common.errors.AuthorizationException; -import org.apache.kafka.common.errors.InterruptException; -import org.apache.kafka.common.errors.InvalidTopicException; -import org.apache.kafka.common.errors.WakeupException; -import org.apache.kafka.common.metrics.Metrics; -import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.common.utils.Time; - -import java.time.Duration; -import java.util.Collection; -import java.util.Map; -import java.util.Optional; -import java.util.Properties; -import java.util.Set; - -import static org.apache.kafka.common.utils.Utils.propsToMap; - -/** - * A client that consumes records from a Kafka cluster using a share group. - *

- * This is an early access feature introduced by KIP-932. It is not suitable for production use until it is - * fully implemented and released. - * - *

Cross-Version Compatibility

- * This client can communicate with brokers that are version 4.0.0 or newer. You will receive an - * {@link org.apache.kafka.common.errors.UnsupportedVersionException} when invoking an API that is not - * available on the running broker version. - * - *

Share Groups and Topic Subscriptions

- * Kafka uses the concept of share groups to allow a pool of consumers to cooperate on the work of - * consuming and processing records. All consumer instances sharing the same {@code group.id} will be part of - * the same share group. - *

- * Each consumer in a group can dynamically set the list of topics it wants to subscribe to using the - * {@link #subscribe(Collection)} method. Kafka will deliver each message in the subscribed topics to one - * consumer in the share group. Unlike consumer groups, share groups balance the partitions between all - * members of the share group permitting multiple consumers to consume from the same partitions. This gives - * more flexible sharing of records than a consumer group, at the expense of record ordering. - *

- * Membership in a share group is maintained dynamically: if a consumer fails, the partitions assigned to - * it will be reassigned to other consumers in the same group. Similarly, if a new consumer joins the group, - * the partition assignment is re-evaluated and partitions can be moved from existing consumers to the new one. - * This is known as rebalancing the group and is discussed in more detail below. - * Group rebalancing is also used when new partitions are added to one of the subscribed topics. The group will - * automatically detect the new partitions through periodic metadata refreshes and assign them to the members of the group. - *

- * Conceptually, you can think of a share group as a single logical subscriber made up of multiple consumers. - * In fact, in other messaging systems, a share group is roughly equivalent to a durable shared subscription. - * You can have multiple share groups and consumer groups independently consuming from the same topics. - * - *

Detecting Consumer Failures

- * After subscribing to a set of topics, the consumer will automatically join the group when {@link #poll(Duration)} is - * invoked. This method is designed to ensure consumer liveness. As long as you continue to call poll, the consumer - * will stay in the group and continue to receive records from the partitions it was assigned. Under the covers, - * the consumer sends periodic heartbeats to the broker. If the consumer crashes or is unable to send heartbeats for - * the duration of the share group's session time-out, then the consumer will be considered dead and its partitions - * will be reassigned. - *

- * It is also possible that the consumer could encounter a "livelock" situation where it is continuing to send heartbeats - * in the background, but no progress is being made. To prevent the consumer from holding onto its partitions - * indefinitely in this case, we provide a liveness detection mechanism using the {@code max.poll.interval.ms} setting. - * If you don't call poll at least as frequently as this, the client will proactively leave the share group. - * So to stay in the group, you must continue to call poll. - * - *

Record Delivery and Acknowledgement

- * When a consumer in a share-group fetches records using {@link #poll(Duration)}, it receives available records from any - * of the topic-partitions that match its subscriptions. Records are acquired for delivery to this consumer with a - * time-limited acquisition lock. While a record is acquired, it is not available for another consumer. By default, - * the lock duration is 30 seconds, but it can also be controlled using the group {@code group.share.record.lock.duration.ms} - * configuration parameter. The idea is that the lock is automatically released once the lock duration has elapsed, and - * then the record is available to be given to another consumer. The consumer which holds the lock can deal with it in - * the following ways: - *
    - *
  • The consumer can acknowledge successful processing of the record
  • - *
  • The consumer can release the record, which makes the record available for another delivery attempt
  • - *
  • The consumer can reject the record, which indicates that the record is unprocessable and does not make - * the record available for another delivery attempt
  • - *
  • The consumer can do nothing, in which case the lock is automatically released when the lock duration has elapsed
  • - *
- * The cluster limits the number of records acquired for consumers for each topic-partition in a share group. Once the limit - * is reached, fetching records will temporarily yield no further records until the number of acquired records reduces, - * as naturally happens when the locks time out. This limit is controlled by the broker configuration property - * {@code group.share.record.lock.partition.limit}. By limiting the duration of the acquisition lock and automatically - * releasing the locks, the broker ensures delivery progresses even in the presence of consumer failures. - *

- * The consumer can choose to use implicit or explicit acknowledgement of the records it processes. - *

If the application calls {@link #acknowledge(ConsumerRecord, AcknowledgeType)} for any record in the batch, - * it is using explicit acknowledgement. In this case: - *

    - *
  • The application calls {@link #commitSync()} or {@link #commitAsync()} which commits the acknowledgements to Kafka. - * If any records in the batch were not acknowledged, they remain acquired and will be presented to the application - * in response to a future poll.
  • - *
  • The application calls {@link #poll(Duration)} without committing first, which commits the acknowledgements to - * Kafka asynchronously. In this case, no exception is thrown by a failure to commit the acknowledgement. - * If any records in the batch were not acknowledged, they remain acquired and will be presented to the application - * in response to a future poll.
  • - *
  • The application calls {@link #close()} which attempts to commit any pending acknowledgements and - * releases any remaining acquired records.
  • - *
- * If the application does not call {@link #acknowledge(ConsumerRecord, AcknowledgeType)} for any record in the batch, - * it is using implicit acknowledgement. In this case: - *
    - *
  • The application calls {@link #commitSync()} or {@link #commitAsync()} which implicitly acknowledges all of - * the delivered records as processed successfully and commits the acknowledgements to Kafka.
  • - *
  • The application calls {@link #poll(Duration)} without committing, which also implicitly acknowledges all of - * the delivered records and commits the acknowledgements to Kafka asynchronously. In this case, no exception is - * thrown by a failure to commit the acknowledgements.
  • - *
  • The application calls {@link #close()} which releases any acquired records without acknowledgement.
  • - *
- *

- * The consumer guarantees that the records returned in the {@code ConsumerRecords} object for a specific topic-partition - * are in order of increasing offset. For each topic-partition, Kafka guarantees that acknowledgements for the records - * in a batch are performed atomically. This makes error handling significantly more straightforward because there can be - * one error code per partition. - * - *

Usage Examples

- * The share consumer APIs offer flexibility to cover a variety of consumption use cases. Here are some examples to - * demonstrate how to use them. - * - *

Acknowledging a batch of records (implicit acknowledgement)

- * This example demonstrates implicit acknowledgement using {@link #poll(Duration)} to acknowledge the records which - * were delivered in the previous poll. All the records delivered are implicitly marked as successfully consumed and - * acknowledged synchronously with Kafka as the consumer fetches more records. - *
- *     Properties props = new Properties();
- *     props.setProperty("bootstrap.servers", "localhost:9092");
- *     props.setProperty("group.id", "test");
- *     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
- *     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
- *     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
- *     consumer.subscribe(Arrays.asList("foo"));
- *     while (true) {
- *         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
- *         for (ConsumerRecord<String, String> record : records) {
- *             System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
- *             doProcessing(record);
- *         }
- *     }
- * 
- * - * Alternatively, you can use {@link #commitSync()} or {@link #commitAsync()} to commit the acknowledgements, but this is - * slightly less efficient because there is an additional request sent to Kafka. - *
- *     Properties props = new Properties();
- *     props.setProperty("bootstrap.servers", "localhost:9092");
- *     props.setProperty("group.id", "test");
- *     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
- *     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
- *     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
- *     consumer.subscribe(Arrays.asList("foo"));
- *     while (true) {
- *         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
- *         for (ConsumerRecord<String, String> record : records) {
- *             System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
- *             doProcessing(record);
- *         }
- *         consumer.commitSync();
- *     }
- * 
- * - *

Per-record acknowledgement (explicit acknowledgement)

- * This example demonstrates using different acknowledgement types depending on the outcome of processing the records. - *
- *     Properties props = new Properties();
- *     props.setProperty("bootstrap.servers", "localhost:9092");
- *     props.setProperty("group.id", "test");
- *     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
- *     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
- *     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
- *     consumer.subscribe(Arrays.asList("foo"));
- *     while (true) {
- *         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
- *         for (ConsumerRecord<String, String> record : records) {
- *             try {
- *                 doProcessing(record);
- *                 consumer.acknowledge(record, AcknowledgeType.ACCEPT);
- *             } catch (Exception e) {
- *                 consumer.acknowledge(record, AcknowledgeType.REJECT);
- *             }
- *         }
- *         consumer.commitSync();
- *     }
- * 
- * - * Each record processed is separately acknowledged using a call to {@link #acknowledge(ConsumerRecord, AcknowledgeType)}. - * The {@link AcknowledgeType} argument indicates whether the record was processed successfully or not. In this case, - * the bad records are rejected meaning that they’re not eligible for further delivery attempts. For a permanent error - * such as a semantic error, this is appropriate. For a transient error which might not affect a subsequent processing - * attempt, {@link AcknowledgeType#RELEASE} is more appropriate because the record remains eligible for further delivery attempts. - *

- * The calls to {@link #acknowledge(ConsumerRecord, AcknowledgeType)} are simply updating local information in the consumer. - * It is only once {@link #commitSync()} is called that the acknowledgements are committed by sending the new state - * information to Kafka. - * - *

Per-record acknowledgement, ending processing of the batch on an error (explicit acknowledgement)

- * This example demonstrates ending processing of a batch of records on the first error. - *
- *     Properties props = new Properties();
- *     props.setProperty("bootstrap.servers", "localhost:9092");
- *     props.setProperty("group.id", "test");
- *     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
- *     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
- *     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
- *     consumer.subscribe(Arrays.asList("foo"));
- *     while (true) {
- *         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
- *         for (ConsumerRecord<String, String> record : records) {
- *             try {
- *                 doProcessing(record);
- *                 consumer.acknowledge(record, AcknowledgeType.ACCEPT);
- *             } catch (Exception e) {
- *                 consumer.acknowledge(record, AcknowledgeType.REJECT);
- *                 break;
- *             }
- *         }
- *         consumer.commitSync();
- *     }
- * 
- * There are the following cases in this example: - *
    - *
  1. The batch contains no records, in which case the application just polls again. The call to {@link #commitSync()} - * just does nothing because the batch was empty.
  2. - *
  3. All of the records in the batch are processed successfully. The calls to {@link #acknowledge(ConsumerRecord, AcknowledgeType)} - * specifying {@code AcknowledgeType.ACCEPT} mark all records in the batch as successfully processed.
  4. - *
  5. One of the records encounters an exception. The call to {@link #acknowledge(ConsumerRecord, AcknowledgeType)} specifying - * {@code AcknowledgeType.REJECT} rejects that record. Earlier records in the batch have already been marked as successfully - * processed. The call to {@link #commitSync()} commits the acknowledgements, but the records after the failed record - * remain acquired as part of the same delivery attempt and will be presented to the application in response to another poll.
  6. - *
- * - *

Reading Transactional Records

- * The way that share groups handle transactional records is controlled by the {@code group.share.isolation.level} - * configuration property. In a share group, the isolation level applies to the entire share group, not just individual - * consumers. - *

- * In read_uncommitted isolation level, the share group consumes all non-transactional and transactional - * records. The consumption is bounded by the high-water mark. - *

- * In read_committed isolation level (not yet supported), the share group only consumes non-transactional - * records and committed transactional records. The set of records which are eligible to become in-flight records are - * non-transactional records and committed transactional records only. The consumption is bounded by the last stable - * offset, so an open transaction blocks the progress of the share group with read_committed isolation level. - * - *

Multithreaded Processing

- * The consumer is NOT thread-safe. It is the responsibility of the user to ensure that multithreaded access - * is properly synchronized. Unsynchronized access will result in {@link java.util.ConcurrentModificationException}. - *

- * The only exception to this rule is {@link #wakeup()} which can safely be used from an external thread to - * interrupt an active operation. In this case, a {@link org.apache.kafka.common.errors.WakeupException} will be - * thrown from the thread blocking on the operation. This can be used to shut down the consumer from another thread. - * The following snippet shows the typical pattern: - * - *

- * public class KafkaShareConsumerRunner implements Runnable {
- *     private final AtomicBoolean closed = new AtomicBoolean(false);
- *     private final KafkaShareConsumer consumer;
- *
- *     public KafkaShareConsumerRunner(KafkaShareConsumer consumer) {
- *       this.consumer = consumer;
- *     }
- *
- *     {@literal}@Override
- *     public void run() {
- *         try {
- *             consumer.subscribe(Arrays.asList("topic"));
- *             while (!closed.get()) {
- *                 ConsumerRecords records = consumer.poll(Duration.ofMillis(10000));
- *                 // Handle new records
- *             }
- *         } catch (WakeupException e) {
- *             // Ignore exception if closing
- *             if (!closed.get()) throw e;
- *         } finally {
- *             consumer.close();
- *         }
- *     }
- *
- *     // Shutdown hook which can be called from a separate thread
- *     public void shutdown() {
- *         closed.set(true);
- *         consumer.wakeup();
- *     }
- * }
- * 
- * - * Then in a separate thread, the consumer can be shutdown by setting the closed flag and waking up the consumer. - *
- *     closed.set(true);
- *     consumer.wakeup();
- * 
- * - *

- * Note that while it is possible to use thread interrupts instead of {@link #wakeup()} to abort a blocking operation - * (in which case, {@link InterruptException} will be raised), we discourage their use since they may cause a clean - * shutdown of the consumer to be aborted. Interrupts are mainly supported for those cases where using {@link #wakeup()} - * is impossible, such as when a consumer thread is managed by code that is unaware of the Kafka client. - *

- * We have intentionally avoided implementing a particular threading model for processing. Various options for - * multithreaded processing are possible, of which the most straightforward is to dedicate a thread to each consumer. - */ -@InterfaceStability.Evolving -public class KafkaShareConsumer implements ShareConsumer { - - private final static ShareConsumerDelegateCreator CREATOR = new ShareConsumerDelegateCreator(); - - private final ShareConsumerDelegate delegate; - - /** - * A consumer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings - * are documented here. Values can be - * either strings or objects of the appropriate type (for example a numeric configuration would accept either the - * string "42" or the integer 42). - *

- * Valid configuration strings are documented at {@link ConsumerConfig}. - *

- * Note: after creating a {@code KafkaShareConsumer} you must always {@link #close()} it to avoid resource leaks. - * - * @param configs The consumer configs - */ - public KafkaShareConsumer(Map configs) { - this(configs, null, null); - } - - /** - * A consumer is instantiated by providing a {@link java.util.Properties} object as configuration. - *

- * Valid configuration strings are documented at {@link ConsumerConfig}. - *

- * Note: after creating a {@code KafkaShareConsumer} you must always {@link #close()} it to avoid resource leaks. - * - * @param properties The consumer configuration properties - */ - public KafkaShareConsumer(Properties properties) { - this(properties, null, null); - } - - /** - * A consumer is instantiated by providing a {@link java.util.Properties} object as configuration, and a - * key and a value {@link Deserializer}. - *

- * Valid configuration strings are documented at {@link ConsumerConfig}. - *

- * Note: after creating a {@code KafkaShareConsumer} you must always {@link #close()} it to avoid resource leaks. - * - * @param properties The consumer configuration properties - * @param keyDeserializer The deserializer for key that implements {@link Deserializer}. The configure() method - * won't be called in the consumer when the deserializer is passed in directly. - * @param valueDeserializer The deserializer for value that implements {@link Deserializer}. The configure() method - * won't be called in the consumer when the deserializer is passed in directly. - */ - public KafkaShareConsumer(Properties properties, - Deserializer keyDeserializer, - Deserializer valueDeserializer) { - this(propsToMap(properties), keyDeserializer, valueDeserializer); - } - - /** - * A consumer is instantiated by providing a set of key-value pairs as configuration, and a key and a value {@link Deserializer}. - *

- * Valid configuration strings are documented at {@link ConsumerConfig}. - *

- * Note: after creating a {@code KafkaShareConsumer} you must always {@link #close()} it to avoid resource leaks. - * - * @param configs The consumer configs - * @param keyDeserializer The deserializer for key that implements {@link Deserializer}. The configure() method - * won't be called in the consumer when the deserializer is passed in directly. - * @param valueDeserializer The deserializer for value that implements {@link Deserializer}. The configure() method - * won't be called in the consumer when the deserializer is passed in directly. - */ - public KafkaShareConsumer(Map configs, - Deserializer keyDeserializer, - Deserializer valueDeserializer) { - this(new ConsumerConfig(ConsumerConfig.appendDeserializerToConfig(configs, keyDeserializer, valueDeserializer)), - keyDeserializer, valueDeserializer); - } - - public KafkaShareConsumer(ConsumerConfig config, - Deserializer keyDeserializer, - Deserializer valueDeserializer) { - delegate = CREATOR.create(config, keyDeserializer, valueDeserializer); - } - - KafkaShareConsumer(final LogContext logContext, - final String clientId, - final String groupId, - final ConsumerConfig config, - final Deserializer keyDeserializer, - final Deserializer valueDeserializer, - final Time time, - final KafkaClient client, - final SubscriptionState subscriptions, - final ConsumerMetadata metadata) { - delegate = CREATOR.create( - logContext, clientId, groupId, config, keyDeserializer, valueDeserializer, - time, client, subscriptions, metadata); - } - - /** - * Get the current subscription. Will return the same topics used in the most recent call to - * {@link #subscribe(Collection)}, or an empty set if no such call has been made. - * - * @return The set of topics currently subscribed to - */ - @Override - public Set subscription() { - return delegate.subscription(); - } - - /** - * Subscribe to the given list of topics to get dynamically assigned partitions. - * Topic subscriptions are not incremental. 
This list will replace the current - * assignment, if there is one. If the given list of topics is empty, it is treated the same as {@link #unsubscribe()}. - * - *

- * As part of group management, the coordinator will keep track of the list of consumers that belong to a particular - * group and will trigger a rebalance operation if any one of the following events are triggered: - *

    - *
  • A member joins or leaves the share group - *
  • An existing member of the share group is shut down or fails - *
  • The number of partitions changes for any of the subscribed topics - *
  • A subscribed topic is created or deleted - *
- * - * @param topics The list of topics to subscribe to - * - * @throws IllegalArgumentException if topics is null or contains null or empty elements - * @throws KafkaException for any other unrecoverable errors - */ - @Override - public void subscribe(Collection topics) { - delegate.subscribe(topics); - } - - /** - * Unsubscribe from topics currently subscribed with {@link #subscribe(Collection)}. - * - * @throws KafkaException for any other unrecoverable errors - */ - @Override - public void unsubscribe() { - delegate.unsubscribe(); - } - - /** - * Fetch data for the topics specified using {@link #subscribe(Collection)}. It is an error to not have - * subscribed to any topics before polling for data. - * - *

- * This method returns immediately if there are records available. Otherwise, it will await the passed timeout. - * If the timeout expires, an empty record set will be returned. - * - * @param timeout The maximum time to block (must not be greater than {@link Long#MAX_VALUE} milliseconds) - * - * @return map of topic to records since the last fetch for the subscribed list of topics - * - * @throws AuthenticationException if authentication fails. See the exception for more details - * @throws AuthorizationException if caller lacks Read access to any of the subscribed - * topics or to the share group. See the exception for more details - * @throws IllegalArgumentException if the timeout value is negative - * @throws IllegalStateException if the consumer is not subscribed to any topics - * @throws ArithmeticException if the timeout is greater than {@link Long#MAX_VALUE} milliseconds. - * @throws InvalidTopicException if the current subscription contains any invalid - * topic (per {@link org.apache.kafka.common.internals.Topic#validate(String)}) - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called - * @throws InterruptException if the calling thread is interrupted before or while this method is called - * @throws KafkaException for any other unrecoverable errors - */ - @Override - public ConsumerRecords poll(Duration timeout) { - return delegate.poll(timeout); - } - - /** - * Acknowledge successful delivery of a record returned on the last {@link #poll(Duration)} call. - * The acknowledgement is committed on the next {@link #commitSync()}, {@link #commitAsync()} or - * {@link #poll(Duration)} call. 
- * - * @param record The record to acknowledge - * - * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer has already - * used implicit acknowledgement - */ - @Override - public void acknowledge(ConsumerRecord record) { - delegate.acknowledge(record); - } - - /** - * Acknowledge delivery of a record returned on the last {@link #poll(Duration)} call indicating whether - * it was processed successfully. The acknowledgement is committed on the next {@link #commitSync()}, - * {@link #commitAsync()} or {@link #poll(Duration)} call. By using this method, the consumer is using - * explicit acknowledgement. - * - * @param record The record to acknowledge - * @param type The acknowledgement type which indicates whether it was processed successfully - * - * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer has already - * used implicit acknowledgement - */ - @Override - public void acknowledge(ConsumerRecord record, AcknowledgeType type) { - delegate.acknowledge(record, type); - } - - /** - * Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement, - * the acknowledgements to commit have been indicated using {@link #acknowledge(ConsumerRecord)} or - * {@link #acknowledge(ConsumerRecord, AcknowledgeType)}. If the consumer is using implicit acknowledgement, - * all the records returned by the latest call to {@link #poll(Duration)} are acknowledged. - * - *

- * This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is - * encountered (in which case it is thrown to the caller), or the timeout specified by {@code default.api.timeout.ms} - * expires. - * - * @return A map of the results for each topic-partition for which delivery was acknowledged. - * If the acknowledgement failed for a topic-partition, an exception is present. - * - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called - * @throws InterruptException if the thread is interrupted while blocked - * @throws KafkaException for any other unrecoverable errors - */ - @Override - public Map> commitSync() { - return delegate.commitSync(); - } - - /** - * Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement, - * the acknowledgements to commit have been indicated using {@link #acknowledge(ConsumerRecord)} or - * {@link #acknowledge(ConsumerRecord, AcknowledgeType)}. If the consumer is using implicit acknowledgement, - * all the records returned by the latest call to {@link #poll(Duration)} are acknowledged. - - *

- * This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is - * encountered (in which case it is thrown to the caller), or the timeout expires. - * - * @param timeout The maximum amount of time to await completion of the acknowledgement - * - * @return A map of the results for each topic-partition for which delivery was acknowledged. - * If the acknowledgement failed for a topic-partition, an exception is present. - * - * @throws IllegalArgumentException if the {@code timeout} is negative - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called - * @throws InterruptException if the thread is interrupted while blocked - * @throws KafkaException for any other unrecoverable errors - */ - @Override - public Map> commitSync(Duration timeout) { - return delegate.commitSync(timeout); - } - - /** - * Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement, - * the acknowledgements to commit have been indicated using {@link #acknowledge(ConsumerRecord)} or - * {@link #acknowledge(ConsumerRecord, AcknowledgeType)}. If the consumer is using implicit acknowledgement, - * all the records returned by the latest call to {@link #poll(Duration)} are acknowledged. - * - * @throws KafkaException for any other unrecoverable errors - */ - @Override - public void commitAsync() { - delegate.commitAsync(); - } - - /** - * Sets the acknowledgement commit callback which can be used to handle acknowledgement completion. - * - * @param callback The acknowledgement commit callback - */ - @Override - public void setAcknowledgementCommitCallback(AcknowledgementCommitCallback callback) { - delegate.setAcknowledgementCommitCallback(callback); - } - - /** - * Determines the client's unique client instance ID used for telemetry. This ID is unique to - * this specific client instance and will not change after it is initially generated. 
- * The ID is useful for correlating client operations with telemetry sent to the broker and - * to its eventual monitoring destinations. - *

- * If telemetry is enabled, this will first require a connection to the cluster to generate - * the unique client instance ID. This method waits up to {@code timeout} for the consumer - * client to complete the request. - *

- * Client telemetry is controlled by the {@link ConsumerConfig#ENABLE_METRICS_PUSH_CONFIG} - * configuration option. - * - * @param timeout The maximum time to wait for consumer client to determine its client instance ID. - * The value must be non-negative. Specifying a timeout of zero means do not - * wait for the initial request to complete if it hasn't already. - * - * @return The client's assigned instance id used for metrics collection. - * - * @throws IllegalArgumentException if the {@code timeout} is negative - * @throws IllegalStateException if telemetry is not enabled - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called - * @throws InterruptException if the thread is interrupted while blocked - * @throws KafkaException if an unexpected error occurs while trying to determine the client - * instance ID, though this error does not necessarily imply the - * consumer client is otherwise unusable - */ - @Override - public Uuid clientInstanceId(Duration timeout) { - return delegate.clientInstanceId(timeout); - } - - /** - * Get the metrics kept by the consumer - */ - @Override - public Map metrics() { - return delegate.metrics(); - } - - /** - * Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup. - * This will commit acknowledgements if possible within the default timeout. - * See {@link #close(Duration)} for details. Note that {@link #wakeup()} cannot be used to interrupt close. - * - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called - * @throws InterruptException if the thread is interrupted before or while this method is called - * @throws KafkaException for any other error during close - */ - @Override - public void close() { - delegate.close(); - } - - /** - * Tries to close the consumer cleanly within the specified timeout. 
This method waits up to - * {@code timeout} for the consumer to complete acknowledgements and leave the group. - * If the consumer is unable to complete acknowledgements and gracefully leave the group - * before the timeout expires, the consumer is force closed. Note that {@link #wakeup()} cannot be - * used to interrupt close. - * - * @param timeout The maximum time to wait for consumer to close gracefully. The value must be - * non-negative. Specifying a timeout of zero means do not wait for pending requests to complete. - * - * @throws IllegalArgumentException if the {@code timeout} is negative - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called - * @throws InterruptException if the thread is interrupted before or while this method is called - * @throws KafkaException for any other error during close - */ - @Override - public void close(Duration timeout) { - delegate.close(timeout); - } - - /** - * Wake up the consumer. This method is thread-safe and is useful in particular to abort a long poll. - * The thread which is blocking in an operation will throw {@link WakeupException}. - * If no thread is blocking in a method which can throw {@link WakeupException}, - * the next call to such a method will raise it instead. 
- */ - @Override - public void wakeup() { - delegate.wakeup(); - } - - // Functions below are for testing only - String clientId() { - return delegate.clientId(); - } - - Metrics metricsRegistry() { - return delegate.metricsRegistry(); - } -} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java index 600f8bbd07ef4..27faa80c65421 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java @@ -253,7 +253,7 @@ public synchronized ConsumerRecords poll(final Duration timeout) { } } - toClear.forEach(records::remove); + toClear.forEach(p -> this.records.remove(p)); return new ConsumerRecords<>(results); } @@ -263,7 +263,7 @@ public synchronized void addRecord(ConsumerRecord record) { Set currentAssigned = this.subscriptions.assignedPartitions(); if (!currentAssigned.contains(tp)) throw new IllegalStateException("Cannot add records for a partition that is not assigned to the consumer"); - List> recs = records.computeIfAbsent(tp, k -> new ArrayList<>()); + List> recs = this.records.computeIfAbsent(tp, k -> new ArrayList<>()); recs.add(record); } @@ -286,7 +286,8 @@ public synchronized void setOffsetsException(KafkaException exception) { @Override public synchronized void commitAsync(Map offsets, OffsetCommitCallback callback) { ensureNotClosed(); - committed.putAll(offsets); + for (Map.Entry entry : offsets.entrySet()) + committed.put(entry.getKey(), entry.getValue()); if (callback != null) { callback.onComplete(offsets, null); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java deleted file mode 100644 index 212e2ae390d5f..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java +++ /dev/null @@ 
-1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.consumer; - -import org.apache.kafka.clients.consumer.internals.SubscriptionState; -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.Metric; -import org.apache.kafka.common.MetricName; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.utils.LogContext; - -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; - -/** - * A mock of the {@link ShareConsumer} interface you can use for testing code that uses Kafka. This class is not - * thread-safe . 
- */ -public class MockShareConsumer implements ShareConsumer { - - private final SubscriptionState subscriptions; - private final AtomicBoolean wakeup; - - private final Map>> records; - - private boolean closed; - private Uuid clientInstanceId; - - public MockShareConsumer() { - this.subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); - this.records = new HashMap<>(); - this.closed = false; - this.wakeup = new AtomicBoolean(false); - } - - @Override - public synchronized Set subscription() { - ensureNotClosed(); - return subscriptions.subscription(); - } - - @Override - public synchronized void subscribe(Collection topics) { - ensureNotClosed(); - subscriptions.subscribe(new HashSet<>(topics), Optional.empty()); - } - - @Override - public synchronized void unsubscribe() { - ensureNotClosed(); - subscriptions.unsubscribe(); - } - - @Override - public synchronized ConsumerRecords poll(Duration timeout) { - ensureNotClosed(); - - final Map>> results = new HashMap<>(); - for (Map.Entry>> entry : records.entrySet()) { - final List> recs = entry.getValue(); - for (final ConsumerRecord rec : recs) { - results.computeIfAbsent(entry.getKey(), partition -> new ArrayList<>()).add(rec); - } - } - - records.clear(); - return new ConsumerRecords<>(results); - } - - @Override - public synchronized void acknowledge(ConsumerRecord record) { - } - - @Override - public synchronized void acknowledge(ConsumerRecord record, AcknowledgeType type) { - } - - @Override - public synchronized Map> commitSync() { - return new HashMap<>(); - } - - @Override - public synchronized Map> commitSync(Duration timeout) { - return new HashMap<>(); - } - - @Override - public synchronized void commitAsync() { - } - - @Override - public void setAcknowledgementCommitCallback(AcknowledgementCommitCallback callback) { - } - - public synchronized void setClientInstanceId(final Uuid clientInstanceId) { - this.clientInstanceId = clientInstanceId; - } - - @Override - public 
synchronized Uuid clientInstanceId(Duration timeout) { - if (clientInstanceId == null) { - throw new UnsupportedOperationException("clientInstanceId not set"); - } - - return clientInstanceId; - } - - @Override - public synchronized Map metrics() { - ensureNotClosed(); - return Collections.emptyMap(); - } - - @Override - public synchronized void close() { - close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); - } - - @Override - public synchronized void close(Duration timeout) { - closed = true; - } - - @Override - public synchronized void wakeup() { - wakeup.set(true); - } - - public synchronized void addRecord(ConsumerRecord record) { - ensureNotClosed(); - TopicPartition tp = new TopicPartition(record.topic(), record.partition()); - if (!subscriptions.subscription().contains(record.topic())) - throw new IllegalStateException("Cannot add records for a topics that is not subscribed by the consumer"); - List> recs = records.computeIfAbsent(tp, k -> new ArrayList<>()); - recs.add(record); - } - - private void ensureNotClosed() { - if (closed) - throw new IllegalStateException("This consumer has already been closed."); - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java index 6bc064006fe6f..e95d4f1efd7b0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java @@ -170,7 +170,7 @@ private void assignRanges(TopicAssignmentState assignmentState, private void assignWithRackMatching(Collection assignmentStates, Map> assignment) { - assignmentStates.stream().collect(Collectors.groupingBy(t -> t.consumers)).forEach((consumers, states) -> + assignmentStates.stream().collect(Collectors.groupingBy(t -> t.consumers)).forEach((consumers, states) -> { states.stream().collect(Collectors.groupingBy(t -> 
t.partitionRacks.size())).forEach((numPartitions, coPartitionedStates) -> { if (coPartitionedStates.size() > 1) assignCoPartitionedWithRackMatching(consumers, numPartitions, coPartitionedStates, assignment); @@ -179,8 +179,8 @@ private void assignWithRackMatching(Collection assignmentS if (state.needsRackAwareAssignment) assignRanges(state, state::racksMatch, assignment); } - }) - ); + }); + }); } private void assignCoPartitionedWithRackMatching(LinkedHashMap> consumers, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java deleted file mode 100644 index 8ac4198c70df3..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.clients.consumer; - -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.Metric; -import org.apache.kafka.common.MetricName; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.annotation.InterfaceStability; - -import java.io.Closeable; -import java.time.Duration; -import java.util.Collection; -import java.util.Map; -import java.util.Optional; -import java.util.Set; - -/** - * @see KafkaShareConsumer - * @see MockShareConsumer - */ -@InterfaceStability.Evolving -public interface ShareConsumer extends Closeable { - - /** - * @see KafkaShareConsumer#subscription() - */ - Set subscription(); - - /** - * @see KafkaShareConsumer#subscribe(Collection) - */ - void subscribe(Collection topics); - - /** - * @see KafkaShareConsumer#unsubscribe() - */ - void unsubscribe(); - - /** - * @see KafkaShareConsumer#poll(Duration) - */ - ConsumerRecords poll(Duration timeout); - - /** - * @see KafkaShareConsumer#acknowledge(ConsumerRecord) - */ - void acknowledge(ConsumerRecord record); - - /** - * @see KafkaShareConsumer#acknowledge(ConsumerRecord, AcknowledgeType) - */ - void acknowledge(ConsumerRecord record, AcknowledgeType type); - - /** - * @see KafkaShareConsumer#commitSync() - */ - Map> commitSync(); - - /** - * @see KafkaShareConsumer#commitSync(Duration) - */ - Map> commitSync(Duration timeout); - - /** - * @see KafkaShareConsumer#commitAsync() - */ - void commitAsync(); - - /** - * @see KafkaShareConsumer#setAcknowledgementCommitCallback(AcknowledgementCommitCallback) - */ - void setAcknowledgementCommitCallback(AcknowledgementCommitCallback callback); - - /** - * See {@link KafkaShareConsumer#clientInstanceId(Duration)}} - */ - Uuid clientInstanceId(Duration timeout); - - /** - * @see KafkaShareConsumer#metrics() - */ - Map metrics(); - - /** - * @see KafkaShareConsumer#close() - */ - void close(); - - /** - * @see 
KafkaShareConsumer#close(Duration) - */ - void close(Duration timeout); - - /** - * @see KafkaShareConsumer#wakeup() - */ - void wakeup(); - -} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/StickyAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/StickyAssignor.java index 0d3e4a256e2be..7e7350a5946e8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/StickyAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/StickyAssignor.java @@ -236,7 +236,8 @@ static ByteBuffer serializeTopicPartitionAssignment(MemberData memberData) { topicAssignments.add(topicAssignment); } struct.set(TOPIC_PARTITIONS_KEY_NAME, topicAssignments.toArray()); - memberData.generation.ifPresent(integer -> struct.set(GENERATION_KEY_NAME, integer)); + if (memberData.generation.isPresent()) + struct.set(GENERATION_KEY_NAME, memberData.generation.get()); ByteBuffer buffer = ByteBuffer.allocate(STICKY_ASSIGNOR_USER_DATA_V1.sizeOf(struct)); STICKY_ASSIGNOR_USER_DATA_V1.write(buffer, struct); buffer.flip(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java index e6de169f7b33f..6930cd02955d6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java @@ -26,6 +26,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.ConsumerInterceptor; +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.GroupProtocol; @@ -234,12 +235,12 @@ private void process(final 
ConsumerRebalanceListenerCallbackNeededEvent event) { private final SubscriptionState subscriptions; private final ConsumerMetadata metadata; - private int metadataVersionSnapshot; private final Metrics metrics; private final long retryBackoffMs; private final int defaultApiTimeoutMs; private final boolean autoCommitEnabled; private volatile boolean closed = false; + private final List assignors; private final Optional clientTelemetryReporter; // to keep from repeatedly scanning subscriptions in poll(), cache the result during metadata updates @@ -312,7 +313,6 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.metadata = metadataFactory.build(config, subscriptions, logContext, clusterResourceListeners); final List addresses = ClientUtils.parseAndValidateAddresses(config); metadata.bootstrap(addresses); - this.metadataVersionSnapshot = metadata.updateVersion(); FetchMetricsManager fetchMetricsManager = createFetchMetricsManager(metrics); FetchConfig fetchConfig = new FetchConfig(config); @@ -331,8 +331,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { apiVersions, metrics, fetchMetricsManager, - clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - backgroundEventHandler); + clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null)); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); this.asyncCommitFenced = new AtomicBoolean(false); this.groupMetadata.set(initializeGroupMetadata(config, groupRebalanceConfig)); @@ -374,6 +373,10 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { rebalanceListenerInvoker ); this.backgroundEventReaper = backgroundEventReaperFactory.build(logContext); + this.assignors = ConsumerPartitionAssignor.getAssignorInstances( + config.getList(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG), + 
config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)) + ); // The FetchCollector is only used on the application thread. this.fetchCollector = fetchCollectorFactory.build(logContext, @@ -421,6 +424,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { ConsumerMetadata metadata, long retryBackoffMs, int defaultApiTimeoutMs, + List assignors, String groupId, boolean autoCommitEnabled) { this.log = logContext.logger(getClass()); @@ -437,11 +441,11 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.metrics = metrics; this.groupMetadata.set(initializeGroupMetadata(groupId, Optional.empty())); this.metadata = metadata; - this.metadataVersionSnapshot = metadata.updateVersion(); this.retryBackoffMs = retryBackoffMs; this.defaultApiTimeoutMs = defaultApiTimeoutMs; this.deserializers = deserializers; this.applicationEventHandler = applicationEventHandler; + this.assignors = assignors; this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, "consumer"); this.clientTelemetryReporter = Optional.empty(); this.autoCommitEnabled = autoCommitEnabled; @@ -456,7 +460,8 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { Deserializer valueDeserializer, KafkaClient client, SubscriptionState subscriptions, - ConsumerMetadata metadata) { + ConsumerMetadata metadata, + List assignors) { this.log = logContext.logger(getClass()); this.subscriptions = subscriptions; this.clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG); @@ -467,10 +472,10 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.time = time; this.metrics = new Metrics(time); this.metadata = metadata; - this.metadataVersionSnapshot = metadata.updateVersion(); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); this.deserializers = new 
Deserializers<>(keyDeserializer, valueDeserializer); + this.assignors = assignors; this.clientTelemetryReporter = Optional.empty(); ConsumerMetrics metricsRegistry = new ConsumerMetrics(CONSUMER_METRIC_GROUP_PREFIX); @@ -505,9 +510,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { time, config, logContext, - client, - metadata, - backgroundEventHandler + client ); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); this.asyncCommitFenced = new AtomicBoolean(false); @@ -1475,11 +1478,12 @@ public void assign(Collection partitions) { } /** - *

+ * TODO: remove this when we implement the KIP-848 protocol. * - * This function evaluates the regex that the consumer subscribed to - * against the list of topic names from metadata, and updates - * the list of topics in subscription state accordingly + *

+ * The contents of this method are shamelessly stolen from + * {@link ConsumerCoordinator#updatePatternSubscription(Cluster)} and are used here because we won't have access + * to a {@link ConsumerCoordinator} in this code. Perhaps it could be moved to a ConsumerUtils class? * * @param cluster Cluster from which we get the topics */ @@ -1489,7 +1493,7 @@ private void updatePatternSubscription(Cluster cluster) { .collect(Collectors.toSet()); if (subscriptions.subscribeFromPattern(topicsToSubscribe)) { applicationEventHandler.add(new SubscriptionChangeEvent()); - this.metadataVersionSnapshot = metadata.requestUpdateForNewTopics(); + metadata.requestUpdateForNewTopics(); } } @@ -1683,6 +1687,12 @@ private boolean initWithCommittedOffsetsIfNeeded(Timer timer) { } } + private void throwIfNoAssignorsConfigured() { + if (assignors.isEmpty()) + throw new IllegalStateException("Must configure at least one partition assigner class name to " + + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG + " configuration property"); + } + private void updateLastSeenEpochIfNewer(TopicPartition topicPartition, OffsetAndMetadata offsetAndMetadata) { if (offsetAndMetadata != null) offsetAndMetadata.leaderEpoch().ifPresent(epoch -> metadata.updateLastSeenEpochIfNewer(topicPartition, epoch)); @@ -1770,6 +1780,7 @@ private void subscribeInternal(Pattern pattern, Optional topics, Optional currentTopicPartitions = new HashSet<>(); @@ -1805,7 +1818,7 @@ private void subscribeInternal(Collection topics, Optional(topics), listener)) - this.metadataVersionSnapshot = metadata.requestUpdateForNewTopics(); + metadata.requestUpdateForNewTopics(); // Trigger subscribe event to effectively join the group if not already part of it, // or just send the new subscription to the broker. 
@@ -1984,12 +1997,9 @@ SubscriptionState subscriptions() { } private void maybeUpdateSubscriptionMetadata() { - if (this.metadataVersionSnapshot < metadata.updateVersion()) { - this.metadataVersionSnapshot = metadata.updateVersion(); - if (subscriptions.hasPatternSubscription()) { - updatePatternSubscription(metadata.fetch()); - } + if (subscriptions.hasPatternSubscription()) { + updatePatternSubscription(metadata.fetch()); } } -} +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 8ec705e972586..f6a11fe4b7d58 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -69,7 +69,6 @@ import static org.apache.kafka.common.protocol.Errors.COORDINATOR_LOAD_IN_PROGRESS; public class CommitRequestManager implements RequestManager, MemberStateListener { - private final Time time; private final SubscriptionState subscriptions; private final LogContext logContext; private final Logger log; @@ -134,7 +133,6 @@ public CommitRequestManager( final OptionalDouble jitter, final Metrics metrics) { Objects.requireNonNull(coordinatorRequestManager, "Coordinator is needed upon committing offsets"); - this.time = time; this.logContext = logContext; this.log = logContext.logger(getClass()); this.pendingRequests = new PendingRequests(); @@ -207,13 +205,6 @@ private static long findMinTime(final Collection request .orElse(Long.MAX_VALUE); } - private KafkaException maybeWrapAsTimeoutException(Throwable t) { - if (t instanceof TimeoutException) - return (TimeoutException) t; - else - return new TimeoutException(t); - } - /** * Generate a request to commit consumed offsets. Add the request to the queue of pending * requests to be sent out on the next call to {@link #poll(long)}. 
If there are empty @@ -254,7 +245,7 @@ public void maybeAutoCommitAsync() { if (autoCommitEnabled() && autoCommitState.get().shouldAutoCommit()) { OffsetCommitRequestState requestState = createOffsetCommitRequest( subscriptions.allConsumed(), - Long.MAX_VALUE); + Optional.empty()); CompletableFuture> result = requestAutoCommit(requestState); // Reset timer to the interval (even if no request was generated), but ensure that if // the request completes with a retriable error, the timer is reset to send the next @@ -303,14 +294,14 @@ private void maybeResetTimerWithBackoff(final CompletableFuture maybeAutoCommitSyncBeforeRevocation(final long deadlineMs) { + public CompletableFuture maybeAutoCommitSyncBeforeRevocation(final long retryExpirationTimeMs) { if (!autoCommitEnabled()) { return CompletableFuture.completedFuture(null); } CompletableFuture result = new CompletableFuture<>(); OffsetCommitRequestState requestState = - createOffsetCommitRequest(subscriptions.allConsumed(), deadlineMs); + createOffsetCommitRequest(subscriptions.allConsumed(), Optional.of(retryExpirationTimeMs)); autoCommitSyncBeforeRevocationWithRetries(requestState, result); return result; } @@ -323,9 +314,9 @@ private void autoCommitSyncBeforeRevocationWithRetries(OffsetCommitRequestState result.complete(null); } else { if (error instanceof RetriableException || isStaleEpochErrorAndValidEpochAvailable(error)) { - if (requestAttempt.isExpired()) { + if (error instanceof TimeoutException && requestAttempt.isExpired) { log.debug("Auto-commit sync before revocation timed out and won't be retried anymore"); - result.completeExceptionally(maybeWrapAsTimeoutException(error)); + result.completeExceptionally(error); } else if (error instanceof UnknownTopicOrPartitionException) { log.debug("Auto-commit sync before revocation failed because topic or partition were deleted"); result.completeExceptionally(error); @@ -376,7 +367,7 @@ public CompletableFuture commitAsync(final Map asyncCommitResult = new 
CompletableFuture<>(); @@ -394,26 +385,28 @@ public CompletableFuture commitAsync(final Map commitSync(final Map offsets, - final long deadlineMs) { + final long retryExpirationTimeMs) { CompletableFuture result = new CompletableFuture<>(); - OffsetCommitRequestState requestState = createOffsetCommitRequest(offsets, deadlineMs); + OffsetCommitRequestState requestState = createOffsetCommitRequest( + offsets, + Optional.of(retryExpirationTimeMs)); commitSyncWithRetries(requestState, result); return result; } private OffsetCommitRequestState createOffsetCommitRequest(final Map offsets, - final long deadlineMs) { + final Optional expirationTimeMs) { return jitter.isPresent() ? new OffsetCommitRequestState( offsets, groupId, groupInstanceId, - deadlineMs, + expirationTimeMs, retryBackoffMs, retryBackoffMaxMs, jitter.getAsDouble(), @@ -422,7 +415,7 @@ private OffsetCommitRequestState createOffsetCommitRequest(final Map> fetchOffsets( final Set partitions, - final long deadlineMs) { + final long expirationTimeMs) { if (partitions.isEmpty()) { return CompletableFuture.completedFuture(Collections.emptyMap()); } CompletableFuture> result = new CompletableFuture<>(); - OffsetFetchRequestState request = createOffsetFetchRequest(partitions, deadlineMs); + OffsetFetchRequestState request = createOffsetFetchRequest(partitions, expirationTimeMs); fetchOffsetsWithRetries(request, result); return result; } private OffsetFetchRequestState createOffsetFetchRequest(final Set partitions, - final long deadlineMs) { + final long expirationTimeMs) { return jitter.isPresent() ? 
new OffsetFetchRequestState( partitions, retryBackoffMs, retryBackoffMaxMs, - deadlineMs, + expirationTimeMs, jitter.getAsDouble(), memberInfo) : new OffsetFetchRequestState( partitions, retryBackoffMs, retryBackoffMaxMs, - deadlineMs, + expirationTimeMs, memberInfo); } @@ -523,9 +516,8 @@ private void fetchOffsetsWithRetries(final OffsetFetchRequestState fetchRequest, result.complete(res); } else { if (error instanceof RetriableException || isStaleEpochErrorAndValidEpochAvailable(error)) { - if (fetchRequest.isExpired()) { - log.debug("OffsetFetch request for {} timed out and won't be retried anymore", fetchRequest.requestedPartitions); - result.completeExceptionally(maybeWrapAsTimeoutException(error)); + if (error instanceof TimeoutException && fetchRequest.isExpired) { + result.completeExceptionally(error); } else { fetchRequest.resetFuture(); fetchOffsetsWithRetries(fetchRequest, result); @@ -620,12 +612,12 @@ private class OffsetCommitRequestState extends RetriableRequestState { OffsetCommitRequestState(final Map offsets, final String groupId, final Optional groupInstanceId, - final long deadlineMs, + final Optional expirationTimeMs, final long retryBackoffMs, final long retryBackoffMaxMs, final MemberInfo memberInfo) { super(logContext, CommitRequestManager.class.getSimpleName(), retryBackoffMs, - retryBackoffMaxMs, memberInfo, deadlineTimer(time, deadlineMs)); + retryBackoffMaxMs, memberInfo, expirationTimeMs); this.offsets = offsets; this.groupId = groupId; this.groupInstanceId = groupInstanceId; @@ -636,13 +628,13 @@ private class OffsetCommitRequestState extends RetriableRequestState { OffsetCommitRequestState(final Map offsets, final String groupId, final Optional groupInstanceId, - final long deadlineMs, + final Optional expirationTimeMs, final long retryBackoffMs, final long retryBackoffMaxMs, final double jitter, final MemberInfo memberInfo) { super(logContext, CommitRequestManager.class.getSimpleName(), retryBackoffMs, 2, - retryBackoffMaxMs, jitter, 
memberInfo, deadlineTimer(time, deadlineMs)); + retryBackoffMaxMs, jitter, memberInfo, expirationTimeMs); this.offsets = offsets; this.groupId = groupId; this.groupInstanceId = groupInstanceId; @@ -788,24 +780,40 @@ void removeRequest() { * Represents a request that can be retried or aborted, based on member ID and epoch * information. */ - abstract class RetriableRequestState extends TimedRequestState { + abstract class RetriableRequestState extends RequestState { /** * Member info (ID and epoch) to be included in the request if present. */ final MemberInfo memberInfo; + /** + * Time until which the request should be retried if it fails with retriable + * errors. If not present, the request is triggered without waiting for a response or + * retrying. + */ + private final Optional expirationTimeMs; + + /** + * True if the request expiration time has been reached. This is set when validating the + * request expiration on {@link #poll(long)} before sending it. It is used to know if a + * request should be retried on TimeoutException. 
+ */ + boolean isExpired; + RetriableRequestState(LogContext logContext, String owner, long retryBackoffMs, - long retryBackoffMaxMs, MemberInfo memberInfo, Timer timer) { - super(logContext, owner, retryBackoffMs, retryBackoffMaxMs, timer); + long retryBackoffMaxMs, MemberInfo memberInfo, Optional expirationTimeMs) { + super(logContext, owner, retryBackoffMs, retryBackoffMaxMs); this.memberInfo = memberInfo; + this.expirationTimeMs = expirationTimeMs; } // Visible for testing RetriableRequestState(LogContext logContext, String owner, long retryBackoffMs, int retryBackoffExpBase, - long retryBackoffMaxMs, double jitter, MemberInfo memberInfo, Timer timer) { - super(logContext, owner, retryBackoffMs, retryBackoffExpBase, retryBackoffMaxMs, jitter, timer); + long retryBackoffMaxMs, double jitter, MemberInfo memberInfo, Optional expirationTimeMs) { + super(logContext, owner, retryBackoffMs, retryBackoffExpBase, retryBackoffMaxMs, jitter); this.memberInfo = memberInfo; + this.expirationTimeMs = expirationTimeMs; } /** @@ -820,12 +828,13 @@ abstract class RetriableRequestState extends TimedRequestState { abstract CompletableFuture future(); /** - * Complete the request future with a TimeoutException if the request has been sent out - * at least once and the timeout has been reached. + * Complete the request future with a TimeoutException if the request timeout has been + * reached, based on the provided current time. 
*/ - void maybeExpire() { - if (numAttempts > 0 && isExpired()) { + void maybeExpire(long currentTimeMs) { + if (retryTimeoutExpired(currentTimeMs)) { removeRequest(); + isExpired = true; future().completeExceptionally(new TimeoutException(requestDescription() + " could not complete before timeout expired.")); } @@ -837,12 +846,11 @@ void maybeExpire() { NetworkClientDelegate.UnsentRequest buildRequestWithResponseHandling(final AbstractRequest.Builder builder) { NetworkClientDelegate.UnsentRequest request = new NetworkClientDelegate.UnsentRequest( builder, - coordinatorRequestManager.coordinator() - ); + coordinatorRequestManager.coordinator()); request.whenComplete( (response, throwable) -> { - long completionTimeMs = request.handler().completionTimeMs(); - handleClientResponse(response, throwable, completionTimeMs); + long currentTimeMs = request.handler().completionTimeMs(); + handleClientResponse(response, throwable, currentTimeMs); }); return request; } @@ -867,6 +875,10 @@ private void handleClientResponse(final ClientResponse response, abstract void onResponse(final ClientResponse response); + boolean retryTimeoutExpired(long currentTimeMs) { + return expirationTimeMs.isPresent() && expirationTimeMs.get() <= currentTimeMs; + } + abstract void removeRequest(); // Visible for testing @@ -891,10 +903,10 @@ class OffsetFetchRequestState extends RetriableRequestState { public OffsetFetchRequestState(final Set partitions, final long retryBackoffMs, final long retryBackoffMaxMs, - final long deadlineMs, + final long expirationTimeMs, final MemberInfo memberInfo) { super(logContext, CommitRequestManager.class.getSimpleName(), retryBackoffMs, - retryBackoffMaxMs, memberInfo, deadlineTimer(time, deadlineMs)); + retryBackoffMaxMs, memberInfo, Optional.of(expirationTimeMs)); this.requestedPartitions = partitions; this.future = new CompletableFuture<>(); } @@ -902,11 +914,11 @@ public OffsetFetchRequestState(final Set partitions, public OffsetFetchRequestState(final Set 
partitions, final long retryBackoffMs, final long retryBackoffMaxMs, - final long deadlineMs, + final long expirationTimeMs, final double jitter, final MemberInfo memberInfo) { super(logContext, CommitRequestManager.class.getSimpleName(), retryBackoffMs, 2, - retryBackoffMaxMs, jitter, memberInfo, deadlineTimer(time, deadlineMs)); + retryBackoffMaxMs, jitter, memberInfo, Optional.of(expirationTimeMs)); this.requestedPartitions = partitions; this.future = new CompletableFuture<>(); } @@ -1137,10 +1149,9 @@ private CompletableFuture> addOffsetFetch inflightOffsetFetches.stream().filter(r -> r.sameRequest(request)).findAny(); if (dupe.isPresent() || inflight.isPresent()) { - log.debug("Duplicated unsent offset fetch request found for partitions: {}", request.requestedPartitions); + log.info("Duplicated OffsetFetchRequest: " + request.requestedPartitions); dupe.orElseGet(inflight::get).chainFuture(request.future); } else { - log.debug("Enqueuing offset fetch request for partitions: {}", request.requestedPartitions); this.unsentOffsetFetches.add(request); } return request.future; @@ -1158,7 +1169,7 @@ List drain(final long currentTimeMs) { .filter(request -> !request.canSendRequest(currentTimeMs)) .collect(Collectors.toList()); - failAndRemoveExpiredCommitRequests(); + failAndRemoveExpiredCommitRequests(currentTimeMs); // Add all unsent offset commit requests to the unsentRequests list List unsentRequests = unsentOffsetCommits.stream() @@ -1172,7 +1183,7 @@ List drain(final long currentTimeMs) { unsentOffsetFetches.stream() .collect(Collectors.partitioningBy(request -> request.canSendRequest(currentTimeMs))); - failAndRemoveExpiredFetchRequests(); + failAndRemoveExpiredFetchRequests(currentTimeMs); // Add all sendable offset fetch requests to the unsentRequests list and to the inflightOffsetFetches list for (OffsetFetchRequestState request : partitionedBySendability.get(true)) { @@ -1193,18 +1204,18 @@ List drain(final long currentTimeMs) { * Find the unsent commit 
requests that have expired, remove them and complete their * futures with a TimeoutException. */ - private void failAndRemoveExpiredCommitRequests() { + private void failAndRemoveExpiredCommitRequests(final long currentTimeMs) { Queue requestsToPurge = new LinkedList<>(unsentOffsetCommits); - requestsToPurge.forEach(RetriableRequestState::maybeExpire); + requestsToPurge.forEach(req -> req.maybeExpire(currentTimeMs)); } /** * Find the unsent fetch requests that have expired, remove them and complete their * futures with a TimeoutException. */ - private void failAndRemoveExpiredFetchRequests() { + private void failAndRemoveExpiredFetchRequests(final long currentTimeMs) { Queue requestsToPurge = new LinkedList<>(unsentOffsetFetches); - requestsToPurge.forEach(RetriableRequestState::maybeExpire); + requestsToPurge.forEach(req -> req.maybeExpire(currentTimeMs)); } private void clearAll() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerDelegateCreator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerDelegateCreator.java index 81c45aba69c75..bd95e06c86448 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerDelegateCreator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerDelegateCreator.java @@ -91,7 +91,8 @@ public ConsumerDelegate create(LogContext logContext, valueDeserializer, client, subscriptions, - metadata + metadata, + assignors ); else return new LegacyKafkaConsumer<>( diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index 7616ac6912289..adee6594603bb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -293,13 +293,12 
@@ private void closeInternal(final Duration timeout) { * Check the unsent queue one last time and poll until all requests are sent or the timer runs out. */ private void sendUnsentRequests(final Timer timer) { - if (!networkClientDelegate.hasAnyPendingRequests()) + if (networkClientDelegate.unsentRequests().isEmpty()) return; - do { networkClientDelegate.poll(timer.remainingMs(), timer.currentTimeMs()); timer.update(); - } while (timer.notExpired() && networkClientDelegate.hasAnyPendingRequests()); + } while (timer.notExpired() && !networkClientDelegate.unsentRequests().isEmpty()); } void cleanup() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java index aba539b876326..a6cc28fb0f4c6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java @@ -28,6 +28,7 @@ import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import java.util.Objects; @@ -50,6 +51,7 @@ */ public class CoordinatorRequestManager implements RequestManager { private static final long COORDINATOR_DISCONNECT_LOGGING_INTERVAL_MS = 60 * 1000; + private final Time time; private final Logger log; private final BackgroundEventHandler backgroundEventHandler; private final String groupId; @@ -60,6 +62,7 @@ public class CoordinatorRequestManager implements RequestManager { private Node coordinator; public CoordinatorRequestManager( + final Time time, final LogContext logContext, final long retryBackoffMs, final long retryBackoffMaxMs, @@ -67,6 +70,7 @@ public CoordinatorRequestManager( final String groupId ) { 
Objects.requireNonNull(groupId); + this.time = time; this.log = logContext.logger(this.getClass()); this.backgroundEventHandler = errorHandler; this.groupId = groupId; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/HeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/HeartbeatRequestManager.java index 0ed4a67b5869d..d31d412c65503 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/HeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/HeartbeatRequestManager.java @@ -569,7 +569,7 @@ public ConsumerGroupHeartbeatRequestData buildRequestData() { sentFields.rebalanceTimeoutMs = rebalanceTimeoutMs; } - // SubscribedTopicNames - only sent if it has changed since the last heartbeat + // SubscribedTopicNames - only sent if has changed since the last heartbeat TreeSet subscribedTopicNames = new TreeSet<>(this.subscriptions.subscription()); if (sendAllFields || !subscribedTopicNames.equals(sentFields.subscribedTopicNames)) { data.setSubscribedTopicNames(new ArrayList<>(this.subscriptions.subscription())); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberState.java index a62c634ba437e..9f0c7d947ea7e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberState.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; public enum MemberState { @@ -121,7 +120,7 @@ public enum MemberState { RECONCILING.previousValidStates = Arrays.asList(STABLE, JOINING, ACKNOWLEDGING, RECONCILING); - ACKNOWLEDGING.previousValidStates = Collections.singletonList(RECONCILING); + ACKNOWLEDGING.previousValidStates = Arrays.asList(RECONCILING); 
FATAL.previousValidStates = Arrays.asList(JOINING, STABLE, RECONCILING, ACKNOWLEDGING, PREPARE_LEAVING, LEAVING, UNSUBSCRIBED); @@ -134,11 +133,11 @@ public enum MemberState { PREPARE_LEAVING.previousValidStates = Arrays.asList(JOINING, STABLE, RECONCILING, ACKNOWLEDGING, UNSUBSCRIBED); - LEAVING.previousValidStates = Collections.singletonList(PREPARE_LEAVING); + LEAVING.previousValidStates = Arrays.asList(PREPARE_LEAVING); UNSUBSCRIBED.previousValidStates = Arrays.asList(PREPARE_LEAVING, LEAVING, FENCED); - STALE.previousValidStates = Collections.singletonList(LEAVING); + STALE.previousValidStates = Arrays.asList(LEAVING); } private List previousValidStates; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MembershipManagerImpl.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MembershipManagerImpl.java index 2aabf4ae130e8..76a550ad71985 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MembershipManagerImpl.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MembershipManagerImpl.java @@ -960,7 +960,7 @@ void maybeReconcile() { // best effort to commit the offsets in the case where the epoch might have changed while // the current reconciliation is in process. Note this is using the rebalance timeout as // it is the limit enforced by the broker to complete the reconciliation process. - commitResult = commitRequestManager.maybeAutoCommitSyncBeforeRevocation(getDeadlineMsForTimeout(rebalanceTimeoutMs)); + commitResult = commitRequestManager.maybeAutoCommitSyncBeforeRevocation(getExpirationTimeForTimeout(rebalanceTimeoutMs)); // Execute commit -> onPartitionsRevoked -> onPartitionsAssigned. 
commitResult.whenComplete((__, commitReqError) -> { @@ -986,7 +986,7 @@ void maybeReconcile() { }); } - long getDeadlineMsForTimeout(final long timeoutMs) { + long getExpirationTimeForTimeout(final long timeoutMs) { long expiration = time.milliseconds() + timeoutMs; if (expiration < 0) { return Long.MAX_VALUE; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java index 310e0c417f617..141f5f955c8b5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java @@ -21,12 +21,9 @@ import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.KafkaClient; -import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.NetworkClientUtils; import org.apache.kafka.clients.RequestCompletionHandler; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; -import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; @@ -60,8 +57,6 @@ public class NetworkClientDelegate implements AutoCloseable { private final KafkaClient client; - private final BackgroundEventHandler backgroundEventHandler; - private final Metadata metadata; private final Time time; private final Logger log; private final int requestTimeoutMs; @@ -72,13 +67,9 @@ public NetworkClientDelegate( final Time time, final ConsumerConfig config, final LogContext logContext, - final KafkaClient client, - final Metadata metadata, - final BackgroundEventHandler backgroundEventHandler) { + final KafkaClient client) { 
this.time = time; this.client = client; - this.metadata = metadata; - this.backgroundEventHandler = backgroundEventHandler; this.log = logContext.logger(getClass()); this.unsentRequests = new ArrayDeque<>(); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); @@ -136,25 +127,9 @@ public void poll(final long timeoutMs, final long currentTimeMs) { pollTimeoutMs = Math.min(retryBackoffMs, pollTimeoutMs); } this.client.poll(pollTimeoutMs, currentTimeMs); - maybePropagateMetadataError(); checkDisconnects(currentTimeMs); } - private void maybePropagateMetadataError() { - try { - metadata.maybeThrowAnyException(); - } catch (Exception e) { - backgroundEventHandler.add(new ErrorEvent(e)); - } - } - - /** - * Return true if there is at least one in-flight request or unsent request. - */ - public boolean hasAnyPendingRequests() { - return client.hasInFlightRequests() || !unsentRequests.isEmpty(); - } - /** * Tries to send the requests in the unsentRequest queue. If the request doesn't have an assigned node, it will * find the leastLoadedOne, and will be retried in the next {@code poll()}. 
If the request is expired, a @@ -334,20 +309,11 @@ Optional node() { @Override public String toString() { - String remainingMs; - - if (timer != null) { - timer.update(); - remainingMs = String.valueOf(timer.remainingMs()); - } else { - remainingMs = ""; - } - return "UnsentRequest{" + "requestBuilder=" + requestBuilder + ", handler=" + handler + ", node=" + node + - ", remainingMs=" + remainingMs + + ", timer=" + timer + '}'; } } @@ -405,8 +371,7 @@ public static Supplier supplier(final Time time, final ApiVersions apiVersions, final Metrics metrics, final FetchMetricsManager fetchMetricsManager, - final ClientTelemetrySender clientTelemetrySender, - final BackgroundEventHandler backgroundEventHandler) { + final ClientTelemetrySender clientTelemetrySender) { return new CachedSupplier() { @Override protected NetworkClientDelegate create() { @@ -420,7 +385,7 @@ protected NetworkClientDelegate create() { metadata, fetchMetricsManager.throttleTimeSensor(), clientTelemetrySender); - return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler); + return new NetworkClientDelegate(time, config, logContext, client); } }; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java index 874631db84ec7..75d87432db680 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java @@ -155,7 +155,6 @@ protected RequestManagers create() { apiVersions); final TopicMetadataRequestManager topic = new TopicMetadataRequestManager( logContext, - time, config); HeartbeatRequestManager heartbeatRequestManager = null; MembershipManager membershipManager = null; @@ -164,7 +163,7 @@ protected RequestManagers create() { if (groupRebalanceConfig != null && groupRebalanceConfig.groupId != null) { 
Optional serverAssignor = Optional.ofNullable(config.getString(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG)); - coordinator = new CoordinatorRequestManager( + coordinator = new CoordinatorRequestManager(time, logContext, retryBackoffMs, retryBackoffMaxMs, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegate.java deleted file mode 100644 index 65dd9d2c30f91..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegate.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.consumer.internals; - -import org.apache.kafka.clients.consumer.ShareConsumer; -import org.apache.kafka.common.metrics.Metrics; - -/** - * This extension interface provides a handful of methods to expose internals of the {@link ShareConsumer} for - * various tests. - * - *

- * - * Note: this is for internal use only and is not intended for use by end users. Internal users should - * not attempt to determine the underlying implementation to avoid coding to an unstable interface. Rather, it is - * the {@link ShareConsumer} API contract that should serve as the caller's interface. - */ -public interface ShareConsumerDelegate extends ShareConsumer { - - String clientId(); - - Metrics metricsRegistry(); -} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java deleted file mode 100644 index 3c41a58e0570b..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.clients.consumer.internals; - -import org.apache.kafka.clients.KafkaClient; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.KafkaShareConsumer; -import org.apache.kafka.clients.consumer.ShareConsumer; -import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.common.utils.Time; - -/** - * {@code ShareConsumerDelegateCreator} implements a quasi-factory pattern to allow the caller to remain unaware of the - * underlying {@link ShareConsumer} implementation that is created. This provides the means by which - * {@link KafkaShareConsumer} can remain the top-level facade for implementations, but allow different implementations - * to co-exist under the covers. - * - *

- * Note: this is for internal use only and is not intended for use by end users. Internal users should - * not attempt to determine the underlying implementation to avoid coding to an unstable interface. Rather, it is - * the {@link ShareConsumer} API contract that should serve as the caller's interface. - */ -public class ShareConsumerDelegateCreator { - public ShareConsumerDelegate create(final ConsumerConfig config, - final Deserializer keyDeserializer, - final Deserializer valueDeserializer) { - throw new UnsupportedOperationException("Not implemented"); - } - - public ShareConsumerDelegate create(final LogContext logContext, - final String clientId, - final String groupId, - final ConsumerConfig config, - final Deserializer keyDeserializer, - final Deserializer valueDeserializer, - final Time time, - final KafkaClient client, - final SubscriptionState subscriptions, - final ConsumerMetadata metadata) { - throw new UnsupportedOperationException("Not implemented"); - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TimedRequestState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TimedRequestState.java deleted file mode 100644 index c61032cea7249..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TimedRequestState.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.consumer.internals; - -import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.common.utils.Time; -import org.apache.kafka.common.utils.Timer; - -/** - * {@code TimedRequestState} adds to a {@link RequestState} a {@link Timer} with which to keep track - * of the request's expiration. - */ -public class TimedRequestState extends RequestState { - - private final Timer timer; - - public TimedRequestState(final LogContext logContext, - final String owner, - final long retryBackoffMs, - final long retryBackoffMaxMs, - final Timer timer) { - super(logContext, owner, retryBackoffMs, retryBackoffMaxMs); - this.timer = timer; - } - - public TimedRequestState(final LogContext logContext, - final String owner, - final long retryBackoffMs, - final int retryBackoffExpBase, - final long retryBackoffMaxMs, - final double jitter, - final Timer timer) { - super(logContext, owner, retryBackoffMs, retryBackoffExpBase, retryBackoffMaxMs, jitter); - this.timer = timer; - } - - public boolean isExpired() { - timer.update(); - return timer.isExpired(); - } - - public long remainingMs() { - timer.update(); - return timer.remainingMs(); - } - - public static Timer deadlineTimer(final Time time, final long deadlineMs) { - long diff = Math.max(0, deadlineMs - time.milliseconds()); - return time.timer(diff); - } - - - @Override - protected String toStringBase() { - return super.toStringBase() + ", remainingMs=" + remainingMs(); - } -} diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java index a555d6ce7f359..75a5ed08d1554 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java @@ -29,7 +29,6 @@ import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import java.util.Collections; @@ -62,7 +61,6 @@ */ public class TopicMetadataRequestManager implements RequestManager { - private final Time time; private final boolean allowAutoTopicCreation; private final List inflightRequests; private final long retryBackoffMs; @@ -70,10 +68,9 @@ public class TopicMetadataRequestManager implements RequestManager { private final Logger log; private final LogContext logContext; - public TopicMetadataRequestManager(final LogContext context, final Time time, final ConsumerConfig config) { + public TopicMetadataRequestManager(final LogContext context, final ConsumerConfig config) { logContext = context; log = logContext.logger(getClass()); - this.time = time; inflightRequests = new LinkedList<>(); retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); @@ -84,7 +81,7 @@ public TopicMetadataRequestManager(final LogContext context, final Time time, fi public NetworkClientDelegate.PollResult poll(final long currentTimeMs) { // Prune any requests which have timed out List expiredRequests = inflightRequests.stream() - .filter(TimedRequestState::isExpired) + .filter(req -> req.isExpired(currentTimeMs)) .collect(Collectors.toList()); 
expiredRequests.forEach(TopicMetadataRequestState::expire); @@ -102,10 +99,10 @@ public NetworkClientDelegate.PollResult poll(final long currentTimeMs) { * * @return the future of the metadata request. */ - public CompletableFuture>> requestAllTopicsMetadata(final long deadlineMs) { + public CompletableFuture>> requestAllTopicsMetadata(final long expirationTimeMs) { TopicMetadataRequestState newRequest = new TopicMetadataRequestState( logContext, - deadlineMs, + expirationTimeMs, retryBackoffMs, retryBackoffMaxMs); inflightRequests.add(newRequest); @@ -118,11 +115,11 @@ public CompletableFuture>> requestAllTopicsMetad * @param topic to be requested. * @return the future of the metadata request. */ - public CompletableFuture>> requestTopicMetadata(final String topic, final long deadlineMs) { + public CompletableFuture>> requestTopicMetadata(final String topic, final long expirationTimeMs) { TopicMetadataRequestState newRequest = new TopicMetadataRequestState( logContext, topic, - deadlineMs, + expirationTimeMs, retryBackoffMs, retryBackoffMaxMs); inflightRequests.add(newRequest); @@ -134,32 +131,35 @@ List inflightRequests() { return inflightRequests; } - class TopicMetadataRequestState extends TimedRequestState { + class TopicMetadataRequestState extends RequestState { private final String topic; private final boolean allTopics; + private final long expirationTimeMs; CompletableFuture>> future; public TopicMetadataRequestState(final LogContext logContext, - final long deadlineMs, + final long expirationTimeMs, final long retryBackoffMs, final long retryBackoffMaxMs) { super(logContext, TopicMetadataRequestState.class.getSimpleName(), retryBackoffMs, - retryBackoffMaxMs, deadlineTimer(time, deadlineMs)); + retryBackoffMaxMs); future = new CompletableFuture<>(); this.topic = null; this.allTopics = true; + this.expirationTimeMs = expirationTimeMs; } public TopicMetadataRequestState(final LogContext logContext, final String topic, - final long deadlineMs, + final long 
expirationTimeMs, final long retryBackoffMs, final long retryBackoffMaxMs) { super(logContext, TopicMetadataRequestState.class.getSimpleName(), retryBackoffMs, - retryBackoffMaxMs, deadlineTimer(time, deadlineMs)); + retryBackoffMaxMs); future = new CompletableFuture<>(); this.topic = topic; this.allTopics = false; + this.expirationTimeMs = expirationTimeMs; } /** @@ -167,6 +167,10 @@ public TopicMetadataRequestState(final LogContext logContext, * {@link org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.UnsentRequest} if needed. */ private Optional send(final long currentTimeMs) { + if (currentTimeMs >= expirationTimeMs) { + return Optional.empty(); + } + if (!canSendRequest(currentTimeMs)) { return Optional.empty(); } @@ -179,6 +183,10 @@ private Optional send(final long currentTim return Optional.of(createUnsentRequest(request)); } + private boolean isExpired(final long currentTimeMs) { + return currentTimeMs >= expirationTimeMs; + } + private void expire() { completeFutureAndRemoveRequest( new TimeoutException("Timeout expired while fetching topic metadata")); @@ -202,8 +210,9 @@ private NetworkClientDelegate.UnsentRequest createUnsentRequest( private void handleError(final Throwable exception, final long completionTimeMs) { if (exception instanceof RetriableException) { - if (isExpired()) { - completeFutureAndRemoveRequest(new TimeoutException("Timeout expired while fetching topic metadata")); + if (completionTimeMs >= expirationTimeMs) { + completeFutureAndRemoveRequest( + new TimeoutException("Timeout expired while fetching topic metadata")); } else { onFailedAttempt(completionTimeMs); } @@ -213,12 +222,20 @@ private void handleError(final Throwable exception, } private void handleResponse(final ClientResponse response) { + long responseTimeMs = response.receivedTimeMs(); try { Map> res = handleTopicMetadataResponse((MetadataResponse) response.responseBody()); future.complete(res); inflightRequests.remove(this); - } catch (Exception e) { - 
handleError(e, response.receivedTimeMs()); + } catch (RetriableException e) { + if (responseTimeMs >= expirationTimeMs) { + completeFutureAndRemoveRequest( + new TimeoutException("Timeout expired while fetching topic metadata")); + } else { + onFailedAttempt(responseTimeMs); + } + } catch (Exception t) { + completeFutureAndRemoveRequest(t); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java index 8cd17d19feb66..dffac12902177 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java @@ -43,7 +43,6 @@ public CompletableFuture future() { return future; } - @Override public long deadlineMs() { return deadlineMs; } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java index 9ea7a32f949c5..12e77b0d516b7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java @@ -855,12 +855,13 @@ private boolean shouldStopDrainBatchesForPartition(ProducerBatch first, TopicPar } int firstInFlightSequence = transactionManager.firstInFlightSequence(first.topicPartition); - // If the queued batch already has an assigned sequence, then it is being retried. - // In this case, we wait until the next immediate batch is ready and drain that. - // We only move on when the next in line batch is complete (either successfully or due to - // a fatal broker error). This effectively reduces our in flight request count to 1. 
- return firstInFlightSequence != RecordBatch.NO_SEQUENCE && first.hasSequence() - && first.baseSequence() != firstInFlightSequence; + if (firstInFlightSequence != RecordBatch.NO_SEQUENCE && first.hasSequence() + && first.baseSequence() != firstInFlightSequence) + // If the queued batch already has an assigned sequence, then it is being retried. + // In this case, we wait until the next immediate batch is ready and drain that. + // We only move on when the next in line batch is complete (either successfully or due to + // a fatal broker error). This effectively reduces our in flight request count to 1. + return true; } return false; } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java index 3b3a711990822..ce8aa4a824f90 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java @@ -48,7 +48,7 @@ TxnPartitionEntry get(TopicPartition topicPartition) { } TxnPartitionEntry getOrCreate(TopicPartition topicPartition) { - return topicPartitions.computeIfAbsent(topicPartition, TxnPartitionEntry::new); + return topicPartitions.computeIfAbsent(topicPartition, tp -> new TxnPartitionEntry(tp)); } boolean contains(TopicPartition topicPartition) { diff --git a/clients/src/main/java/org/apache/kafka/common/Cluster.java b/clients/src/main/java/org/apache/kafka/common/Cluster.java index 93f2f4225bc74..820adbdb5fbfa 100644 --- a/clients/src/main/java/org/apache/kafka/common/Cluster.java +++ b/clients/src/main/java/org/apache/kafka/common/Cluster.java @@ -246,8 +246,8 @@ public Node nodeById(int id) { /** * Get the node by node id if the replica for the given partition is online - * @param partition The TopicPartition - * @param id The node id + * @param partition + * @param id * @return the node */ public Optional 
nodeIfOnline(TopicPartition partition, int id) { diff --git a/clients/src/main/java/org/apache/kafka/common/ClusterResource.java b/clients/src/main/java/org/apache/kafka/common/ClusterResource.java index 2f857ff560975..749f2d124e077 100644 --- a/clients/src/main/java/org/apache/kafka/common/ClusterResource.java +++ b/clients/src/main/java/org/apache/kafka/common/ClusterResource.java @@ -30,7 +30,7 @@ public class ClusterResource { * Create {@link ClusterResource} with a cluster id. Note that cluster id may be {@code null} if the * metadata request was sent to a broker without support for cluster ids. The first version of Kafka * to support cluster id is 0.10.1.0. - * @param clusterId The cluster id + * @param clusterId */ public ClusterResource(String clusterId) { this.clusterId = clusterId; diff --git a/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java b/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java index 75576eedb5d75..e2c9ef8a63b6c 100644 --- a/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java +++ b/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java @@ -276,6 +276,11 @@ public void reset() { throw new RuntimeException("reset not supported"); } + @Override + public boolean markSupported() { + return false; + } + /** * Checks whether the version of lz4 on the classpath has the fix for reading from ByteBuffers with * non-zero array offsets (see https://github.com/lz4/lz4-java/pull/65) diff --git a/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java b/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java index 0f1d213f50e36..97e370a383caf 100644 --- a/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java +++ b/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java @@ -333,6 +333,10 @@ public boolean isBlockChecksumSet() { return 
blockChecksum == 1; } + public boolean isBlockIndependenceSet() { + return blockIndependence == 1; + } + public int getVersion() { return version; } diff --git a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java index f7632c8ca56df..b69ca06219a04 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java @@ -113,7 +113,9 @@ public AbstractConfig(ConfigDef definition, Map originals, Map this.originals = resolveConfigVariables(configProviderProps, originalMap); this.values = definition.parse(this.originals); Map configUpdates = postProcessParsedConfig(Collections.unmodifiableMap(this.values)); - this.values.putAll(configUpdates); + for (Map.Entry update : configUpdates.entrySet()) { + this.values.put(update.getKey(), update.getValue()); + } definition.parse(this.values); this.definition = definition; if (doLog) diff --git a/clients/src/main/java/org/apache/kafka/common/config/internals/AllowedPaths.java b/clients/src/main/java/org/apache/kafka/common/config/internals/AllowedPaths.java index b0f1f1a29c78f..4fb6a483f66e7 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/internals/AllowedPaths.java +++ b/clients/src/main/java/org/apache/kafka/common/config/internals/AllowedPaths.java @@ -70,7 +70,7 @@ public Path parseUntrustedPath(String path) { if (allowedPaths != null) { Path normalisedPath = parsedPath.normalize(); - long allowed = allowedPaths.stream().filter(normalisedPath::startsWith).count(); + long allowed = allowedPaths.stream().filter(allowedPath -> normalisedPath.startsWith(allowedPath)).count(); if (allowed == 0) { return null; } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetricsContext.java b/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetricsContext.java index 5fafa144956b5..43eb8cb319465 100644 
--- a/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetricsContext.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetricsContext.java @@ -21,7 +21,7 @@ import java.util.Map; /** - * An implementation of MetricsContext, it encapsulates required metrics context properties for Kafka services and clients + * A implementation of MetricsContext, it encapsulates required metrics context properties for Kafka services and clients */ public class KafkaMetricsContext implements MetricsContext { /** diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java b/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java index 9e987aaedaed6..b52285dac63f4 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java @@ -392,7 +392,7 @@ public synchronized Sensor sensor(String name, MetricConfig config, Sensor.Recor * receive every value recorded with this sensor. * @param name The name of the sensor * @param config A default configuration to use for this sensor for metrics that don't have their own config - * @param inactiveSensorExpirationTimeSeconds If no value is recorded on the Sensor for this duration of time, + * @param inactiveSensorExpirationTimeSeconds If no value if recorded on the Sensor for this duration of time, * it is eligible for removal * @param parents The parent sensors * @param recordingLevel The recording level. @@ -419,7 +419,7 @@ public synchronized Sensor sensor(String name, MetricConfig config, long inactiv * receive every value recorded with this sensor. This uses a default recording level of INFO. 
* @param name The name of the sensor * @param config A default configuration to use for this sensor for metrics that don't have their own config - * @param inactiveSensorExpirationTimeSeconds If no value is recorded on the Sensor for this duration of time, + * @param inactiveSensorExpirationTimeSeconds If no value if recorded on the Sensor for this duration of time, * it is eligible for removal * @param parents The parent sensors * @return The sensor that is created diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/MetricsReporter.java b/clients/src/main/java/org/apache/kafka/common/metrics/MetricsReporter.java index e9b4582096dd1..75771fb4acedc 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/MetricsReporter.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/MetricsReporter.java @@ -40,13 +40,13 @@ public interface MetricsReporter extends Reconfigurable, AutoCloseable { /** * This is called whenever a metric is updated or added - * @param metric The metric that has been added or changed + * @param metric */ void metricChange(KafkaMetric metric); /** * This is called whenever a metric is removed - * @param metric The metric that has been removed + * @param metric */ void metricRemoval(KafkaMetric metric); diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/stats/Histogram.java b/clients/src/main/java/org/apache/kafka/common/metrics/stats/Histogram.java index aa7de6fa6c931..97f91822d8261 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/stats/Histogram.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/stats/Histogram.java @@ -116,6 +116,7 @@ public interface BinScheme { public static class ConstantBinScheme implements BinScheme { private static final int MIN_BIN_NUMBER = 0; private final double min; + private final double max; private final int bins; private final double bucketWidth; private final int maxBinNumber; @@ -131,6 +132,7 @@ public ConstantBinScheme(int bins, 
double min, double max) { if (bins < 2) throw new IllegalArgumentException("Must have at least 2 bins."); this.min = min; + this.max = max; this.bins = bins; this.bucketWidth = (max - min) / bins; this.maxBinNumber = bins - 1; diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java index c6181b81c5e73..4ff41f81b594c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java @@ -25,6 +25,7 @@ import org.apache.kafka.common.utils.Utils; import java.io.Closeable; +import java.io.IOException; import java.net.InetAddress; import java.nio.channels.SelectionKey; import java.util.Map; @@ -71,7 +72,7 @@ KafkaChannel buildChannel(String id, TransportLayer transportLayer, Supplier> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/MessageSizeAccumulator.java b/clients/src/main/java/org/apache/kafka/common/protocol/MessageSizeAccumulator.java index 98b6fc252a983..dac007ec5551f 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/MessageSizeAccumulator.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/MessageSizeAccumulator.java @@ -40,6 +40,17 @@ public int sizeExcludingZeroCopy() { return totalSize - zeroCopySize; } + /** + * Get the total "zero-copy" size of the message. 
This is the summed + * total of all fields which have either have a type of 'bytes' with + * 'zeroCopy' enabled, or a type of 'records' + * + * @return total size of zero-copy data in the message + */ + public int zeroCopySize() { + return zeroCopySize; + } + public void addZeroCopyBytes(int size) { zeroCopySize += size; totalSize += size; diff --git a/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java b/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java index 9ab8715236e74..0f29581623754 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java +++ b/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java @@ -279,7 +279,7 @@ static void writeHeader(DataOutputStream out, long offset, int size) throws IOEx private static final class DataLogInputStream implements LogInputStream { private final InputStream stream; - private final int maxMessageSize; + protected final int maxMessageSize; private final ByteBuffer offsetAndSizeBuffer; DataLogInputStream(InputStream stream, int maxMessageSize) { diff --git a/clients/src/main/java/org/apache/kafka/common/record/CompressionRatioEstimator.java b/clients/src/main/java/org/apache/kafka/common/record/CompressionRatioEstimator.java index 0dbfb3e2f95b1..264525b380920 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/CompressionRatioEstimator.java +++ b/clients/src/main/java/org/apache/kafka/common/record/CompressionRatioEstimator.java @@ -72,6 +72,13 @@ public static void resetEstimation(String topic) { } } + /** + * Remove the compression ratio estimation for a topic. + */ + public static void removeEstimation(String topic) { + COMPRESSION_RATIO.remove(topic); + } + /** * Set the compression estimation for a topic compression type combination. This method is for unit test purpose. 
*/ diff --git a/clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java b/clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java index 58fe9a0c9008a..014a5f4dfb135 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java +++ b/clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java @@ -123,6 +123,14 @@ public boolean isValid() { return sizeInBytes() >= RECORD_OVERHEAD_V0 && checksum() == computeChecksum(); } + public Long wrapperRecordTimestamp() { + return wrapperRecordTimestamp; + } + + public TimestampType wrapperRecordTimestampType() { + return wrapperRecordTimestampType; + } + /** * Throw an InvalidRecordException if isValid is false for this record */ diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java index 70f279a6c29bc..a5985103ec0a0 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java @@ -80,10 +80,10 @@ public void write(int b) { private long producerId; private short producerEpoch; private int baseSequence; - private int uncompressedRecordsSizeInBytes; // Number of bytes (excluding the header) written before compression - private int numRecords; - private float actualCompressionRatio; - private long maxTimestamp; + private int uncompressedRecordsSizeInBytes = 0; // Number of bytes (excluding the header) written before compression + private int numRecords = 0; + private float actualCompressionRatio = 1; + private long maxTimestamp = RecordBatch.NO_TIMESTAMP; private long offsetOfMaxTimestamp = -1; private Long lastOffset = null; private Long baseTimestamp = null; @@ -814,7 +814,7 @@ private void ensureOpenForRecordBatchWrite() { } /** - * Get an estimate of the number of bytes written (based on the estimation factor hard-coded in {@link 
CompressionType}). + * Get an estimate of the number of bytes written (based on the estimation factor hard-coded in {@link CompressionType}. * @return The estimated number of bytes written */ private int estimatedBytesWritten() { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java index eaa5e6dcb8bbd..b51221f5af642 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java @@ -326,20 +326,6 @@ private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, return ListClientMetricsResourcesRequest.parse(buffer, apiVersion); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsRequest.parse(buffer, apiVersion); - case SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatRequest.parse(buffer, apiVersion); - case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeRequest.parse(buffer, apiVersion); - case SHARE_FETCH: - return ShareFetchRequest.parse(buffer, apiVersion); - case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeRequest.parse(buffer, apiVersion); - case ADD_RAFT_VOTER: - return AddRaftVoterRequest.parse(buffer, apiVersion); - case REMOVE_RAFT_VOTER: - return RemoveRaftVoterRequest.parse(buffer, apiVersion); - case UPDATE_RAFT_VOTER: - return UpdateRaftVoterRequest.parse(buffer, apiVersion); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseRequest`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index 8210fb2a7f6b3..dbafdbf3bcb07 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ 
-263,20 +263,6 @@ public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer response return ListClientMetricsResourcesResponse.parse(responseBuffer, version); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsResponse.parse(responseBuffer, version); - case SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatResponse.parse(responseBuffer, version); - case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeResponse.parse(responseBuffer, version); - case SHARE_FETCH: - return ShareFetchResponse.parse(responseBuffer, version); - case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeResponse.parse(responseBuffer, version); - case ADD_RAFT_VOTER: - return AddRaftVoterResponse.parse(responseBuffer, version); - case REMOVE_RAFT_VOTER: - return RemoveRaftVoterResponse.parse(responseBuffer, version); - case UPDATE_RAFT_VOTER: - return UpdateRaftVoterResponse.parse(responseBuffer, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterRequest.java deleted file mode 100644 index 2d385d861c4ac..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterRequest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.AddRaftVoterRequestData; -import org.apache.kafka.common.message.AddRaftVoterResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; - -public class AddRaftVoterRequest extends AbstractRequest { - public static class Builder extends AbstractRequest.Builder { - private final AddRaftVoterRequestData data; - - public Builder(AddRaftVoterRequestData data) { - super(ApiKeys.ADD_RAFT_VOTER); - this.data = data; - } - - @Override - public AddRaftVoterRequest build(short version) { - return new AddRaftVoterRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - - } - - private final AddRaftVoterRequestData data; - - public AddRaftVoterRequest(AddRaftVoterRequestData data, short version) { - super(ApiKeys.ADD_RAFT_VOTER, version); - this.data = data; - } - - @Override - public AddRaftVoterRequestData data() { - return data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - Errors error = Errors.forException(e); - return new AddRaftVoterResponse(new AddRaftVoterResponseData(). - setErrorCode(error.code()). - setErrorMessage(error.message()). 
- setThrottleTimeMs(throttleTimeMs)); - } - - public static AddRaftVoterRequest parse(ByteBuffer buffer, short version) { - return new AddRaftVoterRequest( - new AddRaftVoterRequestData(new ByteBufferAccessor(buffer), version), - version); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java deleted file mode 100644 index 3b931702d4419..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.AddRaftVoterResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Map; - -public class AddRaftVoterResponse extends AbstractResponse { - private final AddRaftVoterResponseData data; - - public AddRaftVoterResponse(AddRaftVoterResponseData data) { - super(ApiKeys.ADD_RAFT_VOTER); - this.data = data; - } - - @Override - public AddRaftVoterResponseData data() { - return data; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - // not supported - } - - @Override - public Map errorCounts() { - if (data.errorCode() != Errors.NONE.code()) { - return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); - } else { - return Collections.emptyMap(); - } - } - - public static AddRaftVoterResponse parse(ByteBuffer buffer, short version) { - return new AddRaftVoterResponse( - new AddRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java index 142210f765d01..0aae47aa24a96 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java @@ -83,20 +83,20 @@ public Builder(AlterPartitionRequestData data, boolean canUseTopicIds) { @Override public AlterPartitionRequest build(short version) { if (version < 3) { - data.topics().forEach(topicData -> + data.topics().forEach(topicData -> { topicData.partitions().forEach(partitionData -> { // The newIsrWithEpochs will be empty 
after build. Then we can skip the conversion if the build // is called again. if (!partitionData.newIsrWithEpochs().isEmpty()) { List newIsr = new ArrayList<>(partitionData.newIsrWithEpochs().size()); - partitionData.newIsrWithEpochs().forEach(brokerState -> - newIsr.add(brokerState.brokerId()) - ); + partitionData.newIsrWithEpochs().forEach(brokerState -> { + newIsr.add(brokerState.brokerId()); + }); partitionData.setNewIsr(newIsr); partitionData.setNewIsrWithEpochs(Collections.emptyList()); } - }) - ); + }); + }); } return new AlterPartitionRequest(data, version); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java index 38b8eaf275bbd..9ee92f7b809cd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java @@ -44,9 +44,9 @@ public AlterPartitionResponseData data() { public Map errorCounts() { Map counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); - data.topics().forEach(topicResponse -> topicResponse.partitions().forEach(partitionResponse -> - updateErrorCounts(counts, Errors.forCode(partitionResponse.errorCode())) - )); + data.topics().forEach(topicResponse -> topicResponse.partitions().forEach(partitionResponse -> { + updateErrorCounts(counts, Errors.forCode(partitionResponse.errorCode())); + })); return counts; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ControlledShutdownResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ControlledShutdownResponse.java index d3b606eeac63a..bc5aa0ba35a33 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ControlledShutdownResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ControlledShutdownResponse.java @@ -76,11 +76,11 @@ public static 
ControlledShutdownResponse prepareResponse(Errors error, Set + tps.forEach(tp -> { pSet.add(new RemainingPartition() .setTopicName(tp.topic()) - .setPartitionIndex(tp.partition())) - ); + .setPartitionIndex(tp.partition())); + }); data.setRemainingPartitions(pSet); return new ControlledShutdownResponse(data); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java index 2cf8dd40a15c9..27e7c581c33bb 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java @@ -107,7 +107,7 @@ private void validate(short version) { } public static DeleteAclsFilterResult filterResult(AclDeleteResult result) { - ApiError error = result.exception().map(ApiError::fromThrowable).orElse(ApiError.NONE); + ApiError error = result.exception().map(e -> ApiError.fromThrowable(e)).orElse(ApiError.NONE); List matchingAcls = result.aclBindingDeleteResults().stream() .map(DeleteAclsResponse::matchingAcl) .collect(Collectors.toList()); @@ -118,7 +118,7 @@ public static DeleteAclsFilterResult filterResult(AclDeleteResult result) { } private static DeleteAclsMatchingAcl matchingAcl(AclDeleteResult.AclBindingDeleteResult result) { - ApiError error = result.exception().map(ApiError::fromThrowable).orElse(ApiError.NONE); + ApiError error = result.exception().map(e -> ApiError.fromThrowable(e)).orElse(ApiError.NONE); AclBinding acl = result.aclBinding(); return matchingAcl(acl, error); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsRequest.java index 60b4031725e80..006491f74c067 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsRequest.java @@ 
-92,7 +92,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { public List topicNames() { if (version() >= 6) - return data.topics().stream().map(DeleteTopicState::name).collect(Collectors.toList()); + return data.topics().stream().map(topic -> topic.name()).collect(Collectors.toList()); return data.topicNames(); } @@ -104,7 +104,7 @@ public int numberOfTopics() { public List topicIds() { if (version() >= 6) - return data.topics().stream().map(DeleteTopicState::topicId).collect(Collectors.toList()); + return data.topics().stream().map(topic -> topic.topicId()).collect(Collectors.toList()); return Collections.emptyList(); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsRequest.java index 1bae21a9e9c91..d612ca80949a0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsRequest.java @@ -58,12 +58,12 @@ public DescribeConfigsResponse getErrorResponse(int throttleTimeMs, Throwable e) Errors error = Errors.forException(e); return new DescribeConfigsResponse(new DescribeConfigsResponseData() .setThrottleTimeMs(throttleTimeMs) - .setResults(data.resources().stream().map(result -> - new DescribeConfigsResponseData.DescribeConfigsResult().setErrorCode(error.code()) + .setResults(data.resources().stream().map(result -> { + return new DescribeConfigsResponseData.DescribeConfigsResult().setErrorCode(error.code()) .setErrorMessage(error.message()) .setResourceName(result.resourceName()) - .setResourceType(result.resourceType())) - .collect(Collectors.toList()) + .setResourceType(result.resourceType()); + }).collect(Collectors.toList()) )); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java 
b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java index e0bba392a076b..cbf3054217363 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java @@ -59,9 +59,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { public Map errorCounts() { Map errorCounts = new HashMap<>(); errorCounts.put(Errors.forCode(data.errorCode()), 1); - data.results().forEach(result -> - updateErrorCounts(errorCounts, Errors.forCode(result.errorCode())) - ); + data.results().forEach(result -> { + updateErrorCounts(errorCounts, Errors.forCode(result.errorCode())); + }); return errorCounts; } @@ -93,11 +93,13 @@ public LogDirInfo(Errors error, Map replicaInfos) { @Override public String toString() { - return "(error=" + - error + - ", replicas=" + - replicaInfos + - ")"; + StringBuilder builder = new StringBuilder(); + builder.append("(error=") + .append(error) + .append(", replicas=") + .append(replicaInfos) + .append(")"); + return builder.toString(); } } @@ -124,13 +126,15 @@ public ReplicaInfo(long size, long offsetLag, boolean isFuture) { @Override public String toString() { - return "(size=" + - size + - ", offsetLag=" + - offsetLag + - ", isFuture=" + - isFuture + - ")"; + StringBuilder builder = new StringBuilder(); + builder.append("(size=") + .append(size) + .append(", offsetLag=") + .append(offsetLag) + .append(", isFuture=") + .append(isFuture) + .append(")"); + return builder.toString(); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java index 1082200ec393c..2065a15d94259 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java @@ -316,18 +316,20 @@ public FetchRequest build(short version) { 
@Override public String toString() { - return "(type=FetchRequest" + - ", replicaId=" + replicaId + - ", maxWait=" + maxWait + - ", minBytes=" + minBytes + - ", maxBytes=" + maxBytes + - ", fetchData=" + toFetch + - ", isolationLevel=" + isolationLevel + - ", removed=" + removed.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", ")) + - ", replaced=" + replaced.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", ")) + - ", metadata=" + metadata + - ", rackId=" + rackId + - ")"; + StringBuilder bld = new StringBuilder(); + bld.append("(type=FetchRequest"). + append(", replicaId=").append(replicaId). + append(", maxWait=").append(maxWait). + append(", minBytes=").append(minBytes). + append(", maxBytes=").append(maxBytes). + append(", fetchData=").append(toFetch). + append(", isolationLevel=").append(isolationLevel). + append(", removed=").append(removed.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", "))). + append(", replaced=").append(replaced.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", "))). + append(", metadata=").append(metadata). + append(", rackId=").append(rackId). + append(")"); + return bld.toString(); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java index 2ea0265f6992d..c86ba9cfc6616 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java @@ -84,7 +84,7 @@ public static FetchSnapshotResponseData withTopLevelError(Errors error) { /** * Creates a FetchSnapshotResponseData with a single PartitionSnapshot for the topic partition. * - * The partition index will already be populated when calling operator. + * The partition index will already be populated when calling operator. 
* * @param topicPartition the topic partition to include * @param operator unary operator responsible for populating all of the appropriate fields diff --git a/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java index 10041633faeed..af10a6c78aebd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java @@ -86,7 +86,7 @@ public static String maybeTruncateReason(final String reason) { /** * Since JoinGroupRequest version 4, a client that sends a join group request with - * {@link #UNKNOWN_MEMBER_ID} needs to rejoin with a new member id generated + * {@link #UNKNOWN_MEMBER_ID} needs to rejoin with a new member id generated * by the server. Once the second join group request is complete, the client is * added as a new member of the group. * diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java index 9fc83cfd847a8..8caddb0054169 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java @@ -112,14 +112,16 @@ private static Map groupByTopic(List errorCounts() { updateErrorCounts(combinedErrorCounts, Errors.forCode(data.errorCode())); // Member level error. 
- data.members().forEach(memberResponse -> - updateErrorCounts(combinedErrorCounts, Errors.forCode(memberResponse.errorCode())) - ); + data.members().forEach(memberResponse -> { + updateErrorCounts(combinedErrorCounts, Errors.forCode(memberResponse.errorCode())); + }); return combinedErrorCounts; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java index 57aeb9de1bbb5..fc996453d6470 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java @@ -181,4 +181,13 @@ public static List toListOffsetsTopics(Map(topics.values()); } + + public static ListOffsetsTopic singletonRequestData(String topic, int partitionIndex, long timestamp, int maxNumOffsets) { + return new ListOffsetsTopic() + .setName(topic) + .setPartitions(Collections.singletonList(new ListOffsetsPartition() + .setPartitionIndex(partitionIndex) + .setTimestamp(timestamp) + .setMaxNumOffsets(maxNumOffsets))); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java index 88111b1007717..0eba5f29ab4a6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java @@ -101,11 +101,11 @@ public static OffsetCommitResponseData getErrorResponse( .setName(topic.name()); response.topics().add(responseTopic); - topic.partitions().forEach(partition -> + topic.partitions().forEach(partition -> { responseTopic.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) - .setErrorCode(error.code())) - ); + .setErrorCode(error.code())); + }); }); return response; } diff --git 
a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java index 2b6d00b1a47f6..9848ad380fab0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java @@ -160,11 +160,13 @@ public

Builder addPartitions( Errors error ) { final OffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName); - partitions.forEach(partition -> + + partitions.forEach(partition -> { topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partitionIndex.apply(partition)) - .setErrorCode(error.code())) - ); + .setErrorCode(error.code())); + }); + return this; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java index aa9b4bc4ffe66..e8c21969e9e3b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java @@ -83,11 +83,13 @@ public

Builder addPartitions( Errors error ) { final OffsetDeleteResponseTopic topicResponse = getOrCreateTopic(topicName); - partitions.forEach(partition -> + + partitions.forEach(partition -> { topicResponse.partitions().add(new OffsetDeleteResponsePartition() .setPartitionIndex(partitionIndex.apply(partition)) - .setErrorCode(error.code())) - ); + .setErrorCode(error.code())); + }); + return this; } @@ -111,9 +113,9 @@ public Builder merge( // Otherwise, we add the partitions to the existing one. Note we // expect non-overlapping partitions here as we don't verify // if the partition is already in the list before adding it. - newTopic.partitions().forEach(partition -> - existingTopic.partitions().add(partition.duplicate()) - ); + newTopic.partitions().forEach(partition -> { + existingTopic.partitions().add(partition.duplicate()); + }); } }); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java index 1b748dca28e61..70473565f63d5 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java @@ -237,12 +237,12 @@ public List groups() { group.setTopics(null); } else { // Otherwise, topics are translated to the new structure. 
- data.topics().forEach(topic -> + data.topics().forEach(topic -> { group.topics().add(new OffsetFetchRequestTopics() .setName(topic.name()) .setPartitionIndexes(topic.partitionIndexes()) - ) - ); + ); + }); } return Collections.singletonList(group); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java index 3f2679c95aa72..4724ce4789ccb 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java @@ -94,12 +94,14 @@ private ProduceRequest build(short version, boolean validate) { @Override public String toString() { - return "(type=ProduceRequest" + - ", acks=" + data.acks() + - ", timeout=" + data.timeoutMs() + - ", partitionRecords=(" + data.topicData().stream().flatMap(d -> d.partitionData().stream()).collect(Collectors.toList()) + - "), transactionalId='" + (data.transactionalId() != null ? data.transactionalId() : "") + - "'"; + StringBuilder bld = new StringBuilder(); + bld.append("(type=ProduceRequest") + .append(", acks=").append(data.acks()) + .append(", timeout=").append(data.timeoutMs()) + .append(", partitionRecords=(").append(data.topicData().stream().flatMap(d -> d.partitionData().stream()).collect(Collectors.toList())) + .append("), transactionalId='").append(data.transactionalId() != null ? data.transactionalId() : "") + .append("'"); + return bld.toString(); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterRequest.java deleted file mode 100644 index cf5f1dc0ce20b..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterRequest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.RemoveRaftVoterRequestData; -import org.apache.kafka.common.message.RemoveRaftVoterResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; - -public class RemoveRaftVoterRequest extends AbstractRequest { - public static class Builder extends AbstractRequest.Builder { - private final RemoveRaftVoterRequestData data; - - public Builder(RemoveRaftVoterRequestData data) { - super(ApiKeys.REMOVE_RAFT_VOTER); - this.data = data; - } - - @Override - public RemoveRaftVoterRequest build(short version) { - return new RemoveRaftVoterRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - - } - - private final RemoveRaftVoterRequestData data; - - public RemoveRaftVoterRequest(RemoveRaftVoterRequestData data, short version) { - super(ApiKeys.REMOVE_RAFT_VOTER, version); - this.data = data; - } - - @Override - public RemoveRaftVoterRequestData data() { - return data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - Errors error = Errors.forException(e); - return new 
RemoveRaftVoterResponse(new RemoveRaftVoterResponseData(). - setErrorCode(error.code()). - setErrorMessage(error.message()). - setThrottleTimeMs(throttleTimeMs)); - } - - public static RemoveRaftVoterRequest parse(ByteBuffer buffer, short version) { - return new RemoveRaftVoterRequest( - new RemoveRaftVoterRequestData(new ByteBufferAccessor(buffer), version), - version); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterResponse.java deleted file mode 100644 index a74711e2f836f..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterResponse.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.RemoveRaftVoterResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Map; - -public class RemoveRaftVoterResponse extends AbstractResponse { - private final RemoveRaftVoterResponseData data; - - public RemoveRaftVoterResponse(RemoveRaftVoterResponseData data) { - super(ApiKeys.REMOVE_RAFT_VOTER); - this.data = data; - } - - @Override - public RemoveRaftVoterResponseData data() { - return data; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - // not supported - } - - @Override - public Map errorCounts() { - if (data.errorCode() != Errors.NONE.code()) { - return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); - } else { - return Collections.emptyMap(); - } - } - - public static RemoveRaftVoterResponse parse(ByteBuffer buffer, short version) { - return new RemoveRaftVoterResponse( - new RemoveRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java index 6245cb27c6c47..940a16f0a8589 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java @@ -94,13 +94,15 @@ public StopReplicaRequest build(short version) { @Override public String toString() { - return "(type=StopReplicaRequest" + - ", controllerId=" + controllerId + - ", controllerEpoch=" + controllerEpoch + - ", brokerEpoch=" + brokerEpoch + - ", deletePartitions=" + deletePartitions + - ", topicStates=" 
+ topicStates.stream().map(StopReplicaTopicState::toString).collect(Collectors.joining(",")) + - ")"; + StringBuilder bld = new StringBuilder(); + bld.append("(type=StopReplicaRequest"). + append(", controllerId=").append(controllerId). + append(", controllerEpoch=").append(controllerEpoch). + append(", brokerEpoch=").append(brokerEpoch). + append(", deletePartitions=").append(deletePartitions). + append(", topicStates=").append(topicStates.stream().map(StopReplicaTopicState::toString).collect(Collectors.joining(","))). + append(")"); + return bld.toString(); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupRequest.java index d2da48fb018ee..8242b71a03fbd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupRequest.java @@ -77,7 +77,7 @@ public Map groupAssignments() { } /** - * ProtocolType and ProtocolName are mandatory since version 5. This method verifies that + * ProtocolType and ProtocolName are mandatory since version 5. This method verifies that * they are defined for version 5 or higher, or returns true otherwise for older versions. 
*/ public boolean areMandatoryProtocolTypeAndNamePresent() { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java index e79b3bbc7b3be..e70fe11891afe 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java @@ -199,11 +199,11 @@ public static TxnOffsetCommitResponseData getErrorResponse( .setName(topic.name()); response.topics().add(responseTopic); - topic.partitions().forEach(partition -> + topic.partitions().forEach(partition -> { responseTopic.partitions().add(new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) - .setErrorCode(error.code())) - ); + .setErrorCode(error.code())); + }); }); return response; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java index ce7dd9e7f1cbb..58f9d5c099c24 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java @@ -88,11 +88,11 @@ public

Builder addPartitions( ) { final TxnOffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName); - partitions.forEach(partition -> + partitions.forEach(partition -> { topicResponse.partitions().add(new TxnOffsetCommitResponsePartition() .setPartitionIndex(partitionIndex.apply(partition)) - .setErrorCode(error.code())) - ); + .setErrorCode(error.code())); + }); return this; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java index 15a4dfff1a6a0..b846fb7b0f9ed 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java @@ -133,15 +133,17 @@ private static Map groupByTopic(Map { - private final UpdateRaftVoterRequestData data; - - public Builder(UpdateRaftVoterRequestData data) { - super(ApiKeys.UPDATE_RAFT_VOTER); - this.data = data; - } - - @Override - public UpdateRaftVoterRequest build(short version) { - return new UpdateRaftVoterRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - - } - - private final UpdateRaftVoterRequestData data; - - public UpdateRaftVoterRequest(UpdateRaftVoterRequestData data, short version) { - super(ApiKeys.UPDATE_RAFT_VOTER, version); - this.data = data; - } - - @Override - public UpdateRaftVoterRequestData data() { - return data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - return new UpdateRaftVoterResponse(new UpdateRaftVoterResponseData(). - setErrorCode(Errors.forException(e).code()). 
- setThrottleTimeMs(throttleTimeMs)); - } - - public static UpdateRaftVoterRequest parse(ByteBuffer buffer, short version) { - return new UpdateRaftVoterRequest( - new UpdateRaftVoterRequestData(new ByteBufferAccessor(buffer), version), - version); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java deleted file mode 100644 index 5c89caed2ef94..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.UpdateRaftVoterResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Map; - -public class UpdateRaftVoterResponse extends AbstractResponse { - private final UpdateRaftVoterResponseData data; - - public UpdateRaftVoterResponse(UpdateRaftVoterResponseData data) { - super(ApiKeys.UPDATE_RAFT_VOTER); - this.data = data; - } - - @Override - public UpdateRaftVoterResponseData data() { - return data; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - // not supported - } - - @Override - public Map errorCounts() { - if (data.errorCode() != Errors.NONE.code()) { - return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); - } else { - return Collections.emptyMap(); - } - } - - public static UpdateRaftVoterResponse parse(ByteBuffer buffer, short version) { - return new UpdateRaftVoterResponse( - new UpdateRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/LoginManager.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/LoginManager.java index 49e537c1f3a1f..f84a8ac9b7532 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/LoginManager.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/LoginManager.java @@ -76,7 +76,7 @@ private LoginManager(JaasContext jaasContext, String saslMechanism, Map payload, String claimName) { diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java 
b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java index a910dfdf02d33..f74629bc5e1ad 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java @@ -136,15 +136,17 @@ public Key resolveKey(JsonWebSignature jws, List nestingContex if (refreshingHttpsJwks.maybeExpediteRefresh(keyId)) log.debug("Refreshing JWKs from {} as no suitable verification key for JWS w/ header {} was found in {}", refreshingHttpsJwks.getLocation(), jws.getHeaders().getFullHeaderAsJsonString(), jwks); - String sb = "Unable to find a suitable verification key for JWS w/ header " + jws.getHeaders().getFullHeaderAsJsonString() + - " from JWKs " + jwks + " obtained from " + - refreshingHttpsJwks.getLocation(); - throw new UnresolvableKeyException(sb); + StringBuilder sb = new StringBuilder(); + sb.append("Unable to find a suitable verification key for JWS w/ header ").append(jws.getHeaders().getFullHeaderAsJsonString()); + sb.append(" from JWKs ").append(jwks).append(" obtained from ").append( + refreshingHttpsJwks.getLocation()); + throw new UnresolvableKeyException(sb.toString()); } catch (JoseException | IOException e) { - String sb = "Unable to find a suitable verification key for JWS w/ header " + jws.getHeaders().getFullHeaderAsJsonString() + - " due to an unexpected exception (" + e + ") while obtaining or using keys from JWKS endpoint at " + - refreshingHttpsJwks.getLocation(); - throw new UnresolvableKeyException(sb, e); + StringBuilder sb = new StringBuilder(); + sb.append("Unable to find a suitable verification key for JWS w/ header ").append(jws.getHeaders().getFullHeaderAsJsonString()); + sb.append(" due to an unexpected exception (").append(e).append(") while obtaining or using 
keys from JWKS endpoint at ").append( + refreshingHttpsJwks.getLocation()); + throw new UnresolvableKeyException(sb.toString(), e); } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java index d07663b723e7e..5f51d456efdcb 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java @@ -181,11 +181,13 @@ else if (scopeRaw instanceof Collection) Long issuedAt = ClaimValidationUtils.validateIssuedAt(ReservedClaimNames.ISSUED_AT, issuedAtRaw != null ? issuedAtRaw.getValueInMillis() : null); - return new BasicOAuthBearerToken(accessToken, + OAuthBearerToken token = new BasicOAuthBearerToken(accessToken, scopes, expiration, sub, issuedAt); + + return token; } private T getClaim(ClaimSupplier supplier, String claimName) throws ValidateException { diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandler.java index 629053e6550c3..4c776159580e5 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandler.java @@ -188,7 +188,7 @@ private void handleTokenCallback(OAuthBearerTokenCallback callback) { callback.token(null); return; } - if (moduleOptions.keySet().stream().allMatch(name -> name.startsWith(EXTENSION_PREFIX))) { + if 
(moduleOptions.keySet().stream().noneMatch(name -> !name.startsWith(EXTENSION_PREFIX))) { throw new OAuthBearerConfigException("Extensions provided in login context without a token"); } String principalClaimNameValue = optionValue(PRINCIPAL_CLAIM_NAME_OPTION); diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java index e9fa9e8bce850..7a81521518cd1 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java @@ -117,8 +117,9 @@ public void configure(Map configs, String saslMechanism, List unmodifiableModuleOptions = Collections .unmodifiableMap((Map) jaasConfigEntries.get(0).getOptions()); + this.moduleOptions = unmodifiableModuleOptions; configured = true; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java index 4dc720ac59771..f12a482f14901 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java @@ -176,7 +176,7 @@ public static OAuthBearerValidationResult validateScope(OAuthBearerToken token, if (!tokenScope.contains(requiredScopeElement)) return OAuthBearerValidationResult.newFailure(String.format( "The provided scope (%s) was missing a required scope (%s). 
All required scope elements: %s", - tokenScope, requiredScopeElement, requiredScope), + String.valueOf(tokenScope), requiredScopeElement, requiredScope), requiredScope.toString(), null); } return OAuthBearerValidationResult.newSuccess(); diff --git a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java index 28c2e5568ee40..d5d55a65021c8 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java +++ b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java @@ -70,6 +70,7 @@ enum State { private final ScramFormatter formatter; private final CallbackHandler callbackHandler; private State state; + private String username; private ClientFirstMessage clientFirstMessage; private ServerFirstMessage serverFirstMessage; private ScramExtensions scramExtensions; @@ -107,7 +108,7 @@ public byte[] evaluateResponse(byte[] response) throws SaslException, SaslAuthen String serverNonce = formatter.secureRandomString(); try { String saslName = clientFirstMessage.saslName(); - String username = ScramFormatter.username(saslName); + this.username = ScramFormatter.username(saslName); NameCallback nameCallback = new NameCallback("username", username); ScramCredentialCallback credentialCallback; if (scramExtensions.tokenAuthenticated()) { diff --git a/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java b/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java index 3bdf9b70f2a0a..3ca8ca6fcc027 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java @@ -109,7 +109,10 @@ public boolean shouldBeRebuilt(Map nextConfigs) { if (truststore != null && truststore.modified()) { return 
true; } - return keystore != null && keystore.modified(); + if (keystore != null && keystore.modified()) { + return true; + } + return false; } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/ssl/SslPrincipalMapper.java b/clients/src/main/java/org/apache/kafka/common/security/ssl/SslPrincipalMapper.java index bd8c50a0a8b7f..0fb83281bf864 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/ssl/SslPrincipalMapper.java +++ b/clients/src/main/java/org/apache/kafka/common/security/ssl/SslPrincipalMapper.java @@ -178,7 +178,7 @@ private String escapeLiteralBackReferences(final String unescaped, final int num final StringBuilder sb = new StringBuilder(value.length() + 1); final int groupStart = backRefMatcher.start(1); - sb.append(value, 0, groupStart - 1); + sb.append(value.substring(0, groupStart - 1)); sb.append("\\"); sb.append(value.substring(groupStart - 1)); value = sb.toString(); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java b/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java index 68e182d22784a..4777f7bf96b49 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java @@ -117,8 +117,9 @@ private static MethodHandle unmapJava7Or8(MethodHandles.Lookup lookup) throws Re MethodHandle nonNullTest = lookup.findStatic(ByteBufferUnmapper.class, "nonNull", methodType(boolean.class, Object.class)).asType(methodType(boolean.class, cleanerClass)); MethodHandle noop = dropArguments(constant(Void.class, null).asType(methodType(void.class)), 0, cleanerClass); - return filterReturnValue(directBufferCleanerMethod, guardWithTest(nonNullTest, cleanMethod, noop)) + MethodHandle unmapper = filterReturnValue(directBufferCleanerMethod, guardWithTest(nonNullTest, cleanMethod, noop)) .asType(methodType(void.class, ByteBuffer.class)); + return unmapper; } private 
static MethodHandle unmapJava9(MethodHandles.Lookup lookup) throws ReflectiveOperationException { diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Java.java b/clients/src/main/java/org/apache/kafka/common/utils/Java.java index 2552db250d55a..2310f422d0e1b 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Java.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Java.java @@ -36,7 +36,7 @@ static Version parseVersion(String versionString) { return new Version(majorVersion, minorVersion); } - // Having these as static final provides the best opportunity for compiler optimization + // Having these as static final provides the best opportunity for compilar optimization public static final boolean IS_JAVA9_COMPATIBLE = VERSION.isJava9Compatible(); public static final boolean IS_JAVA11_COMPATIBLE = VERSION.isJava11Compatible(); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java b/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java index 824a1c4ddb0d3..55eb49a2704aa 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java @@ -37,6 +37,7 @@ public class LoggingSignalHandler { private final Constructor signalConstructor; private final Class signalHandlerClass; + private final Class signalClass; private final Method signalHandleMethod; private final Method signalGetNameMethod; private final Method signalHandlerHandleMethod; @@ -47,7 +48,7 @@ public class LoggingSignalHandler { * @throws ReflectiveOperationException if the underlying API has changed in an incompatible manner. 
*/ public LoggingSignalHandler() throws ReflectiveOperationException { - Class signalClass = Class.forName("sun.misc.Signal"); + signalClass = Class.forName("sun.misc.Signal"); signalConstructor = signalClass.getConstructor(String.class); signalHandlerClass = Class.forName("sun.misc.SignalHandler"); signalHandlerHandleMethod = signalHandlerClass.getMethod("handle", signalClass); diff --git a/clients/src/main/resources/common/message/AddRaftVoterRequest.json b/clients/src/main/resources/common/message/AddRaftVoterRequest.json deleted file mode 100644 index e2f129b13353a..0000000000000 --- a/clients/src/main/resources/common/message/AddRaftVoterRequest.json +++ /dev/null @@ -1,40 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -{ - "apiKey": 80, - "type": "request", - "listeners": ["controller", "broker"], - "name": "AddRaftVoterRequest", - "validVersions": "0", - "flexibleVersions": "0+", - "fields": [ - { "name": "ClusterId", "type": "string", "versions": "0+" }, - { "name": "TimeoutMs", "type": "int32", "versions": "0+" }, - { "name": "VoterId", "type": "int32", "versions": "0+", - "about": "The replica id of the voter getting added to the topic partition" }, - { "name": "VoterDirectoryId", "type": "uuid", "versions": "0+", - "about": "The directory id of the voter getting added to the topic partition" }, - { "name": "Listeners", "type": "[]Listener", "versions": "0+", - "about": "The endpoints that can be used to communicate with the voter", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, - "about": "The name of the endpoint" }, - { "name": "Host", "type": "string", "versions": "0+", - "about": "The hostname" }, - { "name": "Port", "type": "uint16", "versions": "0+", - "about": "The port" } - ]} - ] -} diff --git a/clients/src/main/resources/common/message/AddRaftVoterResponse.json b/clients/src/main/resources/common/message/AddRaftVoterResponse.json deleted file mode 100644 index 3173f0d4d3a57..0000000000000 --- a/clients/src/main/resources/common/message/AddRaftVoterResponse.json +++ /dev/null @@ -1,30 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 80, - "type": "response", - "name": "AddRaftVoterResponse", - "validVersions": "0", - "flexibleVersions": "0+", - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error" }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "ignorable": true, - "about": "The error message, or null if there was no error." } - ] -} diff --git a/clients/src/main/resources/common/message/RemoveRaftVoterRequest.json b/clients/src/main/resources/common/message/RemoveRaftVoterRequest.json deleted file mode 100644 index 182c8647db629..0000000000000 --- a/clients/src/main/resources/common/message/RemoveRaftVoterRequest.json +++ /dev/null @@ -1,30 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 81, - "type": "request", - "listeners": ["controller", "broker"], - "name": "RemoveRaftVoterRequest", - "validVersions": "0", - "flexibleVersions": "0+", - "fields": [ - { "name": "ClusterId", "type": "string", "versions": "0+" }, - { "name": "VoterId", "type": "int32", "versions": "0+", - "about": "The replica id of the voter getting removed from the topic partition" }, - { "name": "VoterDirectoryId", "type": "uuid", "versions": "0+", - "about": "The directory id of the voter getting removed from the topic partition" } - ] -} diff --git a/clients/src/main/resources/common/message/RemoveRaftVoterResponse.json b/clients/src/main/resources/common/message/RemoveRaftVoterResponse.json deleted file mode 100644 index 5f62059f35047..0000000000000 --- a/clients/src/main/resources/common/message/RemoveRaftVoterResponse.json +++ /dev/null @@ -1,30 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 81, - "type": "response", - "name": "RemoveRaftVoterResponse", - "validVersions": "0", - "flexibleVersions": "0+", - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error" }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "ignorable": true, - "about": "The error message, or null if there was no error." } - ] -} diff --git a/clients/src/main/resources/common/message/UpdateRaftVoterRequest.json b/clients/src/main/resources/common/message/UpdateRaftVoterRequest.json deleted file mode 100644 index 80ee58a43a3d6..0000000000000 --- a/clients/src/main/resources/common/message/UpdateRaftVoterRequest.json +++ /dev/null @@ -1,46 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -{ - "apiKey": 82, - "type": "request", - "listeners": ["controller"], - "name": "UpdateRaftVoterRequest", - "validVersions": "0", - "flexibleVersions": "0+", - "fields": [ - { "name": "ClusterId", "type": "string", "versions": "0+" }, - { "name": "VoterId", "type": "int32", "versions": "0+", - "about": "The replica id of the voter getting updated in the topic partition" }, - { "name": "VoterDirectoryId", "type": "uuid", "versions": "0+", - "about": "The directory id of the voter getting updated in the topic partition" }, - { "name": "Listeners", "type": "[]Listener", "versions": "0+", - "about": "The endpoint that can be used to communicate with the leader", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, - "about": "The name of the endpoint" }, - { "name": "Host", "type": "string", "versions": "0+", - "about": "The hostname" }, - { "name": "Port", "type": "uint16", "versions": "0+", - "about": "The port" } - ]}, - { "name": "KRaftVersionFeature", "type": "KRaftVersionFeature", "versions": "0+", - "about": "The range of versions of the protocol that the replica supports", "fields": [ - { "name": "MinSupportedVersion", "type": "int16", "versions": "0+", - "about": "The minimum supported KRaft protocol version" }, - { "name": "MaxSupportedVersion", "type": "int16", "versions": "0+", - "about": "The maximum supported KRaft protocol version" } - ]} - ] -} diff --git a/clients/src/main/resources/common/message/UpdateRaftVoterResponse.json b/clients/src/main/resources/common/message/UpdateRaftVoterResponse.json deleted file mode 100644 index 64816406c7426..0000000000000 --- a/clients/src/main/resources/common/message/UpdateRaftVoterResponse.json +++ /dev/null @@ -1,28 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. 
-// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 82, - "type": "response", - "name": "UpdateRaftVoterResponse", - "validVersions": "0", - "flexibleVersions": "0+", - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error" } - ] -} diff --git a/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java b/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java index 9e54df834119f..76937059ce975 100644 --- a/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java @@ -57,7 +57,7 @@ public void testParseAndValidateAddressesWithReverseLookup() { // With lookup of example.com, either one or two addresses are expected depending on // whether ipv4 and ipv6 are enabled - List validatedAddresses = checkWithLookup(Collections.singletonList("example.com:10000")); + List validatedAddresses = checkWithLookup(asList("example.com:10000")); assertFalse(validatedAddresses.isEmpty(), "Unexpected addresses " + validatedAddresses); List validatedHostNames = validatedAddresses.stream().map(InetSocketAddress::getHostName) 
.collect(Collectors.toList()); diff --git a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java index a6371c6db6086..0b2733207ce42 100644 --- a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java @@ -1140,8 +1140,8 @@ public void testTopicMetadataOnUpdatePartitionLeadership() { new Metadata.LeaderIdAndEpoch( Optional.of(2), Optional.of(3) - )), - Collections.singletonList(node1) + )), + Arrays.asList(node1) ); assertEquals(2, metadata.fetch().partitionsForTopic(topic).size()); assertEquals(1, metadata.fetch().partition(tp0).leader().id()); @@ -1161,20 +1161,20 @@ public void testUpdatePartitionLeadership() { // topic2 has 1 partition: tp21 String topic1 = "topic1"; TopicPartition tp11 = new TopicPartition(topic1, 0); - PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp11, Optional.of(1), Optional.of(100), Arrays.asList(1, 2), Arrays.asList(1, 2), Collections.singletonList(3)); + PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp11, Optional.of(1), Optional.of(100), Arrays.asList(1, 2), Arrays.asList(1, 2), Arrays.asList(3)); Uuid topic1Id = Uuid.randomUuid(); TopicPartition tp12 = new TopicPartition(topic1, 1); - PartitionMetadata part12Metadata = new PartitionMetadata(Errors.NONE, tp12, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Collections.singletonList(1)); + PartitionMetadata part12Metadata = new PartitionMetadata(Errors.NONE, tp12, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Arrays.asList(1)); String topic2 = "topic2"; TopicPartition tp21 = new TopicPartition(topic2, 0); - PartitionMetadata part2Metadata = new PartitionMetadata(Errors.NONE, tp21, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Collections.singletonList(1)); + PartitionMetadata part2Metadata = new 
PartitionMetadata(Errors.NONE, tp21, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Arrays.asList(1)); Uuid topic2Id = Uuid.randomUuid(); Set internalTopics = Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME); TopicPartition internalPart = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0); Uuid internalTopicId = Uuid.randomUuid(); - PartitionMetadata internalTopicMetadata = new PartitionMetadata(Errors.NONE, internalPart, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Collections.singletonList(1)); + PartitionMetadata internalTopicMetadata = new PartitionMetadata(Errors.NONE, internalPart, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Arrays.asList(1)); Map topicIds = new HashMap<>(); topicIds.put(topic1, topic1Id); @@ -1359,7 +1359,7 @@ public void testConcurrentUpdateAndFetchForSnapshotAndCluster() throws Interrupt } /** - * For testUpdatePartially, validates that updatedMetadata is matching expected part1Metadata, part2Metadata, internalPartMetadata, nodes & more. + * For testUpdatePartially, validates that updatedMetadata is matching expected part1Metadata, part2Metadata, interalPartMetadata, nodes & more. 
*/ void validateForUpdatePartitionLeadership(Metadata updatedMetadata, PartitionMetadata part1Metadata, PartitionMetadata part2Metadata, PartitionMetadata part12Metadata, @@ -1379,7 +1379,7 @@ void validateForUpdatePartitionLeadership(Metadata updatedMetadata, assertEquals(expectedController, updatedCluster.controller()); assertEquals(expectedTopicIds, updatedMetadata.topicIds()); - Map nodeMap = expectedNodes.stream().collect(Collectors.toMap(Node::id, e -> e)); + Map nodeMap = expectedNodes.stream().collect(Collectors.toMap(e -> e.id(), e -> e)); for (PartitionMetadata partitionMetadata: Arrays.asList(part1Metadata, part2Metadata, part12Metadata, internalPartMetadata)) { TopicPartition tp = new TopicPartition(partitionMetadata.topic(), partitionMetadata.partition()); diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index 4c22108b2b3b6..cd3ec36f38593 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -1351,17 +1351,17 @@ public TestMetadataUpdater(List nodes) { @Override public void handleServerDisconnect(long now, String destinationId, Optional maybeAuthException) { - maybeAuthException.ifPresent(exception -> - failure = exception - ); + maybeAuthException.ifPresent(exception -> { + failure = exception; + }); super.handleServerDisconnect(now, destinationId, maybeAuthException); } @Override public void handleFailedRequest(long now, Optional maybeFatalException) { - maybeFatalException.ifPresent(exception -> - failure = exception - ); + maybeFatalException.ifPresent(exception -> { + failure = exception; + }); } public KafkaException getAndClearFailure() { diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java b/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java index 
a84927c735f58..8c9244e23d80d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java @@ -156,7 +156,7 @@ public static ListConsumerGroupOffsetsResult listConsumerGroupOffsetsResult(Stri public static ListClientMetricsResourcesResult listClientMetricsResourcesResult(String... names) { return new ListClientMetricsResourcesResult( KafkaFuture.completedFuture(Arrays.stream(names) - .map(ClientMetricsResourceListing::new) + .map(name -> new ClientMetricsResourceListing(name)) .collect(Collectors.toList()))); } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java index 7d70d58a71ac2..59d1150ac3ba8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java @@ -21,7 +21,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.List; import static java.util.Arrays.asList; @@ -62,7 +61,7 @@ public void shouldGetAllEntries() { public void shouldImplementEqualsProperly() { assertEquals(config, config); assertEquals(config, new Config(config.entries())); - assertNotEquals(new Config(Collections.singletonList(E1)), config); + assertNotEquals(new Config(asList(E1)), config); assertNotEquals(config, "this"); } @@ -70,7 +69,7 @@ public void shouldImplementEqualsProperly() { public void shouldImplementHashCodeProperly() { assertEquals(config.hashCode(), config.hashCode()); assertEquals(config.hashCode(), new Config(config.entries()).hashCode()); - assertNotEquals(new Config(Collections.singletonList(E1)).hashCode(), config.hashCode()); + assertNotEquals(new Config(asList(E1)).hashCode(), config.hashCode()); } @Test diff --git 
a/clients/src/test/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResultTest.java index 13119e9f2cf5e..9b5e98a005f06 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResultTest.java @@ -23,7 +23,6 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; -import java.util.Collections; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -65,7 +64,7 @@ public void testUserLevelErrors() throws Exception { int iterations = 4096; dataFuture.complete(new DescribeUserScramCredentialsResponseData().setErrorCode(Errors.NONE.code()).setResults(Arrays.asList( new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(goodUser).setCredentialInfos( - Collections.singletonList(new DescribeUserScramCredentialsResponseData.CredentialInfo().setMechanism(scramSha256.type()).setIterations(iterations))), + Arrays.asList(new DescribeUserScramCredentialsResponseData.CredentialInfo().setMechanism(scramSha256.type()).setIterations(iterations))), new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(unknownUser).setErrorCode(Errors.RESOURCE_NOT_FOUND.code()), new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(failedUser).setErrorCode(Errors.DUPLICATE_RESOURCE.code())))); DescribeUserScramCredentialsResult results = new DescribeUserScramCredentialsResult(dataFuture); @@ -77,7 +76,7 @@ public void testUserLevelErrors() throws Exception { } assertEquals(Arrays.asList(goodUser, failedUser), results.users().get(), "Expected 2 users with credentials"); UserScramCredentialsDescription goodUserDescription = results.description(goodUser).get(); - assertEquals(new 
UserScramCredentialsDescription(goodUser, Collections.singletonList(new ScramCredentialInfo(scramSha256, iterations))), goodUserDescription); + assertEquals(new UserScramCredentialsDescription(goodUser, Arrays.asList(new ScramCredentialInfo(scramSha256, iterations))), goodUserDescription); try { results.description(failedUser).get(); fail("expected description(failedUser) to fail when there is a user-level error"); @@ -99,15 +98,15 @@ public void testSuccessfulDescription() throws Exception { KafkaFutureImpl dataFuture = new KafkaFutureImpl<>(); ScramMechanism scramSha256 = ScramMechanism.SCRAM_SHA_256; int iterations = 4096; - dataFuture.complete(new DescribeUserScramCredentialsResponseData().setErrorCode(Errors.NONE.code()).setResults(Collections.singletonList( + dataFuture.complete(new DescribeUserScramCredentialsResponseData().setErrorCode(Errors.NONE.code()).setResults(Arrays.asList( new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(goodUser).setCredentialInfos( - Collections.singletonList(new DescribeUserScramCredentialsResponseData.CredentialInfo().setMechanism(scramSha256.type()).setIterations(iterations)))))); + Arrays.asList(new DescribeUserScramCredentialsResponseData.CredentialInfo().setMechanism(scramSha256.type()).setIterations(iterations)))))); DescribeUserScramCredentialsResult results = new DescribeUserScramCredentialsResult(dataFuture); - assertEquals(Collections.singletonList(goodUser), results.users().get(), "Expected 1 user with credentials"); + assertEquals(Arrays.asList(goodUser), results.users().get(), "Expected 1 user with credentials"); Map allResults = results.all().get(); assertEquals(1, allResults.size()); UserScramCredentialsDescription goodUserDescriptionViaAll = allResults.get(goodUser); - assertEquals(new UserScramCredentialsDescription(goodUser, Collections.singletonList(new ScramCredentialInfo(scramSha256, iterations))), goodUserDescriptionViaAll); + assertEquals(new 
UserScramCredentialsDescription(goodUser, Arrays.asList(new ScramCredentialInfo(scramSha256, iterations))), goodUserDescriptionViaAll); assertEquals(goodUserDescriptionViaAll, results.description(goodUser).get(), "Expected same thing via all() and description()"); try { results.description(unknownUser).get(); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java index bd7c7d0b1ab47..ea1305e533bef 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java @@ -749,7 +749,7 @@ private static DescribeQuorumResponse prepareDescribeQuorumResponse( Boolean partitionIndexError, Boolean emptyOptionals) { String topicName = topicNameError ? "RANDOM" : Topic.CLUSTER_METADATA_TOPIC_NAME; - int partitionIndex = partitionIndexError ? 1 : Topic.CLUSTER_METADATA_TOPIC_PARTITION.partition(); + Integer partitionIndex = partitionIndexError ? 1 : Topic.CLUSTER_METADATA_TOPIC_PARTITION.partition(); List topics = new ArrayList<>(); List partitions = new ArrayList<>(); for (int i = 0; i < (partitionCountError ? 
2 : 1); i++) { @@ -1426,7 +1426,7 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiBasic() { ); DescribeTopicPartitionsResponseData dataFirstPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), singletonList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), Arrays.asList(0)); dataFirstPart.setNextCursor(new DescribeTopicPartitionsResponseData.Cursor() .setTopicName(topicName0) .setPartitionIndex(1)); @@ -1435,12 +1435,13 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiBasic() { if (request.topics().size() != 2) return false; if (!request.topics().get(0).name().equals(topicName0)) return false; if (!request.topics().get(1).name().equals(topicName1)) return false; - return request.cursor() == null; + if (request.cursor() != null) return false; + return true; }, new DescribeTopicPartitionsResponse(dataFirstPart)); DescribeTopicPartitionsResponseData dataSecondPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName0, topics.get(topicName0), singletonList(1)); - addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, topics.get(topicName1), singletonList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName0, topics.get(topicName0), Arrays.asList(1)); + addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, topics.get(topicName1), Arrays.asList(0)); env.kafkaClient().prepareResponse(body -> { DescribeTopicPartitionsRequestData request = (DescribeTopicPartitionsRequestData) body.data(); if (request.topics().size() != 2) return false; @@ -1448,7 +1449,9 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiBasic() { if (!request.topics().get(1).name().equals(topicName1)) return false; DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); - 
return cursor != null && cursor.topicName() == topicName0 && cursor.partitionIndex() == 1; + if (cursor == null || cursor.topicName() != topicName0 || cursor.partitionIndex() != 1) return false; + + return true; }, new DescribeTopicPartitionsResponse(dataSecondPart)); try { DescribeTopicsResult result = env.adminClient().describeTopics( @@ -1490,8 +1493,8 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiEdgeCase() { ); DescribeTopicPartitionsResponseData dataFirstPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), singletonList(0)); - addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName1, topics.get(topicName1), singletonList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), Arrays.asList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName1, topics.get(topicName1), Arrays.asList(0)); dataFirstPart.setNextCursor(new DescribeTopicPartitionsResponseData.Cursor() .setTopicName(topicName1) .setPartitionIndex(1)); @@ -1501,12 +1504,13 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiEdgeCase() { if (!request.topics().get(0).name().equals(topicName0)) return false; if (!request.topics().get(1).name().equals(topicName1)) return false; if (!request.topics().get(2).name().equals(topicName2)) return false; - return request.cursor() == null; + if (request.cursor() != null) return false; + return true; }, new DescribeTopicPartitionsResponse(dataFirstPart)); DescribeTopicPartitionsResponseData dataSecondPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, topics.get(topicName1), singletonList(1)); - addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName2, topics.get(topicName2), singletonList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, 
topics.get(topicName1), Arrays.asList(1)); + addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName2, topics.get(topicName2), Arrays.asList(0)); dataSecondPart.setNextCursor(new DescribeTopicPartitionsResponseData.Cursor() .setTopicName(topicName2) .setPartitionIndex(1)); @@ -1516,17 +1520,19 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiEdgeCase() { if (!request.topics().get(0).name().equals(topicName1)) return false; if (!request.topics().get(1).name().equals(topicName2)) return false; DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); - return cursor != null && cursor.topicName().equals(topicName1) && cursor.partitionIndex() == 1; + if (cursor == null || !cursor.topicName().equals(topicName1) || cursor.partitionIndex() != 1) return false; + return true; }, new DescribeTopicPartitionsResponse(dataSecondPart)); DescribeTopicPartitionsResponseData dataThirdPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataThirdPart, topicName2, topics.get(topicName2), singletonList(1)); + addPartitionToDescribeTopicPartitionsResponse(dataThirdPart, topicName2, topics.get(topicName2), Arrays.asList(1)); env.kafkaClient().prepareResponse(body -> { DescribeTopicPartitionsRequestData request = (DescribeTopicPartitionsRequestData) body.data(); if (request.topics().size() != 1) return false; if (!request.topics().get(0).name().equals(topicName2)) return false; DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); - return cursor != null && cursor.topicName().equals(topicName2) && cursor.partitionIndex() == 1; + if (cursor == null || !cursor.topicName().equals(topicName2) || cursor.partitionIndex() != 1) return false; + return true; }, new DescribeTopicPartitionsResponse(dataThirdPart)); try { DescribeTopicsResult result = env.adminClient().describeTopics( @@ -1553,17 +1559,17 @@ private void addPartitionToDescribeTopicPartitionsResponse( Uuid topicId, List partitions) { 
List addingPartitions = new ArrayList<>(); - partitions.forEach(partition -> + partitions.forEach(partition -> { addingPartitions.add(new DescribeTopicPartitionsResponsePartition() - .setIsrNodes(singletonList(0)) + .setIsrNodes(Arrays.asList(0)) .setErrorCode((short) 0) .setLeaderEpoch(0) .setLeaderId(0) - .setEligibleLeaderReplicas(singletonList(1)) - .setLastKnownElr(singletonList(2)) + .setEligibleLeaderReplicas(Arrays.asList(1)) + .setLastKnownElr(Arrays.asList(2)) .setPartitionIndex(partition) - .setReplicaNodes(Arrays.asList(0, 1, 2))) - ); + .setReplicaNodes(Arrays.asList(0, 1, 2))); + }); data.topics().add(new DescribeTopicPartitionsResponseTopic() .setErrorCode((short) 0) .setTopicId(topicId) @@ -1597,15 +1603,15 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiErrorHandling() { .setTopicId(topics.get(topicName0)) .setName(topicName0) .setIsInternal(false) - .setPartitions(singletonList(new DescribeTopicPartitionsResponsePartition() - .setIsrNodes(singletonList(0)) + .setPartitions(Arrays.asList(new DescribeTopicPartitionsResponsePartition() + .setIsrNodes(Arrays.asList(0)) .setErrorCode((short) 0) .setLeaderEpoch(0) .setLeaderId(0) - .setEligibleLeaderReplicas(singletonList(1)) - .setLastKnownElr(singletonList(2)) + .setEligibleLeaderReplicas(Arrays.asList(1)) + .setLastKnownElr(Arrays.asList(2)) .setPartitionIndex(0) - .setReplicaNodes(asList(0, 1, 2)))) + .setReplicaNodes(Arrays.asList(0, 1, 2)))) ); dataFirstPart.topics().add(new DescribeTopicPartitionsResponseTopic() .setErrorCode((short) 29) @@ -1618,7 +1624,8 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiErrorHandling() { if (request.topics().size() != 2) return false; if (!request.topics().get(0).name().equals(topicName0)) return false; if (!request.topics().get(1).name().equals(topicName1)) return false; - return request.cursor() == null; + if (request.cursor() != null) return false; + return true; }, new DescribeTopicPartitionsResponse(dataFirstPart)); 
DescribeTopicsResult result = env.adminClient().describeTopics( Arrays.asList(topicName1, topicName0), new DescribeTopicsOptions() @@ -1653,7 +1660,7 @@ private void callAdminClientApisAndExpectAnAuthenticationError(AdminClientUnitTe Map counts = new HashMap<>(); counts.put("my_topic", NewPartitions.increaseTo(3)); - counts.put("other_topic", NewPartitions.increaseTo(3, asList(singletonList(2), singletonList(3)))); + counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3)))); e = assertThrows(ExecutionException.class, () -> env.adminClient().createPartitions(counts).all().get()); assertInstanceOf(AuthenticationException.class, e.getCause(), "Expected an authentication error, but got " + Utils.stackTrace(e)); @@ -1683,9 +1690,9 @@ private void callClientQuotasApisAndExpectAnAuthenticationError(AdminClientUnitT "Expected an authentication error, but got " + Utils.stackTrace(e)); ClientQuotaEntity entity = new ClientQuotaEntity(Collections.singletonMap(ClientQuotaEntity.USER, "user")); - ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity, singletonList(new ClientQuotaAlteration.Op("consumer_byte_rate", 1000.0))); + ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity, asList(new ClientQuotaAlteration.Op("consumer_byte_rate", 1000.0))); e = assertThrows(ExecutionException.class, - () -> env.adminClient().alterClientQuotas(singletonList(alteration)).all().get()); + () -> env.adminClient().alterClientQuotas(asList(alteration)).all().get()); assertInstanceOf(AuthenticationException.class, e.getCause(), "Expected an authentication error, but got " + Utils.stackTrace(e)); @@ -1811,9 +1818,9 @@ public void testDeleteAcls() throws Exception { .setThrottleTimeMs(0) .setFilterResults(asList( new DeleteAclsResponseData.DeleteAclsFilterResult() - .setMatchingAcls(singletonList(DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE))), + .setMatchingAcls(asList(DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE))), new 
DeleteAclsResponseData.DeleteAclsFilterResult() - .setMatchingAcls(singletonList(DeleteAclsResponse.matchingAcl(ACL2, ApiError.NONE))))), + .setMatchingAcls(asList(DeleteAclsResponse.matchingAcl(ACL2, ApiError.NONE))))), ApiKeys.DELETE_ACLS.latestVersion())); results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2)); Collection deleted = results.all().get(); @@ -1887,11 +1894,11 @@ public void testDescribeBrokerConfigs() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse( - new DescribeConfigsResponseData().setResults(singletonList(new DescribeConfigsResponseData.DescribeConfigsResult() + new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(broker0Resource.name()).setResourceType(broker0Resource.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList())))), env.cluster().nodeById(0)); env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse( - new DescribeConfigsResponseData().setResults(singletonList(new DescribeConfigsResponseData.DescribeConfigsResult() + new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(broker1Resource.name()).setResourceType(broker1Resource.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList())))), env.cluster().nodeById(1)); Map> result = env.adminClient().describeConfigs(asList( @@ -1932,9 +1939,9 @@ public void testDescribeConfigsPartialResponse() { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(new DescribeConfigsResponse( - new DescribeConfigsResponseData().setResults(singletonList(new DescribeConfigsResponseData.DescribeConfigsResult() - 
.setResourceName(topic.name()).setResourceType(topic.type().id()).setErrorCode(Errors.NONE.code()) - .setConfigs(emptyList()))))); + new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult() + .setResourceName(topic.name()).setResourceType(topic.type().id()).setErrorCode(Errors.NONE.code()) + .setConfigs(emptyList()))))); Map> result = env.adminClient().describeConfigs(asList( topic, topic2)).values(); @@ -1957,9 +1964,9 @@ public void testDescribeConfigsUnrequested() throws Exception { new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(unrequested.name()).setResourceType(unrequested.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList()))))); - Map> result = env.adminClient().describeConfigs(singletonList( + Map> result = env.adminClient().describeConfigs(asList( topic)).values(); - assertEquals(new HashSet<>(singletonList(topic)), result.keySet()); + assertEquals(new HashSet<>(asList(topic)), result.keySet()); assertNotNull(result.get(topic).get()); assertNull(result.get(unrequested)); } @@ -2324,10 +2331,10 @@ public void testDescribeReplicaLogDirsUnexpected() throws ExecutionException, In prepareDescribeLogDirsResult(unexpected, broker1log1, broker1Log1PartitionSize, broker1Log1OffsetLag, true)))), env.cluster().nodeById(expected.brokerId())); - DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(singletonList(expected)); + DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(asList(expected)); Map> values = result.values(); - assertEquals(TestUtils.toSet(singletonList(expected)), values.keySet()); + assertEquals(TestUtils.toSet(asList(expected)), values.keySet()); assertNotNull(values.get(expected)); assertEquals(broker1log0, values.get(expected).get().getCurrentReplicaLogDir()); @@ -2353,7 +2360,7 @@ public void testCreatePartitions() throws Exception { Map counts = new HashMap<>(); counts.put("my_topic", 
NewPartitions.increaseTo(3)); - counts.put("other_topic", NewPartitions.increaseTo(3, asList(singletonList(2), singletonList(3)))); + counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3)))); CreatePartitionsResult results = env.adminClient().createPartitions(counts); Map> values = results.values(); @@ -2999,7 +3006,7 @@ public void testListConsumerGroupsWithTypes() throws Exception { expectListGroupsRequestWithFilters(singleton(ConsumerGroupState.STABLE.toString()), Collections.emptySet()), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(singletonList( + .setGroups(Arrays.asList( new ListGroupsResponseData.ListedGroup() .setGroupId("group-1") .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) @@ -3565,7 +3572,7 @@ public void testDescribeNonConsumerGroups() throws Exception { "", "non-consumer", "", - emptyList(), + asList(), Collections.emptySet())); env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data)); @@ -4033,7 +4040,7 @@ public void testBatchedListConsumerGroupOffsetsWithNoOffsetFetchBatching() throw ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs); // The request handler attempts both FindCoordinator and OffsetFetch requests. This seems - ok since we expect this scenario only during upgrades from versions < 3.0.0 where + ok since we expect this scenario only during upgrades from versions < 3.0.0 where // some upgraded brokers could handle batched FindCoordinator while non-upgraded coordinators // rejected batched OffsetFetch requests. 
sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); @@ -4890,7 +4897,7 @@ public void testRemoveMembersFromGroup() throws Exception { assertNull(noErrorResult.memberResult(memberTwo).get()); // Test the "removeAll" scenario - final List topicPartitions = Stream.of(1, 2, 3).map(partition -> new TopicPartition("my_topic", partition)) + final List topicPartitions = Arrays.asList(1, 2, 3).stream().map(partition -> new TopicPartition("my_topic", partition)) .collect(Collectors.toList()); // construct the DescribeGroupsResponse DescribeGroupsResponseData data = prepareDescribeGroupsResponseData(GROUP_ID, Arrays.asList(instanceOne, instanceTwo), topicPartitions); @@ -5373,7 +5380,7 @@ public void testListOffsets() throws Exception { final Cluster cluster = new Cluster( "mockClusterId", - singletonList(node0), + Arrays.asList(node0), pInfos, Collections.emptySet(), Collections.emptySet(), @@ -5467,7 +5474,7 @@ public void testListOffsetsRetriableErrors() throws Exception { ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, -1L, 456L, 654); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t2)); + .setTopics(Arrays.asList(t2)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); // metadata refresh because of LEADER_NOT_AVAILABLE @@ -5476,7 +5483,7 @@ public void testListOffsetsRetriableErrors() throws Exception { t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t0)); + .setTopics(Arrays.asList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); Map partitions = new HashMap<>(); @@ -5526,7 +5533,7 @@ public void testListOffsetsNonRetriableErrors() throws Exception { ListOffsetsTopicResponse t0 = 
ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.TOPIC_AUTHORIZATION_FAILED, -1L, -1L, -1); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t0)); + .setTopics(Arrays.asList(t0)); env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData)); Map partitions = new HashMap<>(); @@ -5596,7 +5603,7 @@ public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Ex ListOffsetsTopicResponse topicResponse = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 345L, 543); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(topicResponse)); + .setTopics(Arrays.asList(topicResponse)); env.kafkaClient().prepareResponseFrom( // ensure that no max timestamp requests are retried request -> request instanceof ListOffsetsRequest && ((ListOffsetsRequest) request).topics().stream() @@ -5764,7 +5771,7 @@ public void testListOffsetsNonMaxTimestampDowngradedImmediately() throws Excepti ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t0)); + .setTopics(Arrays.asList(t0)); // listoffsets response from broker 0 env.kafkaClient().prepareResponse( @@ -6089,13 +6096,13 @@ public void testListOffsetsMetadataRetriableErrors() throws Exception { ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t0)); + .setTopics(Arrays.asList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); // listoffsets response from broker 1 ListOffsetsTopicResponse t1 = 
ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 789L, 987); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t1)); + .setTopics(Arrays.asList(t1)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); Map partitions = new HashMap<>(); @@ -6158,13 +6165,13 @@ public void testListOffsetsWithMultiplePartitionsLeaderChange() throws Exception t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t0)); + .setTopics(Arrays.asList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -2L, 123L, 456); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t1)); + .setTopics(Arrays.asList(t1)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node2); Map partitions = new HashMap<>(); @@ -6204,7 +6211,7 @@ public void testListOffsetsWithLeaderChange() throws Exception { ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1L, 345L, 543); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t0)); + .setTopics(Arrays.asList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); // updating leader from node0 to node1 and metadata refresh because of NOT_LEADER_OR_FOLLOWER @@ -6218,7 +6225,7 @@ public void testListOffsetsWithLeaderChange() throws Exception { t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t0)); + .setTopics(Arrays.asList(t0)); 
env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); Map partitions = new HashMap<>(); @@ -6328,7 +6335,7 @@ public void testListOffsetsPartialResponse() throws Exception { ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456); ListOffsetsResponseData data = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(singletonList(t0)); + .setTopics(Arrays.asList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(data), node0); Map partitions = new HashMap<>(); @@ -6363,7 +6370,7 @@ public void testSuccessfulRetryAfterRequestTimeout() throws Exception { Node node0 = new Node(0, "localhost", 8121); nodes.put(0, node0); Cluster cluster = new Cluster("mockClusterId", nodes.values(), - singletonList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), + Arrays.asList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), nodes.get(0)); @@ -6418,7 +6425,7 @@ private void testApiTimeout(int requestTimeoutMs, Node node0 = new Node(0, "localhost", 8121); nodes.put(0, node0); Cluster cluster = new Cluster("mockClusterId", nodes.values(), - singletonList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), + Arrays.asList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), nodes.get(0)); @@ -6467,7 +6474,7 @@ public void testRequestTimeoutExceedingDefaultApiTimeout() throws Exception { Node node0 = new Node(0, "localhost", 8121); nodes.put(0, node0); Cluster cluster = new Cluster("mockClusterId", nodes.values(), - singletonList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), + Arrays.asList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), Collections.emptySet(), 
Collections.emptySet(), Collections.emptySet(), nodes.get(0)); @@ -6525,7 +6532,7 @@ public void testDescribeClientQuotas() throws Exception { env.kafkaClient().prepareResponse(DescribeClientQuotasResponse.fromQuotaEntities(responseData, 0)); - ClientQuotaFilter filter = ClientQuotaFilter.contains(singletonList(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, value))); + ClientQuotaFilter filter = ClientQuotaFilter.contains(asList(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, value))); DescribeClientQuotasResult result = env.adminClient().describeClientQuotas(filter); Map> resultData = result.entities().get(); @@ -6783,7 +6790,7 @@ public void testAlterUserScramCredentialsUnknownMechanism() { ScramMechanism user2ScramMechanism0 = ScramMechanism.SCRAM_SHA_256; AlterUserScramCredentialsResponseData responseData = new AlterUserScramCredentialsResponseData(); - responseData.setResults(singletonList( + responseData.setResults(Arrays.asList( new AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult().setUser(user2Name))); env.kafkaClient().prepareResponse(new AlterUserScramCredentialsResponse(responseData)); @@ -6794,7 +6801,7 @@ public void testAlterUserScramCredentialsUnknownMechanism() { new UserScramCredentialUpsertion(user2Name, new ScramCredentialInfo(user2ScramMechanism0, 4096), "password"))); Map> resultData = result.values(); assertEquals(3, resultData.size()); - Stream.of(user0Name, user1Name).forEach(u -> { + Arrays.asList(user0Name, user1Name).stream().forEach(u -> { assertTrue(resultData.containsKey(u)); try { resultData.get(u).get(); @@ -6819,7 +6826,7 @@ public void testAlterUserScramCredentialsUnknownMechanism() { } @Test - public void testAlterUserScramCredentials() { + public void testAlterUserScramCredentials() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); @@ -6831,7 +6838,7 @@ public void testAlterUserScramCredentials() { 
final String user2Name = "user2"; ScramMechanism user2ScramMechanism0 = ScramMechanism.SCRAM_SHA_512; AlterUserScramCredentialsResponseData responseData = new AlterUserScramCredentialsResponseData(); - responseData.setResults(Stream.of(user0Name, user1Name, user2Name).map(u -> + responseData.setResults(Arrays.asList(user0Name, user1Name, user2Name).stream().map(u -> new AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult() .setUser(u).setErrorCode(Errors.NONE.code())).collect(Collectors.toList())); @@ -6844,7 +6851,7 @@ public void testAlterUserScramCredentials() { new UserScramCredentialDeletion(user2Name, user2ScramMechanism0))); Map> resultData = result.values(); assertEquals(3, resultData.size()); - Stream.of(user0Name, user1Name, user2Name).forEach(u -> { + Arrays.asList(user0Name, user1Name, user2Name).stream().forEach(u -> { assertTrue(resultData.containsKey(u)); assertFalse(resultData.get(u).isCompletedExceptionally()); }); @@ -7160,7 +7167,7 @@ public void testRetryDescribeTransactionsAfterNotCoordinatorError() throws Excep env.kafkaClient().prepareResponse( request -> request instanceof FindCoordinatorRequest, new FindCoordinatorResponse(new FindCoordinatorResponseData() - .setCoordinators(singletonList(new FindCoordinatorResponseData.Coordinator() + .setCoordinators(Arrays.asList(new FindCoordinatorResponseData.Coordinator() .setKey(transactionalId) .setErrorCode(Errors.NONE.code()) .setNodeId(coordinator1.id()) @@ -7190,7 +7197,7 @@ public void testRetryDescribeTransactionsAfterNotCoordinatorError() throws Excep env.kafkaClient().prepareResponse( request -> request instanceof FindCoordinatorRequest, new FindCoordinatorResponse(new FindCoordinatorResponseData() - .setCoordinators(singletonList(new FindCoordinatorResponseData.Coordinator() + .setCoordinators(Arrays.asList(new FindCoordinatorResponseData.Coordinator() .setKey(transactionalId) .setErrorCode(Errors.NONE.code()) .setNodeId(coordinator2.id()) @@ -7294,14 +7301,14 @@ public 
void testListTransactions() throws Exception { MetadataResponseData.MetadataResponseBrokerCollection brokers = new MetadataResponseData.MetadataResponseBrokerCollection(); - env.cluster().nodes().forEach(node -> + env.cluster().nodes().forEach(node -> { brokers.add(new MetadataResponseData.MetadataResponseBroker() .setHost(node.host()) .setNodeId(node.id()) .setPort(node.port()) .setRack(node.rack()) - ) - ); + ); + }); env.kafkaClient().prepareResponse( request -> request instanceof MetadataRequest, @@ -7658,7 +7665,7 @@ public void testListClientMetricsResourcesEmpty() throws Exception { } @Test - public void testListClientMetricsResourcesNotSupported() { + public void testListClientMetricsResourcesNotSupported() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().prepareResponse( request -> request instanceof ListClientMetricsResourcesRequest, diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java index 4f070a3a2eda3..6cbc86cb0dd96 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.admin.internals.CoordinatorKey; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.ElectionType; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; @@ -102,7 +103,9 @@ public class MockAdminClient extends AdminClient { private Time mockTime; private long blockingTimeMs; - private final Map mockMetrics = new HashMap<>(); + private KafkaException listConsumerGroupOffsetsException; + + private Map mockMetrics = new HashMap<>(); private final List allTokens = new ArrayList<>(); @@ -643,8 +646,8 @@ synchronized public 
CreateDelegationTokenResult createDelegationToken(CreateDele synchronized public RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options) { KafkaFutureImpl future = new KafkaFutureImpl<>(); - boolean tokenFound = false; - long expiryTimestamp = options.renewTimePeriodMs(); + Boolean tokenFound = false; + Long expiryTimestamp = options.renewTimePeriodMs(); for (DelegationToken token : allTokens) { if (Arrays.equals(token.hmac(), hmac)) { token.tokenInfo().setExpiryTimestamp(expiryTimestamp); @@ -665,9 +668,9 @@ synchronized public RenewDelegationTokenResult renewDelegationToken(byte[] hmac, synchronized public ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, ExpireDelegationTokenOptions options) { KafkaFutureImpl future = new KafkaFutureImpl<>(); - long expiryTimestamp = options.expiryTimePeriodMs(); + Long expiryTimestamp = options.expiryTimePeriodMs(); List tokensToRemove = new ArrayList<>(); - boolean tokenFound = false; + Boolean tokenFound = false; for (DelegationToken token : allTokens) { if (Arrays.equals(token.hmac(), hmac)) { if (expiryTimestamp == -1 || expiryTimestamp < System.currentTimeMillis()) { @@ -725,9 +728,20 @@ synchronized public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map< String group = groupSpecs.keySet().iterator().next(); Collection topicPartitions = groupSpecs.get(group).topicPartitions(); final KafkaFutureImpl> future = new KafkaFutureImpl<>(); - future.complete(committedOffsets.entrySet().stream() - .filter(entry -> topicPartitions.isEmpty() || topicPartitions.contains(entry.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, entry -> new OffsetAndMetadata(entry.getValue())))); + + if (listConsumerGroupOffsetsException != null) { + future.completeExceptionally(listConsumerGroupOffsetsException); + } else { + if (topicPartitions.isEmpty()) { + future.complete(committedOffsets.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> new 
OffsetAndMetadata(entry.getValue())))); + } else { + future.complete(committedOffsets.entrySet().stream() + .filter(entry -> topicPartitions.contains(entry.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, entry -> new OffsetAndMetadata(entry.getValue())))); + } + } + return new ListConsumerGroupOffsetsResult(Collections.singletonMap(CoordinatorKey.byGroupId(group), future)); } @@ -1306,16 +1320,6 @@ public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMet return new ListClientMetricsResourcesResult(future); } - @Override - public AddRaftVoterResult addRaftVoter(int voterId, Uuid voterDirectoryId, Set endpoints, AddRaftVoterOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - - @Override - public RemoveRaftVoterResult removeRaftVoter(int voterId, Uuid voterDirectoryId, RemoveRaftVoterOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - @Override synchronized public void close(Duration timeout) {} @@ -1331,6 +1335,10 @@ public synchronized void updateConsumerGroupOffsets(final Map groupIds = new HashSet<>(Arrays.asList("g1", "g2")); DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(lc); AdminApiFuture future = AdminApiFuture.forKeys( - groupIds.stream().map(CoordinatorKey::byGroupId).collect(Collectors.toSet())); + groupIds.stream().map(g -> CoordinatorKey.byGroupId(g)).collect(Collectors.toSet())); AdminApiDriver driver = new AdminApiDriver<>( handler, @@ -660,13 +660,13 @@ public TestContext( new LogContext() ); - staticKeys.forEach((key, brokerId) -> - assertMappedKey(this, key, brokerId) - ); + staticKeys.forEach((key, brokerId) -> { + assertMappedKey(this, key, brokerId); + }); - dynamicKeys.keySet().forEach(key -> - assertUnmappedKey(this, key) - ); + dynamicKeys.keySet().forEach(key -> { + assertUnmappedKey(this, key); + }); } public static TestContext staticMapped(Map staticKeys) { @@ -681,22 +681,22 @@ private void 
assertLookupResponse( RequestSpec requestSpec, LookupResult result ) { - requestSpec.keys.forEach(key -> - assertUnmappedKey(this, key) - ); + requestSpec.keys.forEach(key -> { + assertUnmappedKey(this, key); + }); // The response is just a placeholder. The result is all we are interested in MetadataResponse response = new MetadataResponse(new MetadataResponseData(), ApiKeys.METADATA.latestVersion()); driver.onResponse(time.milliseconds(), requestSpec, response, Node.noNode()); - result.mappedKeys.forEach((key, brokerId) -> - assertMappedKey(this, key, brokerId) - ); + result.mappedKeys.forEach((key, brokerId) -> { + assertMappedKey(this, key, brokerId); + }); - result.failedKeys.forEach((key, exception) -> - assertFailedKey(this, key, exception) - ); + result.failedKeys.forEach((key, exception) -> { + assertFailedKey(this, key, exception); + }); } private void assertResponse( @@ -707,9 +707,9 @@ private void assertResponse( int brokerId = requestSpec.scope.destinationBrokerId().orElseThrow(() -> new AssertionError("Fulfillment requests must specify a target brokerId")); - requestSpec.keys.forEach(key -> - assertMappedKey(this, key, brokerId) - ); + requestSpec.keys.forEach(key -> { + assertMappedKey(this, key, brokerId); + }); // The response is just a placeholder. 
The result is all we are interested in MetadataResponse response = new MetadataResponse(new MetadataResponseData(), @@ -717,17 +717,17 @@ private void assertResponse( driver.onResponse(time.milliseconds(), requestSpec, response, node); - result.unmappedKeys.forEach(key -> - assertUnmappedKey(this, key) - ); + result.unmappedKeys.forEach(key -> { + assertUnmappedKey(this, key); + }); - result.failedKeys.forEach((key, exception) -> - assertFailedKey(this, key, exception) - ); + result.failedKeys.forEach((key, exception) -> { + assertFailedKey(this, key, exception); + }); - result.completedKeys.forEach((key, value) -> - assertCompletedKey(this, key, value) - ); + result.completedKeys.forEach((key, value) -> { + assertCompletedKey(this, key, value); + }); } private MockLookupStrategy lookupStrategy() { diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyTest.java index 8c30d93038f70..8e4b961394cbe 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyTest.java @@ -85,9 +85,9 @@ public void testHandleResponse() { ); assertEquals(expectedMappedKeys, lookupResult.mappedKeys.keySet()); - lookupResult.mappedKeys.forEach((brokerKey, brokerId) -> - assertEquals(OptionalInt.of(brokerId), brokerKey.brokerId) - ); + lookupResult.mappedKeys.forEach((brokerKey, brokerId) -> { + assertEquals(OptionalInt.of(brokerId), brokerKey.brokerId); + }); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java index 823d4b39b1e9a..fc52e9e6717ed 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java +++ 
b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java @@ -90,7 +90,7 @@ public void testBuildOldLookupRequestRequiresAtLeastOneKey() { strategy.disableBatch(); assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest( - new HashSet<>(Collections.singletonList(CoordinatorKey.byTransactionalId("txnid"))))); + new HashSet<>(Arrays.asList(CoordinatorKey.byTransactionalId("txnid"))))); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/FenceProducersHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/FenceProducersHandlerTest.java index 9665bd0bdf120..34ed2e6772c2f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/FenceProducersHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/FenceProducersHandlerTest.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.clients.admin.internals; -import org.apache.kafka.clients.admin.FenceProducersOptions; import org.apache.kafka.clients.admin.internals.AdminApiHandler.ApiResult; import org.apache.kafka.common.Node; import org.apache.kafka.common.message.InitProducerIdResponseData; @@ -40,21 +39,11 @@ public class FenceProducersHandlerTest { private final LogContext logContext = new LogContext(); private final Node node = new Node(1, "host", 1234); - private final int requestTimeoutMs = 30000; - private final FenceProducersOptions options = new FenceProducersOptions(); @Test public void testBuildRequest() { - FenceProducersHandler handler = new FenceProducersHandler(options, logContext, requestTimeoutMs); - mkSet("foo", "bar", "baz").forEach(transactionalId -> assertLookup(handler, transactionalId, requestTimeoutMs)); - } - - @Test - public void testBuildRequestOptionsTimeout() { - final int optionsTimeoutMs = 50000; - options.timeoutMs(optionsTimeoutMs); - FenceProducersHandler handler = new FenceProducersHandler(options, logContext, requestTimeoutMs); - 
mkSet("foo", "bar", "baz").forEach(transactionalId -> assertLookup(handler, transactionalId, optionsTimeoutMs)); + FenceProducersHandler handler = new FenceProducersHandler(logContext); + mkSet("foo", "bar", "baz").forEach(transactionalId -> assertLookup(handler, transactionalId)); } @Test @@ -62,7 +51,7 @@ public void testHandleSuccessfulResponse() { String transactionalId = "foo"; CoordinatorKey key = CoordinatorKey.byTransactionalId(transactionalId); - FenceProducersHandler handler = new FenceProducersHandler(options, logContext, requestTimeoutMs); + FenceProducersHandler handler = new FenceProducersHandler(logContext); short epoch = 57; long producerId = 7; @@ -84,7 +73,7 @@ public void testHandleSuccessfulResponse() { @Test public void testHandleErrorResponse() { String transactionalId = "foo"; - FenceProducersHandler handler = new FenceProducersHandler(options, logContext, requestTimeoutMs); + FenceProducersHandler handler = new FenceProducersHandler(logContext); assertFatalError(handler, transactionalId, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED); assertFatalError(handler, transactionalId, Errors.CLUSTER_AUTHORIZATION_FAILED); assertFatalError(handler, transactionalId, Errors.UNKNOWN_SERVER_ERROR); @@ -147,10 +136,10 @@ private ApiResult handleResponseError( return result; } - private void assertLookup(FenceProducersHandler handler, String transactionalId, int txnTimeoutMs) { + private void assertLookup(FenceProducersHandler handler, String transactionalId) { CoordinatorKey key = CoordinatorKey.byTransactionalId(transactionalId); InitProducerIdRequest.Builder request = handler.buildSingleRequest(1, key); assertEquals(transactionalId, request.data.transactionalId()); - assertEquals(txnTimeoutMs, request.data.transactionTimeoutMs()); + assertEquals(1, request.data.transactionTimeoutMs()); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java 
b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java index d34d09cd8a926..850ac2bd8f9fb 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java @@ -495,7 +495,7 @@ public void testSubscriptionOnEmptyPattern(GroupProtocol groupProtocol) { } @ParameterizedTest - @EnumSource(value = GroupProtocol.class, names = "CLASSIC") + @EnumSource(GroupProtocol.class) public void testSubscriptionWithEmptyPartitionAssignment(GroupProtocol groupProtocol) { Properties props = new Properties(); props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()); @@ -3227,7 +3227,7 @@ public void testUnusedConfigs(GroupProtocol groupProtocol) { } @ParameterizedTest - @EnumSource(value = GroupProtocol.class, names = "CLASSIC") + @EnumSource(GroupProtocol.class) public void testAssignorNameConflict(GroupProtocol groupProtocol) { Map configs = new HashMap<>(); configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/MockShareConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/MockShareConsumerTest.java deleted file mode 100644 index 79ba87d459711..0000000000000 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/MockShareConsumerTest.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.consumer; - -import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.record.TimestampType; -import org.junit.jupiter.api.Test; - -import java.time.Duration; -import java.util.Collections; -import java.util.Iterator; -import java.util.Optional; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; - -public class MockShareConsumerTest { - private final MockShareConsumer consumer = new MockShareConsumer<>(); - - @Test - public void testSimpleMock() { - consumer.subscribe(Collections.singleton("test")); - assertEquals(0, consumer.poll(Duration.ZERO).count()); - ConsumerRecord rec1 = new ConsumerRecord<>("test", 0, 0, 0L, TimestampType.CREATE_TIME, - 0, 0, "key1", "value1", new RecordHeaders(), Optional.empty()); - ConsumerRecord rec2 = new ConsumerRecord<>("test", 0, 1, 0L, TimestampType.CREATE_TIME, - 0, 0, "key2", "value2", new RecordHeaders(), Optional.empty()); - consumer.addRecord(rec1); - consumer.addRecord(rec2); - ConsumerRecords recs = consumer.poll(Duration.ofMillis(1)); - Iterator> iter = recs.iterator(); - assertEquals(rec1, iter.next()); - assertEquals(rec2, iter.next()); - assertFalse(iter.hasNext()); - } -} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java index 7984aaf6d98f3..fc6d2c85ba561 100644 --- 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java @@ -299,9 +299,9 @@ public void testWakeupFromEnsureCoordinatorReady() { coordinator.ensureCoordinatorReadyAsync(); // But should wakeup in sync variation even if timer is 0. - assertThrows(WakeupException.class, () -> - coordinator.ensureCoordinatorReady(mockTime.timer(0)) - ); + assertThrows(WakeupException.class, () -> { + coordinator.ensureCoordinatorReady(mockTime.timer(0)); + }); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java index 5f59794797815..66ee724a0e515 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.Metadata.LeaderAndEpoch; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; @@ -29,6 +30,7 @@ import org.apache.kafka.clients.consumer.OffsetCommitCallback; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.RetriableCommitFailedException; +import org.apache.kafka.clients.consumer.RoundRobinAssignor; import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; import 
org.apache.kafka.clients.consumer.internals.events.AssignmentChangeEvent; @@ -54,7 +56,6 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.Node; -import org.apache.kafka.common.Cluster; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; @@ -104,7 +105,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Pattern; import java.util.stream.Stream; import static java.util.Arrays.asList; @@ -141,7 +141,6 @@ import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.clearInvocations; @SuppressWarnings("unchecked") public class AsyncKafkaConsumerTest { @@ -206,6 +205,7 @@ private AsyncKafkaConsumer newConsumer( ConsumerInterceptors interceptors, ConsumerRebalanceListenerInvoker rebalanceListenerInvoker, SubscriptionState subscriptions, + List assignors, String groupId, String clientId) { long retryBackoffMs = 100L; @@ -228,6 +228,7 @@ private AsyncKafkaConsumer newConsumer( metadata, retryBackoffMs, defaultApiTimeoutMs, + assignors, groupId, autoCommitEnabled); } @@ -479,44 +480,6 @@ public void onPartitionsAssigned(final Collection partitions) { assertTrue(callbackExecuted.get()); } - @Test - public void testSubscriptionRegexEvalOnPollOnlyIfMetadataChanges() { - SubscriptionState subscriptions = mock(SubscriptionState.class); - Cluster cluster = mock(Cluster.class); - - consumer = newConsumer( - mock(FetchBuffer.class), - mock(ConsumerInterceptors.class), - mock(ConsumerRebalanceListenerInvoker.class), - subscriptions, - "group-id", - "client-id"); - - final String topicName = "foo"; - final int partition = 3; - final TopicPartition tp = new TopicPartition(topicName, 
partition); - doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class)); - Map offsets = Collections.singletonMap(tp, new OffsetAndMetadata(1)); - completeFetchedCommittedOffsetApplicationEventSuccessfully(offsets); - doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any()); - doReturn(cluster).when(metadata).fetch(); - doReturn(Collections.singleton(topicName)).when(cluster).topics(); - - consumer.subscribe(Pattern.compile("f*")); - verify(metadata).requestUpdateForNewTopics(); - verify(subscriptions).matchesSubscribedPattern(topicName); - clearInvocations(subscriptions); - - when(subscriptions.hasPatternSubscription()).thenReturn(true); - consumer.poll(Duration.ZERO); - verify(subscriptions, never()).matchesSubscribedPattern(topicName); - - when(metadata.updateVersion()).thenReturn(2); - when(subscriptions.hasPatternSubscription()).thenReturn(true); - consumer.poll(Duration.ZERO); - verify(subscriptions).matchesSubscribedPattern(topicName); - } - @Test public void testClearWakeupTriggerAfterPoll() { consumer = newConsumer(); @@ -601,6 +564,7 @@ public void testCommitAsyncLeaderEpochUpdate() { new ConsumerInterceptors<>(Collections.emptyList()), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, + singletonList(new RoundRobinAssignor()), "group-id", "client-id"); completeCommitSyncApplicationEventSuccessfully(); @@ -820,6 +784,7 @@ public void testPartitionRevocationOnClose() { mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, + singletonList(new RoundRobinAssignor()), "group-id", "client-id"); @@ -841,6 +806,7 @@ public void testFailedPartitionRevocationOnClose() { new ConsumerInterceptors<>(Collections.emptyList()), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, + singletonList(new RoundRobinAssignor()), "group-id", "client-id"); subscriptions.subscribe(singleton("topic"), Optional.of(listener)); @@ -858,7 +824,9 @@ public void testCompleteQuietly() 
{ AtomicReference exception = new AtomicReference<>(); CompletableFuture future = CompletableFuture.completedFuture(null); consumer = newConsumer(); - assertDoesNotThrow(() -> consumer.completeQuietly(() -> future.get(0, TimeUnit.MILLISECONDS), "test", exception)); + assertDoesNotThrow(() -> consumer.completeQuietly(() -> { + future.get(0, TimeUnit.MILLISECONDS); + }, "test", exception)); assertNull(exception.get()); assertDoesNotThrow(() -> consumer.completeQuietly(() -> { @@ -876,6 +844,7 @@ public void testAutoCommitSyncEnabled() { mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, + singletonList(new RoundRobinAssignor()), "group-id", "client-id"); consumer.subscribe(singleton("topic"), mock(ConsumerRebalanceListener.class)); @@ -893,6 +862,7 @@ public void testAutoCommitSyncDisabled() { mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, + singletonList(new RoundRobinAssignor()), "group-id", "client-id"); consumer.subscribe(singleton("topic"), mock(ConsumerRebalanceListener.class)); @@ -1654,18 +1624,6 @@ public void testGroupRemoteAssignorUsedInConsumerProtocol() { assertFalse(config.unused().contains(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG)); } - @Test - public void testPartitionAssignmentStrategyUnusedInAsyncConsumer() { - final Properties props = requiredConsumerConfig(); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroup1"); - props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)); - props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "CooperativeStickyAssignor"); - final ConsumerConfig config = new ConsumerConfig(props); - consumer = newConsumer(config); - - assertTrue(config.unused().contains(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG)); - } - @Test public void testGroupIdNull() { final Properties props = requiredConsumerConfig(); @@ -1708,6 +1666,7 @@ public void 
testEnsurePollEventSentOnConsumerPoll() { new ConsumerInterceptors<>(Collections.emptyList()), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, + singletonList(new RoundRobinAssignor()), "group-id", "client-id"); final TopicPartition tp = new TopicPartition("topic", 0); @@ -1805,9 +1764,9 @@ public void testLongPollWaitIsLimited() { // Mock the subscription being assigned as the first fetch is collected consumer.subscriptions().assignFromSubscribed(Collections.singleton(tp)); return Fetch.empty(); - }).doAnswer(invocation -> - Fetch.forPartition(tp, records, true) - ).when(fetchCollector).collectFetch(any(FetchBuffer.class)); + }).doAnswer(invocation -> { + return Fetch.forPartition(tp, records, true); + }).when(fetchCollector).collectFetch(any(FetchBuffer.class)); // And then poll for up to 10000ms, which should return 2 records without timing out ConsumerRecords returnedRecords = consumer.poll(Duration.ofMillis(10000)); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index e165aa9092a60..1daab57807e70 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -52,6 +52,7 @@ import org.mockito.Mockito; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -237,11 +238,11 @@ public void testPollEnsureCorrectInflightRequestBufferSize() { offsets2.put(new TopicPartition("test", 4), new OffsetAndMetadata(20L)); // Add the requests to the CommitRequestManager and store their futures - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; - commitManager.commitSync(offsets1, deadlineMs); - commitManager.fetchOffsets(Collections.singleton(new 
TopicPartition("test", 0)), deadlineMs); - commitManager.commitSync(offsets2, deadlineMs); - commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 1)), deadlineMs); + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; + commitManager.commitSync(offsets1, expirationTimeMs); + commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 0)), expirationTimeMs); + commitManager.commitSync(offsets2, expirationTimeMs); + commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 1)), expirationTimeMs); // Poll the CommitRequestManager and verify that the inflightOffsetFetches size is correct NetworkClientDelegate.PollResult result = commitManager.poll(time.milliseconds()); @@ -331,8 +332,8 @@ public void testCommitSyncRetriedAfterExpectedRetriableException(Errors error) { Map offsets = Collections.singletonMap( new TopicPartition("topic", 1), new OffsetAndMetadata(0)); - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; - CompletableFuture commitResult = commitRequestManager.commitSync(offsets, deadlineMs); + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; + CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); sendAndVerifyOffsetCommitRequestFailedAndMaybeRetried(commitRequestManager, error, commitResult); // We expect that request should have been retried on this sync commit. @@ -351,8 +352,8 @@ public void testCommitSyncFailsWithExpectedException(Errors commitError, new OffsetAndMetadata(0)); // Send sync offset commit that fails and verify it propagates the expected exception. 
- long deadlineMs = time.milliseconds() + retryBackoffMs; - CompletableFuture commitResult = commitRequestManager.commitSync(offsets, deadlineMs); + long expirationTimeMs = time.milliseconds() + retryBackoffMs; + CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); completeOffsetCommitRequestWithError(commitRequestManager, commitError); assertFutureThrows(commitResult, expectedException); } @@ -376,8 +377,8 @@ public void testCommitSyncFailsWithCommitFailedExceptionIfUnknownMemberId() { Map offsets = Collections.singletonMap( new TopicPartition("topic", 1), new OffsetAndMetadata(0)); - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; - CompletableFuture commitResult = commitRequestManager.commitSync(offsets, deadlineMs); + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; + CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); completeOffsetCommitRequestWithError(commitRequestManager, Errors.UNKNOWN_MEMBER_ID); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); @@ -638,7 +639,7 @@ public void testOffsetFetchRequestEnsureDuplicatedRequestSucceed() { @ParameterizedTest @MethodSource("offsetFetchExceptionSupplier") - public void testOffsetFetchRequestErroredRequests(final Errors error) { + public void testOffsetFetchRequestErroredRequests(final Errors error, final boolean isRetriable) { CommitRequestManager commitRequestManager = create(true, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); @@ -650,7 +651,7 @@ public void testOffsetFetchRequestErroredRequests(final Errors error) { 1, error); // we only want to make sure to purge the outbound buffer for non-retriables, so retriable will be re-queued. 
- if (isRetriableOnOffsetFetch(error)) + if (isRetriable) testRetriable(commitRequestManager, futures); else { testNonRetriable(futures); @@ -658,49 +659,15 @@ public void testOffsetFetchRequestErroredRequests(final Errors error) { } } - @ParameterizedTest - @MethodSource("offsetFetchExceptionSupplier") - public void testOffsetFetchRequestTimeoutRequests(final Errors error) { - CommitRequestManager commitRequestManager = create(true, 100); - when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); - - Set partitions = new HashSet<>(); - partitions.add(new TopicPartition("t1", 0)); - List>> futures = sendAndVerifyDuplicatedOffsetFetchRequests( - commitRequestManager, - partitions, - 1, - error); - - if (isRetriableOnOffsetFetch(error)) { - futures.forEach(f -> assertFalse(f.isDone())); - - // Insert a long enough sleep to force a timeout of the operation. Invoke poll() again so that each - // OffsetFetchRequestState is evaluated via isExpired(). - time.sleep(defaultApiTimeoutMs); - assertFalse(commitRequestManager.pendingRequests.unsentOffsetFetches.isEmpty()); - commitRequestManager.poll(time.milliseconds()); - futures.forEach(f -> assertFutureThrows(f, TimeoutException.class)); - assertTrue(commitRequestManager.pendingRequests.unsentOffsetFetches.isEmpty()); - } else { - futures.forEach(f -> assertFutureThrows(f, KafkaException.class)); - assertEmptyPendingRequests(commitRequestManager); - } - } - - private boolean isRetriableOnOffsetFetch(Errors error) { - return error == Errors.NOT_COORDINATOR || error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.COORDINATOR_NOT_AVAILABLE; - } - @Test public void testSuccessfulOffsetFetch() { CommitRequestManager commitManager = create(false, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> fetchResult 
= commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 0)), - deadlineMs); + expirationTimeMs); // Send fetch request NetworkClientDelegate.PollResult result = commitManager.poll(time.milliseconds()); @@ -745,8 +712,8 @@ public void testOffsetFetchMarksCoordinatorUnknownOnRetriableCoordinatorErrors(E Set partitions = new HashSet<>(); partitions.add(new TopicPartition("t1", 0)); - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; - CompletableFuture> result = commitRequestManager.fetchOffsets(partitions, deadlineMs); + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; + CompletableFuture> result = commitRequestManager.fetchOffsets(partitions, expirationTimeMs); completeOffsetFetchRequestWithError(commitRequestManager, partitions, error); @@ -772,8 +739,8 @@ public void testOffsetFetchMarksCoordinatorUnknownOnCoordinatorDisconnectedAndRe Set partitions = new HashSet<>(); partitions.add(new TopicPartition("t1", 0)); - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; - CompletableFuture> result = commitRequestManager.fetchOffsets(partitions, deadlineMs); + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; + CompletableFuture> result = commitRequestManager.fetchOffsets(partitions, expirationTimeMs); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(1, res.unsentRequests.size()); @@ -826,8 +793,8 @@ public void testOffsetCommitSyncTimeoutNotReturnedOnPollAndFails() { new OffsetAndMetadata(0)); // Send sync offset commit request that fails with retriable error. 
- long deadlineMs = time.milliseconds() + retryBackoffMs * 2; - CompletableFuture commitResult = commitRequestManager.commitSync(offsets, deadlineMs); + long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2; + CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); completeOffsetCommitRequestWithError(commitRequestManager, Errors.REQUEST_TIMED_OUT); // Request retried after backoff, and fails with retriable again. Should not complete yet @@ -848,9 +815,8 @@ public void testOffsetCommitSyncTimeoutNotReturnedOnPollAndFails() { * Sync commit requests that fail with an expected retriable error should be retried * while there is time. When time expires, they should fail with a TimeoutException. */ - @ParameterizedTest - @MethodSource("offsetCommitExceptionSupplier") - public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExpires(final Errors error) { + @Test + public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExpires() { CommitRequestManager commitRequestManager = create(false, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); @@ -859,21 +825,17 @@ public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExp new OffsetAndMetadata(0)); // Send offset commit request that fails with retriable error. - long deadlineMs = time.milliseconds() + retryBackoffMs * 2; - CompletableFuture commitResult = commitRequestManager.commitSync(offsets, deadlineMs); - completeOffsetCommitRequestWithError(commitRequestManager, error); + long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2; + CompletableFuture commitResult = commitRequestManager.commitSync(offsets, expirationTimeMs); + completeOffsetCommitRequestWithError(commitRequestManager, Errors.COORDINATOR_NOT_AVAILABLE); // Sleep to expire the request timeout. Request should fail on the next poll with a // TimeoutException. 
- time.sleep(deadlineMs); + time.sleep(expirationTimeMs); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size()); assertTrue(commitResult.isDone()); - - if (error.exception() instanceof RetriableException) - assertFutureThrows(commitResult, TimeoutException.class); - else - assertFutureThrows(commitResult, KafkaException.class); + assertFutureThrows(commitResult, TimeoutException.class); } /** @@ -912,8 +874,8 @@ public void testEnsureBackoffRetryOnOffsetCommitRequestTimeout() { Map offsets = Collections.singletonMap(new TopicPartition("topic", 1), new OffsetAndMetadata(0)); - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; - commitRequestManager.commitSync(offsets, deadlineMs); + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; + commitRequestManager.commitSync(offsets, expirationTimeMs); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(1, res.unsentRequests.size()); res.unsentRequests.get(0).handler().onFailure(time.milliseconds(), new TimeoutException()); @@ -994,8 +956,8 @@ public void testSyncOffsetFetchFailsWithStaleEpochAndRetriesWithNewEpoch() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); // Send request that is expected to fail with invalid epoch. - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; - commitRequestManager.fetchOffsets(partitions, deadlineMs); + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; + commitRequestManager.fetchOffsets(partitions, expirationTimeMs); // Mock member has new a valid epoch. int newEpoch = 8; @@ -1033,9 +995,9 @@ public void testSyncOffsetFetchFailsWithStaleEpochAndNotRetriedIfMemberNotInGrou when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); // Send request that is expected to fail with invalid epoch. 
- long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> requestResult = - commitRequestManager.fetchOffsets(partitions, deadlineMs); + commitRequestManager.fetchOffsets(partitions, expirationTimeMs); // Mock member not having a valid epoch anymore (left/failed/fenced). commitRequestManager.onMemberEpochUpdated(Optional.empty(), Optional.empty()); @@ -1066,10 +1028,10 @@ public void testAutoCommitSyncBeforeRevocationRetriesOnRetriableAndStaleEpoch(Er TopicPartition tp = new TopicPartition("topic", 1); subscriptionState.assignFromUser(singleton(tp)); subscriptionState.seek(tp, 5); - long deadlineMs = time.milliseconds() + retryBackoffMs * 2; + long expirationTimeMs = time.milliseconds() + retryBackoffMs * 2; // Send commit request expected to be retried on STALE_MEMBER_EPOCH error while it does not expire - commitRequestManager.maybeAutoCommitSyncBeforeRevocation(deadlineMs); + commitRequestManager.maybeAutoCommitSyncBeforeRevocation(expirationTimeMs); int newEpoch = 8; String memberId = "member1"; @@ -1177,7 +1139,7 @@ private void testNonRetriable(final List offsetCommitExceptionSupplier() { return Stream.of( @@ -1196,27 +1158,25 @@ private static Stream offsetCommitExceptionSupplier() { Arguments.of(Errors.UNKNOWN_MEMBER_ID)); } - /** - * @return {@link Errors} that could be received in {@link ApiKeys#OFFSET_FETCH} responses. 
- */ + // Supplies (error, isRetriable) private static Stream offsetFetchExceptionSupplier() { + // fetchCommit is only retrying on a subset of RetriableErrors return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), - Arguments.of(Errors.REQUEST_TIMED_OUT), - Arguments.of(Errors.FENCED_INSTANCE_ID), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), - Arguments.of(Errors.UNKNOWN_MEMBER_ID), + Arguments.of(Errors.NOT_COORDINATOR, true), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, true), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, false), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, false), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, false), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, false), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, false), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, true), + Arguments.of(Errors.REQUEST_TIMED_OUT, false), + Arguments.of(Errors.FENCED_INSTANCE_ID, false), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false), + Arguments.of(Errors.UNKNOWN_MEMBER_ID, false), // Adding STALE_MEMBER_EPOCH as non-retriable here because it is only retried if a new // member epoch is received. Tested separately. 
- Arguments.of(Errors.STALE_MEMBER_EPOCH), - Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT)); + Arguments.of(Errors.STALE_MEMBER_EPOCH, false)); } /** @@ -1240,9 +1200,9 @@ public void testOffsetFetchRequestPartitionDataError(final Errors error, final b TopicPartition tp2 = new TopicPartition("t2", 3); partitions.add(tp1); partitions.add(tp2); - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> future = - commitRequestManager.fetchOffsets(partitions, deadlineMs); + commitRequestManager.fetchOffsets(partitions, expirationTimeMs); NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(1, res.unsentRequests.size()); @@ -1300,9 +1260,9 @@ private List>> sendAndV int numRequest, final Errors error) { List>> futures = new ArrayList<>(); - long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; + long expirationTimeMs = time.milliseconds() + defaultApiTimeoutMs; for (int i = 0; i < numRequest; i++) { - futures.add(commitRequestManager.fetchOffsets(partitions, deadlineMs)); + futures.add(commitRequestManager.fetchOffsets(partitions, expirationTimeMs)); } NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); @@ -1421,7 +1381,7 @@ public ClientResponse mockOffsetCommitResponse(String topic, long receivedTimeMs, Errors error) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() - .setTopics(Collections.singletonList( + .setTopics(Arrays.asList( new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName(topic) .setPartitions(Collections.singletonList( @@ -1447,7 +1407,7 @@ public ClientResponse mockOffsetCommitResponseDisconnected(String topic, int par short apiKeyVersion, NetworkClientDelegate.UnsentRequest unsentRequest) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() - .setTopics(Collections.singletonList( + .setTopics(Arrays.asList( new 
OffsetCommitResponseData.OffsetCommitResponseTopic() .setName(topic) .setPartitions(Collections.singletonList( diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java index d2bf9b49cb9d3..954ed1c11e09b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java @@ -162,13 +162,13 @@ public abstract class ConsumerCoordinatorTest { private final String consumerId2 = "consumer2"; private MockClient client; - private final MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap() { + private MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap() { { put(topic1, 1); put(topic2, 1); } }); - private final Node node = metadataResponse.brokers().iterator().next(); + private Node node = metadataResponse.brokers().iterator().next(); private SubscriptionState subscriptions; private ConsumerMetadata metadata; private Metrics metrics; @@ -311,7 +311,8 @@ public void testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfN List> capturedTopics = topicsCaptor.getAllValues(); // expected the final group subscribed topics to be updated to "topic1" - assertEquals(Collections.singleton(topic1), capturedTopics.get(0)); + Set expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1)); + assertEquals(expectedTopicsGotCalled, capturedTopics.get(0)); } Mockito.clearInvocations(mockSubscriptionState); @@ -388,8 +389,8 @@ public void testPerformAssignmentShouldValidateCooperativeAssignment() { // simulate the custom cooperative assignor didn't revoke the partition first before assign to other consumer Map> assignment = new HashMap<>(); - assignment.put(consumerId, singletonList(t1p)); - assignment.put(consumerId2, 
singletonList(t2p)); + assignment.put(consumerId, Arrays.asList(t1p)); + assignment.put(consumerId2, Arrays.asList(t2p)); partitionAssignor.prepare(assignment); try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) { @@ -449,8 +450,8 @@ public String name() { // simulate the cooperative sticky assignor do the assignment with out-of-date ownedPartition Map> assignment = new HashMap<>(); - assignment.put(consumerId, singletonList(t1p)); - assignment.put(consumerId2, singletonList(t2p)); + assignment.put(consumerId, Arrays.asList(t1p)); + assignment.put(consumerId2, Arrays.asList(t2p)); mockCooperativeStickyAssignor.prepare(assignment); try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignorsWithCooperativeStickyAssignor, false, mockSubscriptionState)) { @@ -978,7 +979,7 @@ public void testNormalJoinGroupLeader() { final String consumerId = "leader"; final Set subscription = singleton(topic1); final List owned = Collections.emptyList(); - final List assigned = singletonList(t1p); + final List assigned = Arrays.asList(t1p); subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener)); @@ -1015,9 +1016,9 @@ public void testOutdatedCoordinatorAssignment() { final String consumerId = "outdated_assignment"; final List owned = Collections.emptyList(); final List oldSubscription = singletonList(topic2); - final List oldAssignment = singletonList(t2p); + final List oldAssignment = Arrays.asList(t2p); final List newSubscription = singletonList(topic1); - final List newAssignment = singletonList(t1p); + final List newAssignment = Arrays.asList(t1p); subscriptions.subscribe(toSet(oldSubscription), Optional.of(rebalanceListener)); @@ -2050,7 +2051,7 @@ public void testUpdateMetadataDuringRebalance() { // prepare initial rebalance Map> memberSubscriptions = singletonMap(consumerId, topics); - partitionAssignor.prepare(singletonMap(consumerId, 
singletonList(tp1))); + partitionAssignor.prepare(singletonMap(consumerId, Arrays.asList(tp1))); client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE)); client.prepareResponse(body -> { @@ -2253,7 +2254,7 @@ private void testInternalTopicInclusion(boolean includeInternalTopics) { public void testRejoinGroup() { String otherTopic = "otherTopic"; final List owned = Collections.emptyList(); - final List assigned = singletonList(t1p); + final List assigned = Arrays.asList(t1p); subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener)); @@ -2285,7 +2286,7 @@ public void testRejoinGroup() { public void testDisconnectInJoin() { subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener)); final List owned = Collections.emptyList(); - final List assigned = singletonList(t1p); + final List assigned = Arrays.asList(t1p); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java index 27404877ec014..4331e72054177 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java @@ -54,7 +54,7 @@ public class ConsumerInterceptorsTest { * Test consumer interceptor that filters records in onConsume() intercept */ private class FilterConsumerInterceptor implements ConsumerInterceptor { - private final int filterPartition; + private int filterPartition; private boolean throwExceptionOnConsume = false; private boolean throwExceptionOnCommit = false; diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java 
b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 160825a308808..8c3f97dd64379 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -53,6 +53,7 @@ import org.junit.jupiter.params.provider.ValueSource; import java.time.Duration; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -63,6 +64,7 @@ import java.util.concurrent.CompletableFuture; import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_HEARTBEAT_INTERVAL_MS; +import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_REQUEST_TIMEOUT_MS; import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.createDefaultGroupInformation; import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; @@ -242,6 +244,28 @@ void testFetchTopicMetadata() { verify(applicationEventProcessor).process(any(TopicMetadataEvent.class)); } + @Test + void testPollResultTimer() { + NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( + new FindCoordinatorRequest.Builder( + new FindCoordinatorRequestData() + .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) + .setKey("foobar")), + Optional.empty()); + req.setTimer(time, DEFAULT_REQUEST_TIMEOUT_MS); + + // purposely setting a non-MAX time to ensure it is returning Long.MAX_VALUE upon success + NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( + 10, + Collections.singletonList(req)); + assertEquals(10, networkClient.addAll(success)); + + NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( + 10, + new ArrayList<>()); + 
assertEquals(10, networkClient.addAll(failure)); + } + @Test void testMaximumTimeToWait() { // Initial value before runOnce has been called @@ -315,27 +339,6 @@ void testRunOnceInvokesReaper() { verify(applicationEventReaper).reap(any(Long.class)); } - @Test - void testSendUnsentRequest() { - String groupId = "group-id"; - NetworkClientDelegate.UnsentRequest request = new NetworkClientDelegate.UnsentRequest( - new FindCoordinatorRequest.Builder( - new FindCoordinatorRequestData() - .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) - .setKey(groupId)), - Optional.empty()); - - networkClient.add(request); - assertTrue(networkClient.hasAnyPendingRequests()); - assertFalse(networkClient.unsentRequests().isEmpty()); - assertFalse(client.hasInFlightRequests()); - consumerNetworkThread.cleanup(); - - assertTrue(networkClient.unsentRequests().isEmpty()); - assertFalse(client.hasInFlightRequests()); - assertFalse(networkClient.hasAnyPendingRequests()); - } - private void prepareOffsetCommitRequest(final Map expectedOffsets, final Errors error, final boolean disconnected) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerTestBuilder.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerTestBuilder.java index 86cb44d748562..9f6fd4a764b0a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerTestBuilder.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerTestBuilder.java @@ -162,9 +162,7 @@ public ConsumerTestBuilder(Optional groupInfo, boolean enableA this.networkClientDelegate = spy(new NetworkClientDelegate(time, config, logContext, - client, - metadata, - backgroundEventHandler)); + client)); this.offsetsRequestManager = spy(new OffsetsRequestManager(subscriptions, metadata, fetchConfig.isolationLevel, @@ -176,11 +174,12 @@ public ConsumerTestBuilder(Optional groupInfo, boolean enableA backgroundEventHandler, logContext)); - 
this.topicMetadataRequestManager = spy(new TopicMetadataRequestManager(logContext, time, config)); + this.topicMetadataRequestManager = spy(new TopicMetadataRequestManager(logContext, config)); if (groupInfo.isPresent()) { GroupInformation gi = groupInfo.get(); CoordinatorRequestManager coordinator = spy(new CoordinatorRequestManager( + time, logContext, DEFAULT_RETRY_BACKOFF_MS, DEFAULT_RETRY_BACKOFF_MAX_MS, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java index 01ba51134d23f..d4496522c07b8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java @@ -189,6 +189,7 @@ private void expectFindCoordinatorRequest( private CoordinatorRequestManager setupCoordinatorManager(String groupId) { return new CoordinatorRequestManager( + time, new LogContext(), RETRY_BACKOFF_MS, RETRY_BACKOFF_MS, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java index d65d6d5f2fc43..eaabcb8f814d6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java @@ -28,7 +28,6 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.OffsetOutOfRangeException; import org.apache.kafka.clients.consumer.OffsetResetStrategy; -import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.KafkaException; @@ 
-144,7 +143,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -185,7 +183,6 @@ public class FetchRequestManagerTest { private MockTime time = new MockTime(1); private SubscriptionState subscriptions; private ConsumerMetadata metadata; - private BackgroundEventHandler backgroundEventHandler; private FetchMetricsRegistry metricsRegistry; private FetchMetricsManager metricsManager; private MockClient client; @@ -3620,7 +3617,6 @@ private void buildDependencies(MetricConfig metricConfig, metrics = new Metrics(metricConfig, time); metricsRegistry = new FetchMetricsRegistry(metricConfig.tags().keySet(), "consumer" + groupId); metricsManager = new FetchMetricsManager(metrics, metricsRegistry); - backgroundEventHandler = mock(BackgroundEventHandler.class); Properties properties = new Properties(); properties.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); @@ -3628,7 +3624,7 @@ private void buildDependencies(MetricConfig metricConfig, properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); ConsumerConfig config = new ConsumerConfig(properties); - networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler)); + networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client)); } private List collectRecordOffsets(List> records) { @@ -3676,10 +3672,8 @@ private class TestableNetworkClientDelegate extends NetworkClientDelegate { public TestableNetworkClientDelegate(Time time, ConsumerConfig config, LogContext logContext, - KafkaClient client, - Metadata 
metadata, - BackgroundEventHandler backgroundEventHandler) { - super(time, config, logContext, client, metadata, backgroundEventHandler); + KafkaClient client) { + super(time, config, logContext, client); } @Override diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java index 720e6456644c8..70f33bfdf451e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java @@ -17,14 +17,9 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ClientResponse; -import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; -import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; -import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; import org.apache.kafka.common.Node; -import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.message.FindCoordinatorRequestData; @@ -38,9 +33,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.ArrayList; import java.util.Collections; -import java.util.LinkedList; import java.util.Objects; import java.util.Optional; import java.util.Properties; @@ -50,55 +43,24 @@ import static org.apache.kafka.clients.consumer.ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; import static org.junit.jupiter.api.Assertions.assertEquals; -import static 
org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class NetworkClientDelegateTest { private static final int REQUEST_TIMEOUT_MS = 5000; private static final String GROUP_ID = "group"; - private static final long DEFAULT_REQUEST_TIMEOUT_MS = 500; private MockTime time; private MockClient client; - private Metadata metadata; - private BackgroundEventHandler backgroundEventHandler; @BeforeEach public void setup() { this.time = new MockTime(0); - this.metadata = mock(Metadata.class); - this.backgroundEventHandler = mock(BackgroundEventHandler.class); this.client = new MockClient(time, Collections.singletonList(mockNode())); } - @Test - void testPollResultTimer() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { - NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( - new FindCoordinatorRequest.Builder( - new FindCoordinatorRequestData() - .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) - .setKey("foobar")), - Optional.empty()); - req.setTimer(time, DEFAULT_REQUEST_TIMEOUT_MS); - - // purposely setting a non-MAX time to ensure it is returning Long.MAX_VALUE upon success - NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( - 10, - Collections.singletonList(req)); - assertEquals(10, ncd.addAll(success)); - - NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( - 10, - new ArrayList<>()); - assertEquals(10, ncd.addAll(failure)); - } - } - @Test public void testSuccessfulResponse() throws Exception { try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { @@ -178,53 +140,6 @@ public void testEnsureTimerSetOnAdd() { 
assertEquals(REQUEST_TIMEOUT_MS, ncd.unsentRequests().poll().timer().timeoutMs()); } - @Test - public void testHasAnyPendingRequests() throws Exception { - try (NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate()) { - NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); - networkClientDelegate.add(unsentRequest); - - // unsent - assertTrue(networkClientDelegate.hasAnyPendingRequests()); - assertFalse(networkClientDelegate.unsentRequests().isEmpty()); - assertFalse(client.hasInFlightRequests()); - - networkClientDelegate.poll(0, time.milliseconds()); - - // in-flight - assertTrue(networkClientDelegate.hasAnyPendingRequests()); - assertTrue(networkClientDelegate.unsentRequests().isEmpty()); - assertTrue(client.hasInFlightRequests()); - - client.respond(FindCoordinatorResponse.prepareResponse(Errors.NONE, GROUP_ID, mockNode())); - networkClientDelegate.poll(0, time.milliseconds()); - - // get response - assertFalse(networkClientDelegate.hasAnyPendingRequests()); - assertTrue(networkClientDelegate.unsentRequests().isEmpty()); - assertFalse(client.hasInFlightRequests()); - } - } - - @Test - public void testPropagateMetadataError() { - AuthenticationException authException = new AuthenticationException("Test Auth Exception"); - doThrow(authException).when(metadata).maybeThrowAnyException(); - - LinkedList backgroundEventQueue = new LinkedList<>(); - this.backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); - NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(); - - assertEquals(0, backgroundEventQueue.size()); - networkClientDelegate.poll(0, time.milliseconds()); - assertEquals(1, backgroundEventQueue.size()); - - BackgroundEvent event = backgroundEventQueue.poll(); - assertNotNull(event); - assertEquals(BackgroundEvent.Type.ERROR, event.type()); - assertEquals(authException, ((ErrorEvent) event).error()); - } - public NetworkClientDelegate newNetworkClientDelegate() { 
LogContext logContext = new LogContext(); Properties properties = new Properties(); @@ -232,12 +147,7 @@ public NetworkClientDelegate newNetworkClientDelegate() { properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.put(GROUP_ID_CONFIG, GROUP_ID); properties.put(REQUEST_TIMEOUT_MS_CONFIG, REQUEST_TIMEOUT_MS); - return new NetworkClientDelegate(this.time, - new ConsumerConfig(properties), - logContext, - this.client, - this.metadata, - this.backgroundEventHandler); + return new NetworkClientDelegate(this.time, new ConsumerConfig(properties), logContext, this.client); } public NetworkClientDelegate.UnsentRequest newUnsentFindCoordinatorRequest() { @@ -260,4 +170,4 @@ public void prepareFindCoordinatorResponse(Errors error) { private Node mockNode() { return new Node(0, "localhost", 99); } -} \ No newline at end of file +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TimedRequestStateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TimedRequestStateTest.java deleted file mode 100644 index ddde3ae84d4b1..0000000000000 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TimedRequestStateTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.consumer.internals; - -import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.common.utils.MockTime; -import org.apache.kafka.common.utils.Time; -import org.apache.kafka.common.utils.Timer; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class TimedRequestStateTest { - - private final static long DEFAULT_TIMEOUT_MS = 30000; - private final Time time = new MockTime(); - - @Test - public void testIsExpired() { - TimedRequestState state = new TimedRequestState( - new LogContext(), - this.getClass().getSimpleName(), - 100, - 1000, - time.timer(DEFAULT_TIMEOUT_MS) - ); - assertFalse(state.isExpired()); - time.sleep(DEFAULT_TIMEOUT_MS); - assertTrue(state.isExpired()); - } - - @Test - public void testRemainingMs() { - TimedRequestState state = new TimedRequestState( - new LogContext(), - this.getClass().getSimpleName(), - 100, - 1000, - time.timer(DEFAULT_TIMEOUT_MS) - ); - assertEquals(DEFAULT_TIMEOUT_MS, state.remainingMs()); - time.sleep(DEFAULT_TIMEOUT_MS); - assertEquals(0, state.remainingMs()); - } - - @Test - public void testDeadlineTimer() { - long deadlineMs = time.milliseconds() + DEFAULT_TIMEOUT_MS; - Timer timer = TimedRequestState.deadlineTimer(time, deadlineMs); - assertEquals(DEFAULT_TIMEOUT_MS, timer.remainingMs()); - timer.sleep(DEFAULT_TIMEOUT_MS); - assertEquals(0, timer.remainingMs()); - } - - @Test - public void testAllowOverdueDeadlineTimer() { - long deadlineMs = time.milliseconds() - DEFAULT_TIMEOUT_MS; - Timer timer = TimedRequestState.deadlineTimer(time, deadlineMs); - assertEquals(0, timer.remainingMs()); - } - - @Test - public void testToStringUpdatesTimer() { - TimedRequestState state = new 
TimedRequestState( - new LogContext(), - this.getClass().getSimpleName(), - 100, - 1000, - time.timer(DEFAULT_TIMEOUT_MS) - ); - - assertToString(state, DEFAULT_TIMEOUT_MS); - time.sleep(DEFAULT_TIMEOUT_MS); - assertToString(state, 0); - } - - private void assertToString(TimedRequestState state, long timerMs) { - assertTrue(state.toString().contains("remainingMs=" + timerMs + "}")); - } -} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java index 56eff5b4f4b58..3f2b2c3d983d2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java @@ -74,7 +74,6 @@ public void setup() { props.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); this.topicMetadataRequestManager = spy(new TopicMetadataRequestManager( new LogContext(), - time, new ConsumerConfig(props))); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java index 6a4b9faf479d6..451743ae2ad83 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java @@ -35,6 +35,7 @@ import java.util.Collections; import java.util.List; import java.util.Optional; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; @@ -46,6 +47,7 @@ public class ApplicationEventProcessorTest { private final Time time = new 
MockTime(1); + private final BlockingQueue applicationEventQueue = mock(BlockingQueue.class); private final ConsumerMetadata metadata = mock(ConsumerMetadata.class); private ApplicationEventProcessor processor; private CommitRequestManager commitRequestManager; @@ -53,6 +55,7 @@ public class ApplicationEventProcessorTest { private MembershipManager membershipManager; @BeforeEach + @SuppressWarnings("unchecked") public void setup() { LogContext logContext = new LogContext(); OffsetsRequestManager offsetsRequestManager = mock(OffsetsRequestManager.class); diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index 81a7804b97ea6..7d4aa5e3a85d6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -615,9 +615,10 @@ public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemai MockProducerInterceptor.setThrowOnConfigExceptionThreshold(targetInterceptor); - assertThrows(KafkaException.class, () -> - new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()) - ); + assertThrows(KafkaException.class, () -> { + new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + }); assertEquals(3, MockProducerInterceptor.CONFIG_COUNT.get()); assertEquals(3, MockProducerInterceptor.CLOSE_COUNT.get()); @@ -1206,8 +1207,9 @@ public void testInitTransactionsResponseAfterTimeout() throws Exception { ExecutorService executor = Executors.newFixedThreadPool(1); - try (Producer producer = kafkaProducer(configs, new StringSerializer(), - new StringSerializer(), metadata, client, null, time)) { + Producer producer = kafkaProducer(configs, new StringSerializer(), + new StringSerializer(), metadata, client, null, time); + try { client.prepareResponse( request -> request instanceof 
FindCoordinatorRequest && ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(), @@ -1224,6 +1226,8 @@ public void testInitTransactionsResponseAfterTimeout() throws Exception { Thread.sleep(1000); producer.initTransactions(); + } finally { + producer.close(Duration.ZERO); } } @@ -2066,7 +2070,7 @@ public void testCallbackAndInterceptorHandleError() { String invalidTopicName = "topic abc"; // Invalid topic name due to space ProducerInterceptors producerInterceptors = - new ProducerInterceptors<>(Collections.singletonList(new MockProducerInterceptor())); + new ProducerInterceptors<>(Arrays.asList(new MockProducerInterceptor())); try (Producer producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), producerMetadata, client, producerInterceptors, time)) { diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java index 1578c2e22492a..ed372e452f426 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java @@ -89,7 +89,7 @@ public void testSimple() throws Exception { } /** - * Test that we cannot try to allocate more memory than we have in the whole pool + * Test that we cannot try to allocate more memory then we have in the whole pool */ @Test public void testCantAllocateMoreMemoryThanWeHave() throws Exception { diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java index ff4e2eec498da..4719bf99c1c0a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java +++ 
b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java @@ -1486,7 +1486,7 @@ public void testReadyAndDrainWhenABatchIsBeingRetried() throws InterruptedExcept int part1LeaderEpoch = 100; // Create cluster metadata, partition1 being hosted by node1 PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.of(part1LeaderEpoch), null, null, null); - MetadataSnapshot metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + MetadataSnapshot metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); int batchSize = 10; int lingerMs = 10; @@ -1528,7 +1528,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, // Try to drain from node1, it should return no batches. Map> batches = accum.drain(metadataCache, - new HashSet<>(Collections.singletonList(node1)), 999999 /* maxSize */, now); + new HashSet<>(Arrays.asList(node1)), 999999 /* maxSize */, now); assertTrue(batches.containsKey(node1.id()) && batches.get(node1.id()).isEmpty(), "No batches ready to be drained on Node1"); } @@ -1539,7 +1539,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, part1LeaderEpoch++; // Create cluster metadata, with new leader epoch. 
part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.of(part1LeaderEpoch), null, null, null); - metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, now); assertTrue(result.readyNodes.contains(node1), "Node1 is ready"); @@ -1559,7 +1559,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, now += 2 * retryBackoffMaxMs; // Create cluster metadata, with new leader epoch. part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.of(part1LeaderEpoch), null, null, null); - metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, now); assertTrue(result.readyNodes.contains(node1), "Node1 is ready"); @@ -1580,7 +1580,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, part1LeaderEpoch++; // Create cluster metadata, with new leader epoch. 
part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.of(part1LeaderEpoch), null, null, null); - metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, now); assertTrue(result.readyNodes.contains(node1), "Node1 is ready"); @@ -1605,11 +1605,11 @@ public void testDrainWithANodeThatDoesntHostAnyPartitions() { // Create cluster metadata, node2 doesn't host any partitions. PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.empty(), null, null, null); - MetadataSnapshot metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + MetadataSnapshot metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); // Drain for node2, it should return 0 batches, Map> batches = accum.drain(metadataCache, - new HashSet<>(Collections.singletonList(node2)), 999999 /* maxSize */, time.milliseconds()); + new HashSet<>(Arrays.asList(node2)), 999999 /* maxSize */, time.milliseconds()); assertTrue(batches.get(node2.id()).isEmpty()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java index f112e424a2de7..cdbd7c3f7b919 100644 --- 
a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java @@ -2512,7 +2512,7 @@ public void testAllowDrainInAbortableErrorState() throws InterruptedException { // Try to drain a message destined for tp1, it should get drained. Node node1 = new Node(1, "localhost", 1112); PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.empty(), null, null, null); - MetadataSnapshot metadataCache = new MetadataSnapshot(null, Collections.singletonMap(node1.id(), node1), singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + MetadataSnapshot metadataCache = new MetadataSnapshot(null, Collections.singletonMap(node1.id(), node1), Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); appendToAccumulator(tp1); Map> drainedBatches = accumulator.drain(metadataCache, Collections.singleton(node1), Integer.MAX_VALUE, @@ -2533,7 +2533,7 @@ public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedExc appendToAccumulator(tp0); Node node1 = new Node(0, "localhost", 1111); PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp0, Optional.of(node1.id()), Optional.empty(), null, null, null); - MetadataSnapshot metadataCache = new MetadataSnapshot(null, Collections.singletonMap(node1.id(), node1), singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + MetadataSnapshot metadataCache = new MetadataSnapshot(null, Collections.singletonMap(node1.id(), node1), Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); Set nodes = new HashSet<>(); nodes.add(node1); diff 
--git a/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java b/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java index ad904d6e73cc0..81d2530650dfa 100644 --- a/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java +++ b/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java @@ -99,7 +99,7 @@ private Throwable awaitAndAssertFailure(KafkaFuture future, assertEquals(expectedException, executionException.getCause().getClass()); assertEquals(expectedMessage, executionException.getCause().getMessage()); - executionException = assertThrows(ExecutionException.class, future::get); + executionException = assertThrows(ExecutionException.class, () -> future.get()); assertEquals(expectedException, executionException.getCause().getClass()); assertEquals(expectedMessage, executionException.getCause().getMessage()); @@ -114,7 +114,7 @@ private void awaitAndAssertCancelled(KafkaFuture future, String expectedMessa assertEquals(expectedMessage, cancellationException.getMessage()); assertEquals(CancellationException.class, cancellationException.getClass()); - cancellationException = assertThrows(CancellationException.class, future::get); + cancellationException = assertThrows(CancellationException.class, () -> future.get()); assertEquals(expectedMessage, cancellationException.getMessage()); assertEquals(CancellationException.class, cancellationException.getClass()); @@ -155,7 +155,7 @@ public void testCompleteFuturesExceptionally() { assertFalse(futureFail.completeExceptionally(new RuntimeException("We require more minerals"))); assertFalse(futureFail.cancel(true)); - ExecutionException executionException = assertThrows(ExecutionException.class, futureFail::get); + ExecutionException executionException = assertThrows(ExecutionException.class, () -> futureFail.get()); assertEquals(RuntimeException.class, executionException.getCause().getClass()); assertEquals("We require more vespene gas", 
executionException.getCause().getMessage()); diff --git a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java index 7d8ca5fdff7ee..b85dd3556e007 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java @@ -121,7 +121,7 @@ public void testDefinedTwice() { @Test public void testBadInputs() { testBadInputs(Type.INT, "hello", "42.5", 42.5, Long.MAX_VALUE, Long.toString(Long.MAX_VALUE), new Object()); - testBadInputs(Type.LONG, "hello", "42.5", Long.MAX_VALUE + "00", new Object()); + testBadInputs(Type.LONG, "hello", "42.5", Long.toString(Long.MAX_VALUE) + "00", new Object()); testBadInputs(Type.DOUBLE, "hello", new Object()); testBadInputs(Type.STRING, new Object()); testBadInputs(Type.LIST, 53, new Object()); @@ -242,7 +242,7 @@ public void testParseForValidate() { String errorMessageC = "Missing required configuration \"c\" which has no default value."; ConfigValue configA = new ConfigValue("a", 1, Collections.emptyList(), Collections.emptyList()); ConfigValue configB = new ConfigValue("b", null, Collections.emptyList(), Arrays.asList(errorMessageB, errorMessageB)); - ConfigValue configC = new ConfigValue("c", null, Collections.emptyList(), singletonList(errorMessageC)); + ConfigValue configC = new ConfigValue("c", null, Collections.emptyList(), Arrays.asList(errorMessageC)); ConfigValue configD = new ConfigValue("d", 10, Collections.emptyList(), Collections.emptyList()); expected.put("a", configA); expected.put("b", configB); @@ -253,7 +253,7 @@ public void testParseForValidate() { .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c"), new IntegerRecommender(false)) .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true)) .define("c", Type.INT, Importance.HIGH, "docs", 
"group", 3, Width.SHORT, "c", new IntegerRecommender(true)) - .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", singletonList("b"), new IntegerRecommender(false)); + .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", Arrays.asList("b"), new IntegerRecommender(false)); Map props = new HashMap<>(); props.put("a", "1"); @@ -279,7 +279,7 @@ public void testValidate() { ConfigValue configA = new ConfigValue("a", 1, Arrays.asList(1, 2, 3), Collections.emptyList()); ConfigValue configB = new ConfigValue("b", null, Arrays.asList(4, 5), Arrays.asList(errorMessageB, errorMessageB)); - ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), singletonList(errorMessageC)); + ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), Arrays.asList(errorMessageC)); ConfigValue configD = new ConfigValue("d", 10, Arrays.asList(1, 2, 3), Collections.emptyList()); expected.put("a", configA); @@ -291,7 +291,7 @@ public void testValidate() { .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c"), new IntegerRecommender(false)) .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true)) .define("c", Type.INT, Importance.HIGH, "docs", "group", 3, Width.SHORT, "c", new IntegerRecommender(true)) - .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", singletonList("b"), new IntegerRecommender(false)); + .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", Arrays.asList("b"), new IntegerRecommender(false)); Map props = new HashMap<>(); props.put("a", "1"); @@ -313,9 +313,9 @@ public void testValidateMissingConfigKey() { String errorMessageD = "d is referred in the dependents, but not defined."; ConfigValue configA = new ConfigValue("a", 1, Arrays.asList(1, 2, 3), Collections.emptyList()); - ConfigValue configB = new ConfigValue("b", null, Arrays.asList(4, 5), 
singletonList(errorMessageB)); - ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), singletonList(errorMessageC)); - ConfigValue configD = new ConfigValue("d", null, Collections.emptyList(), singletonList(errorMessageD)); + ConfigValue configB = new ConfigValue("b", null, Arrays.asList(4, 5), Arrays.asList(errorMessageB)); + ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), Arrays.asList(errorMessageC)); + ConfigValue configD = new ConfigValue("d", null, Collections.emptyList(), Arrays.asList(errorMessageD)); configD.visible(false); expected.put("a", configA); @@ -343,7 +343,7 @@ public void testValidateMissingConfigKey() { public void testValidateCannotParse() { Map expected = new HashMap<>(); String errorMessageB = "Invalid value non_integer for configuration a: Not a number of type INT"; - ConfigValue configA = new ConfigValue("a", null, Collections.emptyList(), singletonList(errorMessageB)); + ConfigValue configA = new ConfigValue("a", null, Collections.emptyList(), Arrays.asList(errorMessageB)); expected.put("a", configA); ConfigDef def = new ConfigDef().define("a", Type.INT, Importance.HIGH, "docs"); @@ -438,7 +438,7 @@ public void testBaseConfigDefDependents() { // Creating a ConfigDef based on another should compute the correct number of configs with no parent, even // if the base ConfigDef has already computed its parentless configs final ConfigDef baseConfigDef = new ConfigDef().define("a", Type.STRING, Importance.LOW, "docs"); - assertEquals(new HashSet<>(singletonList("a")), baseConfigDef.getConfigsWithNoParent()); + assertEquals(new HashSet<>(Arrays.asList("a")), baseConfigDef.getConfigsWithNoParent()); final ConfigDef configDef = new ConfigDef(baseConfigDef) .define("parent", Type.STRING, Importance.HIGH, "parent docs", "group", 1, Width.LONG, "Parent", singletonList("child")) @@ -502,7 +502,7 @@ public void toRst() { .define("opt3", Type.LIST, Arrays.asList("a", "b"), Importance.LOW, "docs3") .define("opt4", 
Type.BOOLEAN, false, Importance.LOW, null); - final String expectedRst = + final String expectedRst = "" + "``opt2``\n" + " docs2\n" + "\n" + @@ -547,7 +547,7 @@ public void toEnrichedRst() { "Group Two", 0, Width.NONE, "..", singletonList("some.option")) .define("poor.opt", Type.STRING, "foo", Importance.HIGH, "Doc doc doc doc."); - final String expectedRst = + final String expectedRst = "" + "``poor.opt``\n" + " Doc doc doc doc.\n" + "\n" + diff --git a/clients/src/test/java/org/apache/kafka/common/config/provider/DirectoryConfigProviderTest.java b/clients/src/test/java/org/apache/kafka/common/config/provider/DirectoryConfigProviderTest.java index b351b0e6e6cae..59949e6043c3e 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/provider/DirectoryConfigProviderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/provider/DirectoryConfigProviderTest.java @@ -179,12 +179,12 @@ public void testMultipleAllowedPaths() { provider.configure(configs); ConfigData configData = provider.get(subdir); - assertEquals(toSet(Collections.singletonList(subdirFileName)), configData.data().keySet()); + assertEquals(toSet(asList(subdirFileName)), configData.data().keySet()); assertEquals("SUBDIRFILE", configData.data().get(subdirFileName)); assertNull(configData.ttl()); configData = provider.get(siblingDir); - assertEquals(toSet(Collections.singletonList(siblingDirFileName)), configData.data().keySet()); + assertEquals(toSet(asList(siblingDirFileName)), configData.data().keySet()); assertEquals("SIBLINGDIRFILE", configData.data().get(siblingDirFileName)); assertNull(configData.ttl()); } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java index 3f71a45628f26..a6b2e7f65d8e9 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java @@ 
-26,7 +26,7 @@ import javax.management.ObjectName; import java.lang.management.ManagementFactory; import java.util.ArrayList; -import java.util.Collections; +import java.util.Arrays; import java.util.HashMap; import java.util.Map; @@ -164,7 +164,7 @@ public void testJmxPrefix() throws Exception { JmxReporter reporter = new JmxReporter(); MetricsContext metricsContext = new KafkaMetricsContext("kafka.server"); MetricConfig metricConfig = new MetricConfig(); - Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Collections.singletonList(reporter)), Time.SYSTEM, metricsContext); + Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Arrays.asList(reporter)), Time.SYSTEM, metricsContext); MBeanServer server = ManagementFactory.getPlatformMBeanServer(); try { @@ -183,7 +183,7 @@ public void testDeprecatedJmxPrefixWithDefaultMetrics() throws Exception { // for backwards compatibility, ensure prefix does not get overridden by the default empty namespace in metricscontext MetricConfig metricConfig = new MetricConfig(); - Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Collections.singletonList(reporter)), Time.SYSTEM); + Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Arrays.asList(reporter)), Time.SYSTEM); MBeanServer server = ManagementFactory.getPlatformMBeanServer(); try { diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java index f4113b00e38ce..a4289d8afbd23 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java @@ -77,7 +77,7 @@ public class MetricsTest { @BeforeEach public void setup() { - this.metrics = new Metrics(config, singletonList(new JmxReporter()), time, true); + this.metrics = new Metrics(config, Arrays.asList(new JmxReporter()), time, true); } @AfterEach @@ -197,7 +197,7 @@ public void testHierarchicalSensors() 
{ assertEquals(1.0 + c1, p2, EPS); assertEquals(1.0 + c1 + c2, p1, EPS); assertEquals(Arrays.asList(child1, child2), metrics.childrenSensors().get(parent1)); - assertEquals(singletonList(child1), metrics.childrenSensors().get(parent2)); + assertEquals(Arrays.asList(child1), metrics.childrenSensors().get(parent2)); assertNull(metrics.childrenSensors().get(grandchild)); } @@ -693,7 +693,7 @@ public void testMetricInstances() { Map childTagsWithValues = new HashMap<>(); childTagsWithValues.put("child-tag", "child-tag-value"); - try (Metrics inherited = new Metrics(new MetricConfig().tags(parentTagsWithValues), singletonList(new JmxReporter()), time, true)) { + try (Metrics inherited = new Metrics(new MetricConfig().tags(parentTagsWithValues), Arrays.asList(new JmxReporter()), time, true)) { MetricName inheritedMetric = inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, childTagsWithValues); Map filledOutTags = inheritedMetric.tags(); @@ -761,7 +761,7 @@ public void testConcurrentReadUpdate() { public void testConcurrentReadUpdateReport() { class LockingReporter implements MetricsReporter { - final Map activeMetrics = new HashMap<>(); + Map activeMetrics = new HashMap<>(); @Override public synchronized void init(List metrics) { } @@ -793,7 +793,7 @@ synchronized void processMetrics() { final LockingReporter reporter = new LockingReporter(); this.metrics.close(); - this.metrics = new Metrics(config, singletonList(reporter), new MockTime(10), true); + this.metrics = new Metrics(config, Arrays.asList(reporter), new MockTime(10), true); final Deque sensors = new ConcurrentLinkedDeque<>(); SensorCreator sensorCreator = new SensorCreator(metrics); diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java index df3eedd176ad9..9254616528fe7 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java +++ 
b/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java @@ -29,6 +29,7 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -127,7 +128,7 @@ public void testShouldRecordForTraceLevelSensor() { public void testExpiredSensor() { MetricConfig config = new MetricConfig(); Time mockTime = new MockTime(); - try (Metrics metrics = new Metrics(config, Collections.singletonList(new JmxReporter()), mockTime, true)) { + try (Metrics metrics = new Metrics(config, Arrays.asList(new JmxReporter()), mockTime, true)) { long inactiveSensorExpirationTimeSeconds = 60L; Sensor sensor = new Sensor(metrics, "sensor", null, config, mockTime, inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel.INFO); diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/stats/FrequenciesTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/stats/FrequenciesTest.java index 62b4884af2cc5..ba3067d993d9f 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/stats/FrequenciesTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/stats/FrequenciesTest.java @@ -28,6 +28,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -44,7 +45,7 @@ public class FrequenciesTest { public void setup() { config = new MetricConfig().eventWindow(50).samples(2); time = new MockTime(); - metrics = new Metrics(config, Collections.singletonList(new JmxReporter()), time, true); + metrics = new Metrics(config, Arrays.asList(new JmxReporter()), time, true); } @AfterEach diff --git a/clients/src/test/java/org/apache/kafka/common/network/KafkaChannelTest.java b/clients/src/test/java/org/apache/kafka/common/network/KafkaChannelTest.java index bd03a60935107..f83ea7db87187 100644 --- 
a/clients/src/test/java/org/apache/kafka/common/network/KafkaChannelTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/KafkaChannelTest.java @@ -73,9 +73,9 @@ public void testReceiving() throws IOException { ChannelMetadataRegistry metadataRegistry = Mockito.mock(ChannelMetadataRegistry.class); ArgumentCaptor sizeCaptor = ArgumentCaptor.forClass(Integer.class); - Mockito.when(pool.tryAllocate(sizeCaptor.capture())).thenAnswer(invocation -> - ByteBuffer.allocate(sizeCaptor.getValue()) - ); + Mockito.when(pool.tryAllocate(sizeCaptor.capture())).thenAnswer(invocation -> { + return ByteBuffer.allocate(sizeCaptor.getValue()); + }); KafkaChannel channel = new KafkaChannel("0", transport, () -> authenticator, 1024, pool, metadataRegistry); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index f805ba807d69d..25a240c2ede18 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -272,7 +272,7 @@ static List cipherMetrics(Metrics metrics) { return metrics.metrics().entrySet().stream(). filter(e -> e.getKey().description(). contains("The number of connections with this SSL cipher and protocol.")). - map(Map.Entry::getValue). + map(e -> e.getValue()). 
collect(Collectors.toList()); } @@ -955,7 +955,7 @@ public void testWriteCompletesSendWithNoBytesWritten() throws IOException { NetworkSend send = new NetworkSend("destination", new ByteBufferSend(ByteBuffer.allocate(0))); when(channel.maybeCompleteSend()).thenReturn(send); selector.write(channel); - assertEquals(Collections.singletonList(send), selector.completedSends()); + assertEquals(asList(send), selector.completedSends()); } /** diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java index e56ce7abbad0b..aeb37af931369 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java @@ -55,6 +55,7 @@ import java.nio.channels.SocketChannel; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -104,7 +105,7 @@ private static class Args { private CertStores clientCertStores; private Map sslClientConfigs; private Map sslServerConfigs; - private final Map sslConfigOverrides; + private Map sslConfigOverrides; public Args(String tlsProtocol, boolean useInlinePem) throws Exception { this.tlsProtocol = tlsProtocol; @@ -620,7 +621,7 @@ public void testTlsDefaults(Args args) throws Exception { /** Checks connection failed using the specified {@code tlsVersion}. 
*/ private void checkAuthenticationFailed(Args args, String node, String tlsVersion) throws IOException { - args.sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList(tlsVersion)); + args.sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Arrays.asList(tlsVersion)); createSelector(args.sslClientConfigs); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); @@ -639,10 +640,10 @@ public void testUnsupportedCiphers(Args args) throws Exception { SSLContext context = SSLContext.getInstance(args.tlsProtocol); context.init(null, null, null); String[] cipherSuites = context.getDefaultSSLParameters().getCipherSuites(); - args.sslServerConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Collections.singletonList(cipherSuites[0])); + args.sslServerConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Arrays.asList(cipherSuites[0])); server = createEchoServer(args, SecurityProtocol.SSL); - args.sslClientConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Collections.singletonList(cipherSuites[1])); + args.sslClientConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Arrays.asList(cipherSuites[1])); createSelector(args.sslClientConfigs); checkAuthenticationFailed(args, "1", args.tlsProtocol); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java index 425ab23532bb8..4f6e4b3aced70 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java @@ -139,7 +139,7 @@ public void testCiphersSuiteForTls12() throws Exception { /** Checks connection failed using the specified {@code tlsVersion}. 
*/ private void checkAuthenticationFailed() throws IOException, InterruptedException { - sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList("TLSv1.3")); + sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Arrays.asList("TLSv1.3")); createSelector(sslClientConfigs); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); selector.connect("0", addr, BUFFER_SIZE, BUFFER_SIZE); diff --git a/clients/src/test/java/org/apache/kafka/common/network/Tls12SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/Tls12SelectorTest.java index 750f75f50753c..7169b2ec51706 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/Tls12SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/Tls12SelectorTest.java @@ -17,6 +17,7 @@ package org.apache.kafka.common.network; +import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; @@ -24,7 +25,6 @@ import java.net.InetSocketAddress; import java.security.GeneralSecurityException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import org.apache.kafka.common.config.SslConfigs; @@ -38,7 +38,7 @@ protected Map createSslClientConfigs(File trustStoreFile) throws GeneralSecurityException, IOException { Map configs = TestSslUtils.createSslConfig(false, false, Mode.CLIENT, trustStoreFile, "client"); - configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList("TLSv1.2")); + configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, asList("TLSv1.2")); return configs; } diff --git a/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java index 2313ec4748e96..db69c2fa8ea1e 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java +++ 
b/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java @@ -25,7 +25,6 @@ import java.net.InetSocketAddress; import java.security.GeneralSecurityException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -43,7 +42,7 @@ public class Tls13SelectorTest extends SslSelectorTest { protected Map createSslClientConfigs(File trustStoreFile) throws GeneralSecurityException, IOException { Map configs = TestSslUtils.createSslConfig(false, false, Mode.CLIENT, trustStoreFile, "client"); - configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList("TLSv1.3")); + configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, asList("TLSv1.3")); return configs; } diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java index b3181907b52a4..4795798908ff8 100755 --- a/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java @@ -46,7 +46,7 @@ public void testDeepToString() { assertEquals("[1, 2, 3]", MessageUtil.deepToString(Arrays.asList(1, 2, 3).iterator())); assertEquals("[foo]", - MessageUtil.deepToString(Collections.singletonList("foo").iterator())); + MessageUtil.deepToString(Arrays.asList("foo").iterator())); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java index 5822f89efabb5..49c44459ca9e8 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java @@ -759,7 +759,7 @@ public void testBuffersDereferencedOnClose(Args args) { // Ignore memory usage during initialization if (iterations 
== 2) startMem = memUsed; - else if (iterations > 2 && memUsed < (iterations - 2) * 1024L) + else if (iterations > 2 && memUsed < (iterations - 2) * 1024) break; } assertTrue(iterations < 100, "Memory usage too high: " + memUsed); diff --git a/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java index 15249bd8383cc..15ddef649a7f6 100644 --- a/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java @@ -50,9 +50,9 @@ public void testSameRackSelector() { }); selected = selector.select(tp, metadata("not-a-rack"), partitionView); - assertOptional(selected, replicaInfo -> - assertEquals(replicaInfo, leader, "Expect leader when we can't find any nodes in given rack") - ); + assertOptional(selected, replicaInfo -> { + assertEquals(replicaInfo, leader, "Expect leader when we can't find any nodes in given rack"); + }); selected = selector.select(tp, metadata("rack-a"), partitionView); assertOptional(selected, replicaInfo -> { diff --git a/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequestTest.java index 3251515492a34..c18926dc57d20 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequestTest.java @@ -70,13 +70,13 @@ public void testPartitionDir() { .setPartitions(asList(0, 1)), new AlterReplicaLogDirTopic() .setName("topic2") - .setPartitions(singletonList(7))).iterator())), + .setPartitions(asList(7))).iterator())), new AlterReplicaLogDir() .setPath("/data1") .setTopics(new AlterReplicaLogDirTopicCollection( - singletonList(new AlterReplicaLogDirTopic() + asList(new AlterReplicaLogDirTopic() 
.setName("topic3") - .setPartitions(singletonList(12))).iterator()))).iterator())); + .setPartitions(asList(12))).iterator()))).iterator())); AlterReplicaLogDirsRequest request = new AlterReplicaLogDirsRequest.Builder(data).build(); Map expect = new HashMap<>(); expect.put(new TopicPartition("topic", 0), "/data0"); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponseTest.java index 347a8c26bc067..edc441cf9c828 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponseTest.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.common.requests; -import java.util.Collections; import java.util.Map; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData; @@ -45,7 +44,7 @@ public void testErrorCounts() { .setErrorCode(Errors.NONE.code()))), new AlterReplicaLogDirTopicResult() .setTopicName("t1") - .setPartitions(Collections.singletonList( + .setPartitions(asList( new AlterReplicaLogDirPartitionResult() .setPartitionIndex(0) .setErrorCode(Errors.LOG_DIR_NOT_FOUND.code()))))); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java index ae7b603d41b16..3baf3af3f26ea 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java @@ -39,13 +39,13 @@ public class DeleteAclsResponseTest { private static final short V1 = 1; private static final DeleteAclsMatchingAcl LITERAL_ACL1 = new DeleteAclsMatchingAcl() - .setResourceType(ResourceType.TOPIC.code()) - .setResourceName("foo") - .setPatternType(PatternType.LITERAL.code()) - .setPrincipal("User:ANONYMOUS") 
- .setHost("127.0.0.1") - .setOperation(AclOperation.READ.code()) - .setPermissionType(AclPermissionType.DENY.code()); + .setResourceType(ResourceType.TOPIC.code()) + .setResourceName("foo") + .setPatternType(PatternType.LITERAL.code()) + .setPrincipal("User:ANONYMOUS") + .setHost("127.0.0.1") + .setOperation(AclOperation.READ.code()) + .setPermissionType(AclPermissionType.DENY.code()); private static final DeleteAclsMatchingAcl LITERAL_ACL2 = new DeleteAclsMatchingAcl() .setResourceType(ResourceType.GROUP.code()) @@ -80,7 +80,7 @@ public class DeleteAclsResponseTest { private static final DeleteAclsFilterResult PREFIXED_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(asList( LITERAL_ACL1, PREFIXED_ACL1)); - private static final DeleteAclsFilterResult UNKNOWN_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(singletonList( + private static final DeleteAclsFilterResult UNKNOWN_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(asList( UNKNOWN_ACL)); @Test diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteTopicsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteTopicsRequestTest.java index d7d7eb985cd8a..897797a350a56 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteTopicsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteTopicsRequestTest.java @@ -77,7 +77,7 @@ public void testNewTopicsField() { } else { // We should fail if version is less than 6. - assertThrows(UnsupportedVersionException.class, requestWithNames::serialize); + assertThrows(UnsupportedVersionException.class, () -> requestWithNames.serialize()); } } } @@ -105,7 +105,7 @@ public void testTopicIdsField() { requestWithIdsSerialized.data().topics().forEach(topic -> assertNull(topic.name())); } else { // We should fail if version is less than 6. 
- assertThrows(UnsupportedVersionException.class, requestWithIds::serialize); + assertThrows(UnsupportedVersionException.class, () -> requestWithIds.serialize()); } } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/FetchRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/FetchRequestTest.java index 4f13c914f0f71..702341fee836c 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/FetchRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/FetchRequestTest.java @@ -42,7 +42,7 @@ public class FetchRequestTest { private static Stream fetchVersions() { - return ApiKeys.FETCH.allVersions().stream().map(Arguments::of); + return ApiKeys.FETCH.allVersions().stream().map(version -> Arguments.of(version)); } @ParameterizedTest @@ -64,7 +64,7 @@ public void testToReplaceWithDifferentVersions(short version) { // If version < 13, we should not see any partitions in forgottenTopics. This is because we can not // distinguish different topic IDs on versions earlier than 13. - assertEquals(fetchRequestUsesTopicIds, !fetchRequest.data().forgottenTopicsData().isEmpty()); + assertEquals(fetchRequestUsesTopicIds, fetchRequest.data().forgottenTopicsData().size() > 0); fetchRequest.data().forgottenTopicsData().forEach(forgottenTopic -> { // Since we didn't serialize, we should see the topic name and ID regardless of the version. 
assertEquals(tp.topic(), forgottenTopic.topic()); @@ -228,9 +228,9 @@ public void testFetchRequestSimpleBuilderReplicaStateDowngrade(short version) { public void testFetchRequestSimpleBuilderReplicaIdNotSupported(short version) { FetchRequestData fetchRequestData = new FetchRequestData().setReplicaId(1); FetchRequest.SimpleBuilder builder = new FetchRequest.SimpleBuilder(fetchRequestData); - assertThrows(IllegalStateException.class, () -> - builder.build(version) - ); + assertThrows(IllegalStateException.class, () -> { + builder.build(version); + }); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/requests/LeaderAndIsrRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/LeaderAndIsrRequestTest.java index cea12640269f7..83c33e4903fba 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/LeaderAndIsrRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/LeaderAndIsrRequestTest.java @@ -101,7 +101,7 @@ public void testGetErrorResponse() { /** * Verifies the logic we have in LeaderAndIsrRequest to present a unified interface across the various versions - * works correctly. For example, `LeaderAndIsrPartitionState.topicName` is not serialized/deserialized in + * works correctly. For example, `LeaderAndIsrPartitionState.topicName` is not serialiazed/deserialized in * recent versions, but we set it manually so that we can always present the ungrouped partition states * independently of the version. 
*/ @@ -118,8 +118,8 @@ public void testVersionLogic() { .setIsr(asList(0, 1)) .setPartitionEpoch(10) .setReplicas(asList(0, 1, 2)) - .setAddingReplicas(Collections.singletonList(3)) - .setRemovingReplicas(Collections.singletonList(2)), + .setAddingReplicas(asList(3)) + .setRemovingReplicas(asList(2)), new LeaderAndIsrPartitionState() .setTopicName("topic0") .setPartitionIndex(1) diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java index a18619d1712a1..83c4b101d8969 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java @@ -60,7 +60,7 @@ public void testDuplicatePartitions() { @Test public void testGetErrorResponse() { for (short version = 1; version <= ApiKeys.LIST_OFFSETS.latestVersion(); version++) { - List topics = Collections.singletonList( + List topics = Arrays.asList( new ListOffsetsTopic() .setName("topic") .setPartitions(Collections.singletonList( @@ -93,7 +93,7 @@ public void testGetErrorResponse() { @Test public void testGetErrorResponseV0() { - List topics = Collections.singletonList( + List topics = Arrays.asList( new ListOffsetsTopic() .setName("topic") .setPartitions(Collections.singletonList( diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index b53d2cedd080d..512a7cea76681 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.ElectionType; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; -import org.apache.kafka.common.ShareGroupState; import 
org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; @@ -44,8 +43,6 @@ import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTopicCollection; import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTransaction; import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTransactionCollection; -import org.apache.kafka.common.message.AddRaftVoterRequestData; -import org.apache.kafka.common.message.AddRaftVoterResponseData; import org.apache.kafka.common.message.AllocateProducerIdsRequestData; import org.apache.kafka.common.message.AllocateProducerIdsResponseData; import org.apache.kafka.common.message.AlterClientQuotasResponseData; @@ -207,22 +204,12 @@ import org.apache.kafka.common.message.ProduceResponseData; import org.apache.kafka.common.message.PushTelemetryRequestData; import org.apache.kafka.common.message.PushTelemetryResponseData; -import org.apache.kafka.common.message.RemoveRaftVoterRequestData; -import org.apache.kafka.common.message.RemoveRaftVoterResponseData; import org.apache.kafka.common.message.RenewDelegationTokenRequestData; import org.apache.kafka.common.message.RenewDelegationTokenResponseData; import org.apache.kafka.common.message.SaslAuthenticateRequestData; import org.apache.kafka.common.message.SaslAuthenticateResponseData; import org.apache.kafka.common.message.SaslHandshakeRequestData; import org.apache.kafka.common.message.SaslHandshakeResponseData; -import org.apache.kafka.common.message.ShareAcknowledgeRequestData; -import org.apache.kafka.common.message.ShareAcknowledgeResponseData; -import org.apache.kafka.common.message.ShareFetchRequestData; -import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.message.ShareGroupDescribeRequestData; -import org.apache.kafka.common.message.ShareGroupDescribeResponseData; -import 
org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; -import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaTopicState; import org.apache.kafka.common.message.StopReplicaResponseData; @@ -237,8 +224,6 @@ import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint; import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState; import org.apache.kafka.common.message.UpdateMetadataResponseData; -import org.apache.kafka.common.message.UpdateRaftVoterRequestData; -import org.apache.kafka.common.message.UpdateRaftVoterResponseData; import org.apache.kafka.common.message.VoteRequestData; import org.apache.kafka.common.message.VoteResponseData; import org.apache.kafka.common.network.ListenerName; @@ -1016,10 +1001,6 @@ public void testErrorCountsIncludesNone() { assertEquals(1, createTxnOffsetCommitResponse().errorCounts().get(Errors.NONE)); assertEquals(1, createUpdateMetadataResponse().errorCounts().get(Errors.NONE)); assertEquals(1, createWriteTxnMarkersResponse().errorCounts().get(Errors.NONE)); - assertEquals(1, createShareGroupHeartbeatResponse().errorCounts().get(Errors.NONE)); - assertEquals(1, createShareGroupDescribeResponse().errorCounts().get(Errors.NONE)); - assertEquals(2, createShareFetchResponse().errorCounts().get(Errors.NONE)); - assertEquals(2, createShareAcknowledgeResponse().errorCounts().get(Errors.NONE)); } private AbstractRequest getRequest(ApiKeys apikey, short version) { @@ -1100,13 +1081,6 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsRequest(version); case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesRequest(version); case DESCRIBE_TOPIC_PARTITIONS: return 
createDescribeTopicPartitionsRequest(version); - case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatRequest(version); - case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeRequest(version); - case SHARE_FETCH: return createShareFetchRequest(version); - case SHARE_ACKNOWLEDGE: return createShareAcknowledgeRequest(version); - case ADD_RAFT_VOTER: return createAddRaftVoterRequest(version); - case REMOVE_RAFT_VOTER: return createRemoveRaftVoterRequest(version); - case UPDATE_RAFT_VOTER: return createUpdateRaftVoterRequest(version); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1189,13 +1163,6 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsResponse(); case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesResponse(); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsResponse(); - case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatResponse(); - case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeResponse(); - case SHARE_FETCH: return createShareFetchResponse(); - case SHARE_ACKNOWLEDGE: return createShareAcknowledgeResponse(); - case ADD_RAFT_VOTER: return createAddRaftVoterResponse(); - case REMOVE_RAFT_VOTER: return createRemoveRaftVoterResponse(); - case UPDATE_RAFT_VOTER: return createUpdateRaftVoterResponse(); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1231,25 +1198,25 @@ private AssignReplicasToDirsRequest createAssignReplicasToDirsRequest(short vers .setDirectories(Arrays.asList( new AssignReplicasToDirsRequestData.DirectoryData() .setId(Uuid.randomUuid()) - .setTopics(singletonList( + .setTopics(Arrays.asList( new AssignReplicasToDirsRequestData.TopicData() .setTopicId(Uuid.fromString("qo0Pcp70TdGnAa7YKMKCqw")) - .setPartitions(singletonList( + .setPartitions(Arrays.asList( new AssignReplicasToDirsRequestData.PartitionData() 
.setPartitionIndex(8) )) )), new AssignReplicasToDirsRequestData.DirectoryData() .setId(Uuid.randomUuid()) - .setTopics(singletonList( + .setTopics(Arrays.asList( new AssignReplicasToDirsRequestData.TopicData() .setTopicId(Uuid.fromString("yEu11V7HTRGIwm6FDWFhzg")) - .setPartitions(asList( + .setPartitions(Arrays.asList( new AssignReplicasToDirsRequestData.PartitionData() .setPartitionIndex(2), new AssignReplicasToDirsRequestData.PartitionData() .setPartitionIndex(80) - )) + )) )) )); return new AssignReplicasToDirsRequest.Builder(data).build(version); @@ -1262,10 +1229,10 @@ private AssignReplicasToDirsResponse createAssignReplicasToDirsResponse() { .setDirectories(Arrays.asList( new AssignReplicasToDirsResponseData.DirectoryData() .setId(Uuid.randomUuid()) - .setTopics(singletonList( + .setTopics(Arrays.asList( new AssignReplicasToDirsResponseData.TopicData() .setTopicId(Uuid.fromString("sKhZV8LnTA275KvByB9bVg")) - .setPartitions(singletonList( + .setPartitions(Arrays.asList( new AssignReplicasToDirsResponseData.PartitionData() .setPartitionIndex(8) .setErrorCode(Errors.NONE.code()) @@ -1273,10 +1240,10 @@ private AssignReplicasToDirsResponse createAssignReplicasToDirsResponse() { )), new AssignReplicasToDirsResponseData.DirectoryData() .setId(Uuid.randomUuid()) - .setTopics(singletonList( + .setTopics(Arrays.asList( new AssignReplicasToDirsResponseData.TopicData() .setTopicId(Uuid.fromString("ORLP5NEzRo64SvKq1hIVQg")) - .setPartitions(asList( + .setPartitions(Arrays.asList( new AssignReplicasToDirsResponseData.PartitionData() .setPartitionIndex(2) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()), @@ -1291,63 +1258,11 @@ private AssignReplicasToDirsResponse createAssignReplicasToDirsResponse() { private DescribeTopicPartitionsRequest createDescribeTopicPartitionsRequest(short version) { DescribeTopicPartitionsRequestData data = new DescribeTopicPartitionsRequestData() - .setTopics(singletonList(new 
DescribeTopicPartitionsRequestData.TopicRequest().setName("foo"))) + .setTopics(Arrays.asList(new DescribeTopicPartitionsRequestData.TopicRequest().setName("foo"))) .setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName("foo").setPartitionIndex(1)); return new DescribeTopicPartitionsRequest.Builder(data).build(version); } - private AddRaftVoterRequest createAddRaftVoterRequest(short version) { - return new AddRaftVoterRequest(new AddRaftVoterRequestData(). - setClusterId("FmRMoH-iTCSFNnzgpkWT2A"). - setTimeoutMs(60_000). - setVoterId(1). - setVoterDirectoryId(Uuid.fromString("DZG26STKRxaelDpg2wqsXw")). - setListeners(new AddRaftVoterRequestData.ListenerCollection( - Collections.singletonList(new AddRaftVoterRequestData.Listener(). - setName("CONTROLLER"). - setHost("localhost"). - setPort(8080)).iterator()) - ), version); - } - - private AddRaftVoterResponse createAddRaftVoterResponse() { - return new AddRaftVoterResponse(new AddRaftVoterResponseData(). - setErrorCode((short) 0). - setErrorMessage(null)); - } - - private RemoveRaftVoterRequest createRemoveRaftVoterRequest(short version) { - return new RemoveRaftVoterRequest(new RemoveRaftVoterRequestData(). - setClusterId("FmRMoH-iTCSFNnzgpkWT2A"). - setVoterId(1). - setVoterDirectoryId(Uuid.fromString("DZG26STKRxaelDpg2wqsXw")), - version); - } - - private RemoveRaftVoterResponse createRemoveRaftVoterResponse() { - return new RemoveRaftVoterResponse(new RemoveRaftVoterResponseData(). - setErrorCode((short) 0). - setErrorMessage(null)); - } - - private UpdateRaftVoterRequest createUpdateRaftVoterRequest(short version) { - return new UpdateRaftVoterRequest(new UpdateRaftVoterRequestData(). - setClusterId("FmRMoH-iTCSFNnzgpkWT2A"). - setVoterId(1). - setVoterDirectoryId(Uuid.fromString("DZG26STKRxaelDpg2wqsXw")). - setListeners(new UpdateRaftVoterRequestData.ListenerCollection( - Collections.singletonList(new UpdateRaftVoterRequestData.Listener(). - setName("CONTROLLER"). - setHost("localhost"). 
- setPort(8080)).iterator())), - version); - } - - private UpdateRaftVoterResponse createUpdateRaftVoterResponse() { - return new UpdateRaftVoterResponse(new UpdateRaftVoterResponseData(). - setErrorCode((short) 0)); - } - private DescribeTopicPartitionsResponse createDescribeTopicPartitionsResponse() { DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponseTopicCollection collection = new DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponseTopicCollection(); @@ -1358,13 +1273,13 @@ private DescribeTopicPartitionsResponse createDescribeTopicPartitionsResponse() .setIsInternal(false) .setName("foo") .setTopicAuthorizedOperations(0) - .setPartitions(singletonList( + .setPartitions(Arrays.asList( new DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponsePartition() .setErrorCode((short) 0) - .setIsrNodes(singletonList(1)) + .setIsrNodes(Arrays.asList(1)) .setPartitionIndex(1) .setLeaderId(1) - .setReplicaNodes(singletonList(1)) + .setReplicaNodes(Arrays.asList(1)) .setLeaderEpoch(0) )) ); @@ -1415,121 +1330,13 @@ private ConsumerGroupHeartbeatResponse createConsumerGroupHeartbeatResponse() { return new ConsumerGroupHeartbeatResponse(data); } - private ShareGroupHeartbeatRequest createShareGroupHeartbeatRequest(short version) { - ShareGroupHeartbeatRequestData data = new ShareGroupHeartbeatRequestData() - .setGroupId("group") - .setMemberId("memberid") - .setMemberEpoch(10) - .setRackId("rackid") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")); - return new ShareGroupHeartbeatRequest.Builder(data).build(version); - } - - private ShareGroupHeartbeatResponse createShareGroupHeartbeatResponse() { - ShareGroupHeartbeatResponseData data = new ShareGroupHeartbeatResponseData() - .setErrorCode(Errors.NONE.code()) - .setThrottleTimeMs(1000) - .setMemberId("memberid") - .setMemberEpoch(11) - .setAssignment(new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( - new 
ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Arrays.asList(0, 1, 2)), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Arrays.asList(3, 4, 5)) - )) - ); - return new ShareGroupHeartbeatResponse(data); - } - - private ShareGroupDescribeRequest createShareGroupDescribeRequest(short version) { - ShareGroupDescribeRequestData data = new ShareGroupDescribeRequestData() - .setGroupIds(Collections.singletonList("group")) - .setIncludeAuthorizedOperations(false); - return new ShareGroupDescribeRequest.Builder(data).build(version); - } - - private ShareGroupDescribeResponse createShareGroupDescribeResponse() { - ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData() - .setGroups(Collections.singletonList( - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId("group") - .setErrorCode((short) 0) - .setErrorMessage(Errors.forCode((short) 0).message()) - .setGroupState(ShareGroupState.EMPTY.toString()) - .setMembers(new ArrayList<>(0)) - )) - .setThrottleTimeMs(1000); - return new ShareGroupDescribeResponse(data); - } - - private ShareFetchRequest createShareFetchRequest(short version) { - ShareFetchRequestData data = new ShareFetchRequestData() - .setGroupId("group") - .setMemberId(Uuid.randomUuid().toString()) - .setTopics(singletonList(new ShareFetchRequestData.FetchTopic() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))); - return new ShareFetchRequest.Builder(data).build(version); - } - - private ShareFetchResponse createShareFetchResponse() { - ShareFetchResponseData data = new ShareFetchResponseData(); - MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("blah".getBytes())); - ShareFetchResponseData.PartitionData partition = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(0) - 
.setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(singletonList(new ShareFetchResponseData.AcquiredRecords() - .setFirstOffset(0) - .setLastOffset(0) - .setDeliveryCount((short) 1))); - ShareFetchResponseData.ShareFetchableTopicResponse response = new ShareFetchResponseData.ShareFetchableTopicResponse() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(partition)); - - data.setResponses(singletonList(response)); - data.setThrottleTimeMs(345); - data.setErrorCode(Errors.NONE.code()); - return new ShareFetchResponse(data); - } - - private ShareAcknowledgeRequest createShareAcknowledgeRequest(short version) { - ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData() - .setMemberId(Uuid.randomUuid().toString()) - .setTopics(singletonList(new ShareAcknowledgeRequestData.AcknowledgeTopic() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareAcknowledgeRequestData.AcknowledgePartition() - .setPartitionIndex(0) - .setAcknowledgementBatches(singletonList(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(0) - .setAcknowledgeTypes(Collections.singletonList((byte) 0)))))))); - return new ShareAcknowledgeRequest.Builder(data).build(version); - } - - private ShareAcknowledgeResponse createShareAcknowledgeResponse() { - ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); - data.setResponses(singletonList(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()))))); - data.setThrottleTimeMs(345); - data.setErrorCode(Errors.NONE.code()); - return new ShareAcknowledgeResponse(data); - } - private ControllerRegistrationRequest createControllerRegistrationRequest(short version) { ControllerRegistrationRequestData data = new ControllerRegistrationRequestData(). 
setControllerId(3). setIncarnationId(Uuid.fromString("qiTdnbu6RPazh1Aufq4dxw")). setZkMigrationReady(true). setFeatures(new ControllerRegistrationRequestData.FeatureCollection( - singletonList( + Arrays.asList( new ControllerRegistrationRequestData.Feature(). setName("metadata.version"). setMinSupportedVersion((short) 1). @@ -1537,7 +1344,7 @@ private ControllerRegistrationRequest createControllerRegistrationRequest(short ).iterator() )). setListeners(new ControllerRegistrationRequestData.ListenerCollection( - singletonList( + Arrays.asList( new ControllerRegistrationRequestData.Listener(). setName("CONTROLLER"). setName("localhost"). @@ -3736,7 +3543,7 @@ private BrokerRegistrationRequest createBrokerRegistrationRequest(short v) { .setListeners(new BrokerRegistrationRequestData.ListenerCollection(singletonList( new BrokerRegistrationRequestData.Listener()).iterator())) .setIncarnationId(Uuid.randomUuid()) - .setLogDirs(singletonList(Uuid.fromString("qaJjNJ05Q36kEgeTBDcj0Q"))) + .setLogDirs(Arrays.asList(Uuid.fromString("qaJjNJ05Q36kEgeTBDcj0Q"))) .setPreviousBrokerEpoch(123L); return new BrokerRegistrationRequest.Builder(data).build(v); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/UpdateMetadataRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/UpdateMetadataRequestTest.java index 7be139c439964..2dd17f776ec95 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/UpdateMetadataRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/UpdateMetadataRequestTest.java @@ -73,7 +73,7 @@ public void testGetErrorResponse() { /** * Verifies the logic we have in UpdateMetadataRequest to present a unified interface across the various versions - * works correctly. For example, `UpdateMetadataPartitionState.topicName` is not serialized/deserialized in + * works correctly. 
For example, `UpdateMetadataPartitionState.topicName` is not serialized/deserialized in * recent versions, but we set it manually so that we can always present the ungrouped partition states * independently of the version. */ @@ -92,7 +92,7 @@ public void testVersionLogic() { .setIsr(asList(0, 1)) .setZkVersion(10) .setReplicas(asList(0, 1, 2)) - .setOfflineReplicas(Collections.singletonList(2)), + .setOfflineReplicas(asList(2)), new UpdateMetadataPartitionState() .setTopicName(topic0) .setPartitionIndex(1) @@ -143,7 +143,7 @@ public void testVersionLogic() { .setEndpoints(broker0Endpoints), new UpdateMetadataBroker() .setId(1) - .setEndpoints(Collections.singletonList( + .setEndpoints(asList( new UpdateMetadataEndpoint() .setHost("host1") .setPort(9090) diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java index a0e22ee150552..d705c75ab3fe9 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java @@ -43,6 +43,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -64,13 +65,13 @@ public void setup() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; saslServerConfigs = new HashMap<>(); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("PLAIN")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN")); saslClientConfigs = new HashMap<>(); saslClientConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT"); saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, 
"PLAIN"); - TestJaasConfig testJaasConfig = TestJaasConfig.createConfiguration("PLAIN", Collections.singletonList("PLAIN")); + TestJaasConfig testJaasConfig = TestJaasConfig.createConfiguration("PLAIN", Arrays.asList("PLAIN")); testJaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "anotherpassword"); server = createEchoServer(securityProtocol); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java index 7fe4f6b5a6ade..dc2513e4fc1e0 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java @@ -39,6 +39,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -92,7 +93,7 @@ public void teardown() throws Exception { public void testInvalidPasswordSaslPlain() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); jaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "invalidpassword"); server = createEchoServer(securityProtocol); @@ -140,7 +141,7 @@ public void testDisabledSaslMechanism() throws Exception { public void testClientConnectionClose() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); jaasConfig.setClientOptions("PLAIN", 
TestJaasConfig.USERNAME, "invalidpassword"); server = createEchoServer(securityProtocol); diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java index 4972f3ea43037..0b5e172116c9b 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java @@ -191,7 +191,7 @@ public void teardown() throws Exception { public void testValidSaslPlainOverSsl() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); checkAuthenticationAndReauthentication(securityProtocol, node); @@ -205,7 +205,7 @@ public void testValidSaslPlainOverSsl() throws Exception { public void testValidSaslPlainOverPlaintext() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); checkAuthenticationAndReauthentication(securityProtocol, node); @@ -264,7 +264,7 @@ public void testSaslAuthenticationMaxReceiveSize() throws Exception { public void testInvalidPasswordSaslPlain() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); jaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "invalidpassword"); server = createEchoServer(securityProtocol); @@ -281,7 
+281,7 @@ public void testInvalidPasswordSaslPlain() throws Exception { public void testInvalidUsernameSaslPlain() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); jaasConfig.setClientOptions("PLAIN", "invaliduser", TestJaasConfig.PASSWORD); server = createEchoServer(securityProtocol); @@ -297,7 +297,7 @@ public void testInvalidUsernameSaslPlain() throws Exception { @Test public void testMissingUsernameSaslPlain() throws Exception { String node = "0"; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); jaasConfig.setClientOptions("PLAIN", null, "mypassword"); SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; @@ -321,7 +321,7 @@ public void testMissingUsernameSaslPlain() throws Exception { @Test public void testMissingPasswordSaslPlain() throws Exception { String node = "0"; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); jaasConfig.setClientOptions("PLAIN", "myuser", null); SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; @@ -412,7 +412,7 @@ static void reset() { public void testMechanismPluggability() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("DIGEST-MD5", Collections.singletonList("DIGEST-MD5")); + configureMechanisms("DIGEST-MD5", Arrays.asList("DIGEST-MD5")); configureDigestMd5ServerCallback(securityProtocol); server = createEchoServer(securityProtocol); @@ -486,7 +486,7 @@ public void testMultipleServerMechanisms() throws Exception { @Test public void 
testValidSaslScramSha256() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); + configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); server = createEchoServer(securityProtocol); updateScramCredentialCache(TestJaasConfig.USERNAME, TestJaasConfig.PASSWORD); @@ -516,7 +516,7 @@ public void testValidSaslScramMechanisms() throws Exception { @Test public void testInvalidPasswordSaslScram() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); Map options = new HashMap<>(); options.put("username", TestJaasConfig.USERNAME); options.put("password", "invalidpassword"); @@ -536,7 +536,7 @@ public void testInvalidPasswordSaslScram() throws Exception { @Test public void testUnknownUserSaslScram() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); Map options = new HashMap<>(); options.put("username", "unknownUser"); options.put("password", TestJaasConfig.PASSWORD); @@ -582,7 +582,7 @@ public void testScramUsernameWithSpecialCharacters() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; String username = "special user= test,scram"; String password = username + "-password"; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); Map options = new HashMap<>(); options.put("username", 
username); options.put("password", password); @@ -597,7 +597,7 @@ public void testScramUsernameWithSpecialCharacters() throws Exception { @Test public void testTokenAuthenticationOverSaslScram() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); //create jaas config for token auth Map options = new HashMap<>(); @@ -633,7 +633,7 @@ public void testTokenAuthenticationOverSaslScram() throws Exception { @Test public void testTokenReauthenticationOverSaslScram() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); // create jaas config for token auth Map options = new HashMap<>(); @@ -745,7 +745,7 @@ public void testUnauthenticatedApiVersionsRequestOverSslHandshakeVersion1() thro public void testApiVersionsRequestWithServerUnsupportedVersion() throws Exception { short handshakeVersion = ApiKeys.SASL_HANDSHAKE.latestVersion(); SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Send ApiVersionsRequest with unsupported version and validate error response. 
@@ -787,7 +787,7 @@ public void testApiVersionsRequestWithServerUnsupportedVersion() throws Exceptio */ @Test public void testSaslUnsupportedClientVersions() throws Exception { - configureMechanisms("SCRAM-SHA-512", Collections.singletonList("SCRAM-SHA-512")); + configureMechanisms("SCRAM-SHA-512", Arrays.asList("SCRAM-SHA-512")); server = startServerApiVersionsUnsupportedByClient(SecurityProtocol.SASL_SSL, "SCRAM-SHA-512"); updateScramCredentialCache(TestJaasConfig.USERNAME, TestJaasConfig.PASSWORD); @@ -806,7 +806,7 @@ public void testSaslUnsupportedClientVersions() throws Exception { public void testInvalidApiVersionsRequest() throws Exception { short handshakeVersion = ApiKeys.SASL_HANDSHAKE.latestVersion(); SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Send ApiVersionsRequest with invalid version and validate error response. @@ -848,7 +848,7 @@ public void testForBrokenSaslHandshakeVersionBump() { public void testValidApiVersionsRequest() throws Exception { short handshakeVersion = ApiKeys.SASL_HANDSHAKE.latestVersion(); SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Send ApiVersionsRequest with valid version and validate error response. 
@@ -878,7 +878,7 @@ public void testValidApiVersionsRequest() throws Exception { @Test public void testSaslHandshakeRequestWithUnsupportedVersion() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Send SaslHandshakeRequest and validate that connection is closed by server. @@ -905,7 +905,7 @@ public void testSaslHandshakeRequestWithUnsupportedVersion() throws Exception { @Test public void testInvalidSaslPacket() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Send invalid SASL packet after valid handshake request @@ -944,7 +944,7 @@ public void testInvalidSaslPacket() throws Exception { @Test public void testInvalidApiVersionsRequestSequence() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Send handshake request followed by ApiVersionsRequest @@ -970,7 +970,7 @@ public void testInvalidApiVersionsRequestSequence() throws Exception { @Test public void testPacketSizeTooBig() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Send SASL packet with large size after valid handshake request @@ -1010,7 +1010,7 @@ public void testPacketSizeTooBig() throws Exception { @Test public void testDisallowedKafkaRequestsBeforeAuthentication() throws 
Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Send metadata request before Kafka SASL handshake request @@ -1047,7 +1047,7 @@ public void testDisallowedKafkaRequestsBeforeAuthentication() throws Exception { */ @Test public void testInvalidLoginModule() throws Exception { - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); jaasConfig.createOrUpdateEntry(TestJaasConfig.LOGIN_CONTEXT_CLIENT, "InvalidLoginModule", TestJaasConfig.defaultClientOptions()); SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; @@ -1270,7 +1270,7 @@ public void testServerLoginCallbackOverride() throws Exception { public void testDisabledMechanism() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("PLAIN", Collections.singletonList("DIGEST-MD5")); + configureMechanisms("PLAIN", Arrays.asList("DIGEST-MD5")); server = createEchoServer(securityProtocol); createAndCheckClientConnectionFailure(securityProtocol, node); @@ -1285,7 +1285,7 @@ public void testDisabledMechanism() throws Exception { public void testInvalidMechanism() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "INVALID"); server = createEchoServer(securityProtocol); @@ -1312,7 +1312,7 @@ public void testInvalidMechanism() throws Exception { public void testClientDynamicJaasConfiguration() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; 
saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("PLAIN")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN")); Map serverOptions = new HashMap<>(); serverOptions.put("user_user1", "user1-secret"); serverOptions.put("user_user2", "user2-secret"); @@ -1359,7 +1359,7 @@ public void testClientDynamicJaasConfiguration() throws Exception { public void testServerDynamicJaasConfiguration() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("PLAIN")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN")); Map serverOptions = new HashMap<>(); serverOptions.put("user_user1", "user1-secret"); serverOptions.put("user_user2", "user2-secret"); @@ -1385,7 +1385,7 @@ public void testServerDynamicJaasConfiguration() throws Exception { public void testJaasConfigurationForListener() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("PLAIN")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN")); TestJaasConfig staticJaasConfig = new TestJaasConfig(); @@ -1573,7 +1573,7 @@ public void oldSaslScramSslClientWithoutSaslAuthenticateHeaderFailure() throws E public void testValidSaslOauthBearerMechanism() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("OAUTHBEARER", Collections.singletonList("OAUTHBEARER")); + configureMechanisms("OAUTHBEARER", 
Arrays.asList("OAUTHBEARER")); server = createEchoServer(securityProtocol); createAndCheckClientConnection(securityProtocol, node); } @@ -1588,7 +1588,7 @@ public void testCannotReauthenticateWithDifferentPrincipal() throws Exception { saslClientConfigs.put(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, AlternateLoginCallbackHandler.class.getName()); configureMechanisms(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Collections.singletonList(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); + Arrays.asList(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); server = createEchoServer(securityProtocol); // initial authentication must succeed createClientConnection(securityProtocol, node); @@ -1701,7 +1701,7 @@ public void testCannotReauthenticateAgainFasterThanOneSecond() throws Exception time = new MockTime(); SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; configureMechanisms(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Collections.singletonList(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); + Arrays.asList(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); server = createEchoServer(securityProtocol); try { createClientConnection(securityProtocol, node); @@ -1748,7 +1748,7 @@ public void testCannotReauthenticateAgainFasterThanOneSecond() throws Exception public void testRepeatedValidSaslPlainOverSsl() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); /* * Make sure 85% of this value is at least 1 second otherwise it is possible for * the client to start re-authenticating but the server does not start due to @@ -1785,7 +1785,7 @@ public void testValidSaslOauthBearerMechanismWithoutServerTokens() throws Except String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "OAUTHBEARER"); - 
saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("OAUTHBEARER")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("OAUTHBEARER")); saslClientConfigs.put(SaslConfigs.SASL_JAAS_CONFIG, TestJaasConfig.jaasConfigProperty("OAUTHBEARER", Collections.singletonMap("unsecuredLoginStringClaim_sub", TestJaasConfig.USERNAME))); saslServerConfigs.put("listener.name.sasl_ssl.oauthbearer." + SaslConfigs.SASL_JAAS_CONFIG, @@ -1818,7 +1818,7 @@ public void testValidSaslOauthBearerMechanismWithoutServerTokens() throws Except @Test public void testInsufficientScopeSaslOauthBearerMechanism() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("OAUTHBEARER", Collections.singletonList("OAUTHBEARER")); + TestJaasConfig jaasConfig = configureMechanisms("OAUTHBEARER", Arrays.asList("OAUTHBEARER")); // now update the server side to require a scope the client does not provide Map serverJaasConfigOptionsMap = TestJaasConfig.defaultServerOptions("OAUTHBEARER"); serverJaasConfigOptionsMap.put("unsecuredValidatorRequiredScope", "LOGIN_TO_KAFKA"); // causes the failure @@ -1884,7 +1884,7 @@ private void verifySslClientAuthForSaslSslListener(boolean useListenerPrefix, // Client configures untrusted key store CertStores newStore = new CertStores(false, "localhost"); - saslClientConfigs.putAll(newStore.keyStoreProps()); + newStore.keyStoreProps().forEach((k, v) -> saslClientConfigs.put(k, v)); if (expectedClientAuth == SslClientAuth.NONE) { createAndCheckClientConnectionAndPrincipal(securityProtocol, "2", principalWithOneWayTls); } else { @@ -1900,7 +1900,7 @@ private void removeClientSslKeystore() { private void verifySaslAuthenticateHeaderInterop(boolean enableHeaderOnServer, boolean enableHeaderOnClient, SecurityProtocol securityProtocol, String saslMechanism) throws Exception { - configureMechanisms(saslMechanism, 
Collections.singletonList(saslMechanism)); + configureMechanisms(saslMechanism, Arrays.asList(saslMechanism)); createServer(securityProtocol, saslMechanism, enableHeaderOnServer); String node = "0"; @@ -1910,7 +1910,7 @@ private void verifySaslAuthenticateHeaderInterop(boolean enableHeaderOnServer, b private void verifySaslAuthenticateHeaderInteropWithFailure(boolean enableHeaderOnServer, boolean enableHeaderOnClient, SecurityProtocol securityProtocol, String saslMechanism) throws Exception { - TestJaasConfig jaasConfig = configureMechanisms(saslMechanism, Collections.singletonList(saslMechanism)); + TestJaasConfig jaasConfig = configureMechanisms(saslMechanism, Arrays.asList(saslMechanism)); jaasConfig.setClientOptions(saslMechanism, TestJaasConfig.USERNAME, "invalidpassword"); createServer(securityProtocol, saslMechanism, enableHeaderOnServer); @@ -1947,7 +1947,7 @@ private NioEchoServer startServerApiVersionsUnsupportedByClient(final SecurityPr boolean isScram = ScramMechanism.isScram(saslMechanism); if (isScram) - ScramCredentialUtils.createCache(credentialCache, Collections.singletonList(saslMechanism)); + ScramCredentialUtils.createCache(credentialCache, Arrays.asList(saslMechanism)); Supplier apiVersionSupplier = () -> { ApiVersionCollection versionCollection = new ApiVersionCollection(2); @@ -1976,7 +1976,7 @@ private NioEchoServer startServerWithoutSaslAuthenticateHeader(final SecurityPro boolean isScram = ScramMechanism.isScram(saslMechanism); if (isScram) - ScramCredentialUtils.createCache(credentialCache, Collections.singletonList(saslMechanism)); + ScramCredentialUtils.createCache(credentialCache, Arrays.asList(saslMechanism)); Supplier apiVersionSupplier = () -> { ApiVersionsResponse defaultApiVersionResponse = TestUtils.defaultApiVersionsResponse( @@ -2090,7 +2090,7 @@ protected void setSaslAuthenticateAndHandshakeVersions(ApiVersionsResponse apiVe * */ private void testUnauthenticatedApiVersionsRequest(SecurityProtocol securityProtocol, short 
saslHandshakeVersion) throws Exception { - configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + configureMechanisms("PLAIN", Arrays.asList("PLAIN")); server = createEchoServer(securityProtocol); // Create non-SASL connection to manually authenticate after ApiVersionsRequest @@ -2499,43 +2499,44 @@ public static class AlternateLoginCallbackHandler implements AuthenticateCallbac public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { DELEGATE.handle(callbacks); // now change any returned token to have a different principal name - for (Callback callback : callbacks) { - if (callback instanceof OAuthBearerTokenCallback) { - OAuthBearerTokenCallback oauthBearerTokenCallback = (OAuthBearerTokenCallback) callback; - OAuthBearerToken token = oauthBearerTokenCallback.token(); - if (token != null) { - String changedPrincipalNameToUse = token.principalName() - + ++numInvocations; - String headerJson = "{" + claimOrHeaderJsonText("alg", "none") + "}"; - /* - * Use a short lifetime so the background refresh thread replaces it before we - * re-authenticate - */ - String lifetimeSecondsValueToUse = "1"; - String claimsJson; - try { - claimsJson = String.format("{%s,%s,%s}", - expClaimText(Long.parseLong(lifetimeSecondsValueToUse)), - claimOrHeaderJsonText("iat", time.milliseconds() / 1000.0), - claimOrHeaderJsonText("sub", changedPrincipalNameToUse)); - } catch (NumberFormatException e) { - throw new OAuthBearerConfigException(e.getMessage()); - } - try { - Encoder urlEncoderNoPadding = Base64.getUrlEncoder().withoutPadding(); - OAuthBearerUnsecuredJws jws = new OAuthBearerUnsecuredJws(String.format("%s.%s.", - urlEncoderNoPadding.encodeToString(headerJson.getBytes(StandardCharsets.UTF_8)), - urlEncoderNoPadding - .encodeToString(claimsJson.getBytes(StandardCharsets.UTF_8))), - "sub", "scope"); - oauthBearerTokenCallback.token(jws); - } catch (OAuthBearerIllegalTokenException e) { - // occurs if the principal claim doesn't exist 
or has an empty value - throw new OAuthBearerConfigException(e.getMessage(), e); + if (callbacks.length > 0) + for (Callback callback : callbacks) { + if (callback instanceof OAuthBearerTokenCallback) { + OAuthBearerTokenCallback oauthBearerTokenCallback = (OAuthBearerTokenCallback) callback; + OAuthBearerToken token = oauthBearerTokenCallback.token(); + if (token != null) { + String changedPrincipalNameToUse = token.principalName() + + String.valueOf(++numInvocations); + String headerJson = "{" + claimOrHeaderJsonText("alg", "none") + "}"; + /* + * Use a short lifetime so the background refresh thread replaces it before we + * re-authenticate + */ + String lifetimeSecondsValueToUse = "1"; + String claimsJson; + try { + claimsJson = String.format("{%s,%s,%s}", + expClaimText(Long.parseLong(lifetimeSecondsValueToUse)), + claimOrHeaderJsonText("iat", time.milliseconds() / 1000.0), + claimOrHeaderJsonText("sub", changedPrincipalNameToUse)); + } catch (NumberFormatException e) { + throw new OAuthBearerConfigException(e.getMessage()); + } + try { + Encoder urlEncoderNoPadding = Base64.getUrlEncoder().withoutPadding(); + OAuthBearerUnsecuredJws jws = new OAuthBearerUnsecuredJws(String.format("%s.%s.", + urlEncoderNoPadding.encodeToString(headerJson.getBytes(StandardCharsets.UTF_8)), + urlEncoderNoPadding + .encodeToString(claimsJson.getBytes(StandardCharsets.UTF_8))), + "sub", "scope"); + oauthBearerTokenCallback.token(jws); + } catch (OAuthBearerIllegalTokenException e) { + // occurs if the principal claim doesn't exist or has an empty value + throw new OAuthBearerConfigException(e.getMessage(), e); + } } } } - } } private static String claimOrHeaderJsonText(String claimName, String claimValue) { diff --git a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java index 209c11e0a8da7..a6e8f9714dc27 100644 --- 
a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -118,18 +117,18 @@ public void testToUpperCase() throws Exception { @Test public void testInvalidRules() { - testInvalidRule(Collections.singletonList("default")); - testInvalidRule(Collections.singletonList("DEFAUL")); - testInvalidRule(Collections.singletonList("DEFAULT/L")); - testInvalidRule(Collections.singletonList("DEFAULT/g")); - - testInvalidRule(Collections.singletonList("rule:[1:$1]")); - testInvalidRule(Collections.singletonList("rule:[1:$1]/L/U")); - testInvalidRule(Collections.singletonList("rule:[1:$1]/U/L")); - testInvalidRule(Collections.singletonList("rule:[1:$1]/LU")); - testInvalidRule(Collections.singletonList("RULE:[1:$1/L")); - testInvalidRule(Collections.singletonList("RULE:[1:$1]/l")); - testInvalidRule(Collections.singletonList("RULE:[2:$1](ABC.*)s/ABC/XYZ/L/g")); + testInvalidRule(Arrays.asList("default")); + testInvalidRule(Arrays.asList("DEFAUL")); + testInvalidRule(Arrays.asList("DEFAULT/L")); + testInvalidRule(Arrays.asList("DEFAULT/g")); + + testInvalidRule(Arrays.asList("rule:[1:$1]")); + testInvalidRule(Arrays.asList("rule:[1:$1]/L/U")); + testInvalidRule(Arrays.asList("rule:[1:$1]/U/L")); + testInvalidRule(Arrays.asList("rule:[1:$1]/LU")); + testInvalidRule(Arrays.asList("RULE:[1:$1/L")); + testInvalidRule(Arrays.asList("RULE:[1:$1]/l")); + testInvalidRule(Arrays.asList("RULE:[2:$1](ABC.*)s/ABC/XYZ/L/g")); } private void testInvalidRule(List rules) { diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java 
b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java index 4ad4e78b92c1f..e29b7c069c984 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java @@ -143,7 +143,7 @@ private static OAuthBearerUnsecuredLoginCallbackHandler createCallbackHandler(Ma OAuthBearerUnsecuredLoginCallbackHandler callbackHandler = new OAuthBearerUnsecuredLoginCallbackHandler(); callbackHandler.time(mockTime); callbackHandler.configure(Collections.emptyMap(), OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Collections.singletonList(config.getAppConfigurationEntry("KafkaClient")[0])); + Arrays.asList(config.getAppConfigurationEntry("KafkaClient")[0])); return callbackHandler; } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java index 4c0d055012988..d7d6013a45717 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java @@ -21,6 +21,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Base64; import java.util.Base64.Encoder; import java.util.Collections; @@ -156,7 +157,7 @@ private static OAuthBearerUnsecuredValidatorCallbackHandler createCallbackHandle (Map) options); 
OAuthBearerUnsecuredValidatorCallbackHandler callbackHandler = new OAuthBearerUnsecuredValidatorCallbackHandler(); callbackHandler.configure(Collections.emptyMap(), OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Collections.singletonList(config.getAppConfigurationEntry("KafkaClient")[0])); + Arrays.asList(config.getAppConfigurationEntry("KafkaClient")[0])); return callbackHandler; } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java index c52ed6c4ec1cd..ef8997a7bc7a9 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java @@ -161,14 +161,14 @@ public void validateScope() { long nowMs = TIME.milliseconds(); double nowClaimValue = ((double) nowMs) / 1000; final List noScope = Collections.emptyList(); - final List scope1 = Collections.singletonList("scope1"); + final List scope1 = Arrays.asList("scope1"); final List scope1And2 = Arrays.asList("scope1", "scope2"); for (boolean actualScopeExists : new boolean[] {true, false}) { - List scopes = !actualScopeExists ? Collections.singletonList((List) null) + List scopes = !actualScopeExists ? Arrays.asList((List) null) : Arrays.asList(noScope, scope1, scope1And2); for (List actualScope : scopes) { for (boolean requiredScopeExists : new boolean[] {true, false}) { - List requiredScopes = !requiredScopeExists ? Collections.singletonList((List) null) + List requiredScopes = !requiredScopeExists ? 
Arrays.asList((List) null) : Arrays.asList(noScope, scope1, scope1And2); for (List requiredScope : requiredScopes) { StringBuilder sb = new StringBuilder("{"); diff --git a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramFormatterTest.java b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramFormatterTest.java index 715c97fe9270e..8d7a8ecd84407 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramFormatterTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramFormatterTest.java @@ -78,8 +78,9 @@ public void rfc7677Example() throws Exception { * Tests encoding of username */ @Test - public void saslName() { + public void saslName() throws Exception { String[] usernames = {"user1", "123", "1,2", "user=A", "user==B", "user,1", "user 1", ",", "=", ",=", "=="}; + ScramFormatter formatter = new ScramFormatter(ScramMechanism.SCRAM_SHA_256); for (String username : usernames) { String saslName = ScramFormatter.saslName(username); // There should be no commas in saslName (comma is used as field separator in SASL messages) diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapperTest.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapperTest.java index b9aefc9a44ce4..129e383221e86 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapperTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapperTest.java @@ -152,7 +152,7 @@ void testNeverExpiringX509Certificate() throws Exception { if (cert.getNotBefore().before(dateNow) && cert.getNotAfter().after(dateNow)) { assertDoesNotThrow(() -> cert.checkValidity()); } else { - assertThrows(CertificateException.class, cert::checkValidity); + assertThrows(CertificateException.class, () 
-> cert.checkValidity()); } // The wrappedCert must never throw due to being expired assertDoesNotThrow(() -> wrappedCert.checkValidity()); diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/mock/TestTrustManagerFactory.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/mock/TestTrustManagerFactory.java index 82e4c4b4594b8..8e5e584f97c91 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/mock/TestTrustManagerFactory.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/mock/TestTrustManagerFactory.java @@ -45,6 +45,8 @@ protected TrustManager[] engineGetTrustManagers() { public static class TestTrustManager extends X509ExtendedTrustManager { + public static final String ALIAS = "TestAlias"; + @Override public void checkClientTrusted(X509Certificate[] x509Certificates, String s) { diff --git a/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java b/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java index 9023e00be1ea3..fe02cbe5a96cc 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java @@ -19,8 +19,6 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -34,14 +32,14 @@ public class FlattenedIteratorTest { public void testNestedLists() { List> list = asList( asList("foo", "a", "bc"), - Collections.singletonList("ddddd"), + asList("ddddd"), asList("", "bar2", "baz45")); - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); flattenedIterable.forEach(flattened::add); - 
assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); + assertEquals(list.stream().flatMap(l -> l.stream()).collect(Collectors.toList()), flattened); // Ensure we can iterate multiple times List flattened2 = new ArrayList<>(); @@ -54,7 +52,7 @@ public void testNestedLists() { public void testEmptyList() { List> list = emptyList(); - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); flattenedIterable.forEach(flattened::add); @@ -63,9 +61,9 @@ public void testEmptyList() { @Test public void testNestedSingleEmptyList() { - List> list = Collections.singletonList(emptyList()); + List> list = asList(emptyList()); - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); flattenedIterable.forEach(flattened::add); @@ -78,39 +76,39 @@ public void testEmptyListFollowedByNonEmpty() { emptyList(), asList("boo", "b", "de")); - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); flattenedIterable.forEach(flattened::add); - assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); + assertEquals(list.stream().flatMap(l -> l.stream()).collect(Collectors.toList()), flattened); } @Test public void testEmptyListInBetweenNonEmpty() { List> list = asList( - Collections.singletonList("aadwdwdw"), + asList("aadwdwdw"), emptyList(), asList("ee", "aa", "dd")); - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + Iterable flattenedIterable = () -> new 
FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); flattenedIterable.forEach(flattened::add); - assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); + assertEquals(list.stream().flatMap(l -> l.stream()).collect(Collectors.toList()), flattened); } @Test public void testEmptyListAtTheEnd() { List> list = asList( asList("ee", "dd"), - Collections.singletonList("e"), + asList("e"), emptyList()); - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); flattenedIterable.forEach(flattened::add); - assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); + assertEquals(list.stream().flatMap(l -> l.stream()).collect(Collectors.toList()), flattened); } } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/ImplicitLinkedHashCollectionTest.java b/clients/src/test/java/org/apache/kafka/common/utils/ImplicitLinkedHashCollectionTest.java index 93d03a3837aa0..0c5828f565a0b 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/ImplicitLinkedHashCollectionTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/ImplicitLinkedHashCollectionTest.java @@ -625,7 +625,13 @@ public int compare(TestElement a, TestElement b) { return -1; } else if (a.key > b.key) { return 1; - } else return Integer.compare(a.val, b.val); + } else if (a.val < b.val) { + return -1; + } else if (a.val > b.val) { + return 1; + } else { + return 0; + } } } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/MappedIteratorTest.java b/clients/src/test/java/org/apache/kafka/common/utils/MappedIteratorTest.java index 12b4367b0295e..058f2cd6a9b44 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/MappedIteratorTest.java +++ 
b/clients/src/test/java/org/apache/kafka/common/utils/MappedIteratorTest.java @@ -32,7 +32,7 @@ public class MappedIteratorTest { @Test public void testStringToInteger() { List list = asList("foo", "", "bar2", "baz45"); - Function mapper = String::length; + Function mapper = s -> s.length(); Iterable mappedIterable = () -> new MappedIterator<>(list.iterator(), mapper); List mapped = new ArrayList<>(); @@ -49,7 +49,7 @@ public void testStringToInteger() { @Test public void testEmptyList() { List list = emptyList(); - Function mapper = String::length; + Function mapper = s -> s.length(); Iterable mappedIterable = () -> new MappedIterator<>(list.iterator(), mapper); List mapped = new ArrayList<>(); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index 012151dd34e71..08e2cebc3124a 100755 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -823,19 +823,19 @@ public void shouldAcceptValidDateFormats() throws ParseException { public void shouldThrowOnInvalidDateFormatOrNullTimestamp() { // check some invalid formats // test null timestamp - assertTrue(assertThrows(IllegalArgumentException.class, () -> - Utils.getDateTime(null) - ).getMessage().contains("Error parsing timestamp with null value")); + assertTrue(assertThrows(IllegalArgumentException.class, () -> { + Utils.getDateTime(null); + }).getMessage().contains("Error parsing timestamp with null value")); // test pattern: yyyy-MM-dd'T'HH:mm:ss.X - checkExceptionForGetDateTimeMethod(() -> - invokeGetDateTimeMethod(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.X")) - ); + checkExceptionForGetDateTimeMethod(() -> { + invokeGetDateTimeMethod(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.X")); + }); // test pattern: yyyy-MM-dd HH:mm:ss - assertTrue(assertThrows(ParseException.class, () -> - invokeGetDateTimeMethod(new 
SimpleDateFormat("yyyy-MM-dd HH:mm:ss")) - ).getMessage().contains("It does not contain a 'T' according to ISO8601 format")); + assertTrue(assertThrows(ParseException.class, () -> { + invokeGetDateTimeMethod(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")); + }).getMessage().contains("It does not contain a 'T' according to ISO8601 format")); // KAFKA-10685: use DateTimeFormatter generate micro/nano second timestamp final DateTimeFormatter formatter = new DateTimeFormatterBuilder() @@ -847,19 +847,19 @@ public void shouldThrowOnInvalidDateFormatOrNullTimestamp() { final LocalDateTime timestampWithSeconds = timestampWithNanoSeconds.truncatedTo(ChronoUnit.SECONDS); // test pattern: yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS - checkExceptionForGetDateTimeMethod(() -> - Utils.getDateTime(formatter.format(timestampWithNanoSeconds)) - ); + checkExceptionForGetDateTimeMethod(() -> { + Utils.getDateTime(formatter.format(timestampWithNanoSeconds)); + }); // test pattern: yyyy-MM-dd'T'HH:mm:ss.SSSSSS - checkExceptionForGetDateTimeMethod(() -> - Utils.getDateTime(formatter.format(timestampWithMicroSeconds)) - ); + checkExceptionForGetDateTimeMethod(() -> { + Utils.getDateTime(formatter.format(timestampWithMicroSeconds)); + }); // test pattern: yyyy-MM-dd'T'HH:mm:ss - checkExceptionForGetDateTimeMethod(() -> - Utils.getDateTime(formatter.format(timestampWithSeconds)) - ); + checkExceptionForGetDateTimeMethod(() -> { + Utils.getDateTime(formatter.format(timestampWithSeconds)); + }); } private void checkExceptionForGetDateTimeMethod(Executable executable) { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java b/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java index cf5f01502c83b..2a6e71c3817c2 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java @@ -213,15 +213,11 @@ public static void validateValue(Schema schema, Object 
value) { validateValue(null, schema, value); } - public static void validateValue(String field, Schema schema, Object value) { - validateValue(schema, value, field == null ? "value" : "field: \"" + field + "\""); - } - - private static void validateValue(Schema schema, Object value, String location) { + public static void validateValue(String name, Schema schema, Object value) { if (value == null) { if (!schema.isOptional()) - throw new DataException("Invalid value: null used for required " + location - + ", schema type: " + schema.type()); + throw new DataException("Invalid value: null used for required field: \"" + name + + "\", schema type: " + schema.type()); return; } @@ -240,8 +236,8 @@ private static void validateValue(Schema schema, Object value, String location) exceptionMessage.append(" \"").append(schema.name()).append("\""); } exceptionMessage.append(" with type ").append(schema.type()).append(": ").append(value.getClass()); - if (location != null) { - exceptionMessage.append(" for ").append(location); + if (name != null) { + exceptionMessage.append(" for field: \"").append(name).append("\""); } throw new DataException(exceptionMessage.toString()); } @@ -255,33 +251,19 @@ private static void validateValue(Schema schema, Object value, String location) break; case ARRAY: List array = (List) value; - String entryLocation = "element of array " + location; - Schema arrayValueSchema = assertSchemaNotNull(schema.valueSchema(), entryLocation); - for (Object entry : array) { - validateValue(arrayValueSchema, entry, entryLocation); - } + for (Object entry : array) + validateValue(schema.valueSchema(), entry); break; case MAP: Map map = (Map) value; - String keyLocation = "key of map " + location; - String valueLocation = "value of map " + location; - Schema mapKeySchema = assertSchemaNotNull(schema.keySchema(), keyLocation); - Schema mapValueSchema = assertSchemaNotNull(schema.valueSchema(), valueLocation); for (Map.Entry entry : map.entrySet()) { - 
validateValue(mapKeySchema, entry.getKey(), keyLocation); - validateValue(mapValueSchema, entry.getValue(), valueLocation); + validateValue(schema.keySchema(), entry.getKey()); + validateValue(schema.valueSchema(), entry.getValue()); } break; } } - private static Schema assertSchemaNotNull(Schema schema, String location) { - if (schema == null) { - throw new DataException("No schema defined for " + location); - } - return schema; - } - private static List> expectedClassesFor(Schema schema) { List> expectedClasses = LOGICAL_TYPE_CLASSES.get(schema.name()); if (expectedClasses == null) diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java index 43c2342fe3b41..25e6db34691f9 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java @@ -330,144 +330,4 @@ public void testEmptyStruct() { new Struct(emptyStruct); } - private void assertInvalidValueForSchema(String fieldName, Schema schema, Object value, String message) { - Exception e = assertThrows(DataException.class, () -> ConnectSchema.validateValue(fieldName, schema, value)); - assertEquals(message, e.getMessage()); - } - - @Test - public void testValidateFieldWithInvalidValueType() { - String fieldName = "field"; - assertInvalidValueForSchema(fieldName, new FakeSchema(), new Object(), - "Invalid Java object for schema \"fake\" with type null: class java.lang.Object for field: \"field\""); - assertInvalidValueForSchema(null, Schema.INT8_SCHEMA, new Object(), - "Invalid Java object for schema with type INT8: class java.lang.Object for value"); - assertInvalidValueForSchema(fieldName, Schema.INT8_SCHEMA, new Object(), - "Invalid Java object for schema with type INT8: class java.lang.Object for field: \"field\""); - } - - @Test - public void 
testValidateFieldWithInvalidValueMismatchTimestamp() { - long longValue = 1000L; - String fieldName = "field"; - - ConnectSchema.validateValue(fieldName, Schema.INT64_SCHEMA, longValue); - - assertInvalidValueForSchema(fieldName, Timestamp.SCHEMA, longValue, - "Invalid Java object for schema \"org.apache.kafka.connect.data.Timestamp\" " + - "with type INT64: class java.lang.Long for field: \"field\""); - } - - @Test - public void testValidateList() { - String fieldName = "field"; - - // Optional element schema - Schema optionalStrings = SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.emptyList()); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonList("hello")); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonList(null)); - ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList("hello", "world")); - ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList("hello", null)); - ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList(null, "world")); - assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonList(true), - "Invalid Java object for schema with type STRING: class java.lang.Boolean for element of array field: \"field\""); - - // Required element schema - Schema requiredStrings = SchemaBuilder.array(Schema.STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, requiredStrings, Collections.emptyList()); - ConnectSchema.validateValue(fieldName, requiredStrings, Collections.singletonList("hello")); - assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonList(null), - "Invalid value: null used for required element of array field: \"field\", schema type: STRING"); - ConnectSchema.validateValue(fieldName, requiredStrings, Arrays.asList("hello", "world")); - assertInvalidValueForSchema(fieldName, requiredStrings, Arrays.asList("hello", null), - 
"Invalid value: null used for required element of array field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, requiredStrings, Arrays.asList(null, "world"), - "Invalid value: null used for required element of array field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonList(true), - "Invalid Java object for schema with type STRING: class java.lang.Boolean for element of array field: \"field\""); - - // Null element schema - Schema nullElements = SchemaBuilder.type(Schema.Type.ARRAY); - assertInvalidValueForSchema(fieldName, nullElements, Collections.emptyList(), - "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList("hello"), - "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList(null), - "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList("hello", "world"), - "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList("hello", null), - "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList(null, "world"), - "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList(true), - "No schema defined for element of array field: \"field\""); - } - - @Test - public void testValidateMap() { - String fieldName = "field"; - - // Optional element schema - Schema optionalStrings = SchemaBuilder.map(Schema.OPTIONAL_STRING_SCHEMA, Schema.OPTIONAL_STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.emptyMap()); - ConnectSchema.validateValue(fieldName, optionalStrings, 
Collections.singletonMap("key", "value")); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap("key", null)); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap(null, "value")); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap(null, null)); - assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonMap("key", true), - "Invalid Java object for schema with type STRING: class java.lang.Boolean for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonMap(true, "value"), - "Invalid Java object for schema with type STRING: class java.lang.Boolean for key of map field: \"field\""); - - // Required element schema - Schema requiredStrings = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, requiredStrings, Collections.emptyMap()); - ConnectSchema.validateValue(fieldName, requiredStrings, Collections.singletonMap("key", "value")); - assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap("key", null), - "Invalid value: null used for required value of map field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(null, "value"), - "Invalid value: null used for required key of map field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(null, null), - "Invalid value: null used for required key of map field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap("key", true), - "Invalid Java object for schema with type STRING: class java.lang.Boolean for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(true, "value"), - "Invalid Java object for schema with type STRING: 
class java.lang.Boolean for key of map field: \"field\""); - - // Null key schema - Schema nullKeys = SchemaBuilder.type(Schema.Type.MAP); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.emptyMap(), - "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", "value"), - "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", null), - "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap(null, "value"), - "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap(null, null), - "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", true), - "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap(true, "value"), - "No schema defined for key of map field: \"field\""); - - // Null value schema - Schema nullValues = SchemaBuilder.mapWithNullValues(Schema.OPTIONAL_STRING_SCHEMA); - assertInvalidValueForSchema(fieldName, nullValues, Collections.emptyMap(), - "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", "value"), - "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", null), - "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap(null, "value"), - "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap(null, null), - "No schema defined for value of map field: 
\"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", true), - "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap(true, "value"), - "No schema defined for value of map field: \"field\""); - } } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java index 6dee26ca83ac5..55ccc81beda2e 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java @@ -304,6 +304,39 @@ public void testValidateStructWithNullValue() { e.getMessage()); } + @Test + public void testValidateFieldWithInvalidValueType() { + String fieldName = "field"; + FakeSchema fakeSchema = new FakeSchema(); + + Exception e = assertThrows(DataException.class, () -> ConnectSchema.validateValue(fieldName, + fakeSchema, new Object())); + assertEquals("Invalid Java object for schema \"fake\" with type null: class java.lang.Object for field: \"field\"", + e.getMessage()); + + e = assertThrows(DataException.class, () -> ConnectSchema.validateValue(fieldName, + Schema.INT8_SCHEMA, new Object())); + assertEquals("Invalid Java object for schema with type INT8: class java.lang.Object for field: \"field\"", + e.getMessage()); + + e = assertThrows(DataException.class, () -> ConnectSchema.validateValue(Schema.INT8_SCHEMA, new Object())); + assertEquals("Invalid Java object for schema with type INT8: class java.lang.Object", e.getMessage()); + } + + @Test + public void testValidateFieldWithInvalidValueMismatchTimestamp() { + String fieldName = "field"; + long longValue = 1000L; + + // Does not throw + ConnectSchema.validateValue(fieldName, Schema.INT64_SCHEMA, longValue); + + Exception e = assertThrows(DataException.class, () -> ConnectSchema.validateValue(fieldName, + Timestamp.SCHEMA, 
longValue)); + assertEquals("Invalid Java object for schema \"org.apache.kafka.connect.data.Timestamp\" " + + "with type INT64: class java.lang.Long for field: \"field\"", e.getMessage()); + } + @Test public void testPutNullField() { final String fieldName = "fieldName"; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java index e50019676ac2f..4956a25158e31 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java @@ -124,7 +124,7 @@ */ public abstract class AbstractHerder implements Herder, TaskStatus.Listener, ConnectorStatus.Listener { - private static final Logger log = LoggerFactory.getLogger(AbstractHerder.class); + private final Logger log = LoggerFactory.getLogger(AbstractHerder.class); private final String workerId; protected final Worker worker; @@ -1039,17 +1039,17 @@ public static List> reverseTransform(String connName, return result; } - public static boolean taskConfigsChanged(ClusterConfigState configState, String connName, List> rawTaskProps) { + public boolean taskConfigsChanged(ClusterConfigState configState, String connName, List> taskProps) { int currentNumTasks = configState.taskCount(connName); boolean result = false; - if (rawTaskProps.size() != currentNumTasks) { - log.debug("Connector {} task count changed from {} to {}", connName, currentNumTasks, rawTaskProps.size()); + if (taskProps.size() != currentNumTasks) { + log.debug("Connector {} task count changed from {} to {}", connName, currentNumTasks, taskProps.size()); result = true; } if (!result) { for (int index = 0; index < currentNumTasks; index++) { ConnectorTaskId taskId = new ConnectorTaskId(connName, index); - if (!rawTaskProps.get(index).equals(configState.rawTaskConfig(taskId))) { + if 
(!taskProps.get(index).equals(configState.taskConfig(taskId))) { log.debug("Connector {} has change in configuration for task {}-{}", connName, connName, index); result = true; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java index ab46ee536acac..cdffbb8787100 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java @@ -2229,11 +2229,11 @@ private void reconfigureConnector(final String connName, final Callback cb } private void publishConnectorTaskConfigs(String connName, List> taskProps, Callback cb) { - List> rawTaskProps = reverseTransform(connName, configState, taskProps); - if (!taskConfigsChanged(configState, connName, rawTaskProps)) { + if (!taskConfigsChanged(configState, connName, taskProps)) { return; } + List> rawTaskProps = reverseTransform(connName, configState, taskProps); if (isLeader()) { writeTaskConfigs(connName, rawTaskProps); cb.onCompletion(null, null); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java index 2768d910d4bb9..e773eeefd5c54 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java @@ -519,10 +519,10 @@ private synchronized void updateConnectorTasks(String connName) { } List> newTaskConfigs = recomputeTaskConfigs(connName); - List> rawTaskConfigs = reverseTransform(connName, configState, newTaskConfigs); - if (taskConfigsChanged(configState, connName, rawTaskConfigs)) { + if (taskConfigsChanged(configState, 
connName, newTaskConfigs)) { removeConnectorTasks(connName); + List> rawTaskConfigs = reverseTransform(connName, configState, newTaskConfigs); configBackingStore.putTaskConfigs(connName, rawTaskConfigs); createConnectorTasks(connName); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java index e69de29bb2d1d..6ebac341032a3 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java @@ -0,0 +1,1318 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.connect.storage; + +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.IsolationLevel; +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.errors.ProducerFencedException; +import org.apache.kafka.common.errors.TopicAuthorizationException; +import org.apache.kafka.common.header.internals.RecordHeaders; +import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaAndValue; +import org.apache.kafka.connect.data.Struct; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.runtime.RestartRequest; +import org.apache.kafka.connect.runtime.SessionKey; +import org.apache.kafka.connect.runtime.TargetState; +import org.apache.kafka.connect.runtime.WorkerConfig; +import org.apache.kafka.connect.runtime.distributed.DistributedConfig; +import org.apache.kafka.connect.util.Callback; +import org.apache.kafka.connect.util.ConnectorTaskId; +import org.apache.kafka.connect.util.KafkaBasedLog; +import org.apache.kafka.connect.util.TopicAdmin; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.AdditionalMatchers; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import 
java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.ISOLATION_LEVEL_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.CLIENT_ID_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.TRANSACTIONAL_ID_CONFIG; +import static org.apache.kafka.connect.runtime.distributed.DistributedConfig.EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG; +import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.INCLUDE_TASKS_FIELD_NAME; +import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.ONLY_FAILED_FIELD_NAME; +import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.READ_WRITE_TOTAL_TIMEOUT_MS; +import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.RESTART_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static 
org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@RunWith(MockitoJUnitRunner.StrictStubs.class) +public class KafkaConfigBackingStoreMockitoTest { + private static final String CLIENT_ID_BASE = "test-client-id-"; + private static final String TOPIC = "connect-configs"; + private static final short TOPIC_REPLICATION_FACTOR = 5; + private static final Map DEFAULT_CONFIG_STORAGE_PROPS = new HashMap<>(); + + static { + DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.CONFIG_TOPIC_CONFIG, TOPIC); + DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "connect-offsets"); + DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG, Short.toString(TOPIC_REPLICATION_FACTOR)); + DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.GROUP_ID_CONFIG, "connect"); + DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "status-topic"); + DEFAULT_CONFIG_STORAGE_PROPS.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9093"); + DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); + DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); + } + + private static final List CONNECTOR_IDS = Arrays.asList("connector1", "connector2"); + private static final List CONNECTOR_CONFIG_KEYS = Arrays.asList("connector-connector1", "connector-connector2"); + private static final List COMMIT_TASKS_CONFIG_KEYS = Arrays.asList("commit-connector1", "commit-connector2"); + + private static final List TARGET_STATE_KEYS = Arrays.asList("target-state-connector1", 
"target-state-connector2"); + private static final List CONNECTOR_TASK_COUNT_RECORD_KEYS = Arrays.asList("tasks-fencing-connector1", "tasks-fencing-connector2"); + private static final String CONNECTOR_1_NAME = "connector1"; + private static final String CONNECTOR_2_NAME = "connector2"; + private static final List RESTART_CONNECTOR_KEYS = Arrays.asList(RESTART_KEY(CONNECTOR_1_NAME), RESTART_KEY(CONNECTOR_2_NAME)); + + // Need a) connector with multiple tasks and b) multiple connectors + private static final List TASK_IDS = Arrays.asList( + new ConnectorTaskId("connector1", 0), + new ConnectorTaskId("connector1", 1), + new ConnectorTaskId("connector2", 0) + ); + private static final List TASK_CONFIG_KEYS = Arrays.asList("task-connector1-0", "task-connector1-1", "task-connector2-0"); + // Need some placeholders -- the contents don't matter here, just that they are restored properly + private static final List> SAMPLE_CONFIGS = Arrays.asList( + Collections.singletonMap("config-key-one", "config-value-one"), + Collections.singletonMap("config-key-two", "config-value-two"), + Collections.singletonMap("config-key-three", "config-value-three") + ); + private static final List TASK_CONFIG_STRUCTS = Arrays.asList( + new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), + new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)) + ); + private static final Struct ONLY_FAILED_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(INCLUDE_TASKS_FIELD_NAME, false); + private static final Struct INCLUDE_TASKS_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true); + private static final List RESTART_REQUEST_STRUCTS = Arrays.asList( + new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true).put(INCLUDE_TASKS_FIELD_NAME, false), + ONLY_FAILED_MISSING_STRUCT, + INCLUDE_TASKS_MISSING_STRUCT); + 
+ private static final List CONNECTOR_CONFIG_STRUCTS = Arrays.asList( + new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), + new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)), + new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2)) + ); + + private static final Struct TARGET_STATE_PAUSED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) + .put("state", "PAUSED") + .put("state.v2", "PAUSED"); + private static final Struct TARGET_STATE_PAUSED_LEGACY = new Struct(KafkaConfigBackingStore.TARGET_STATE_V0) + .put("state", "PAUSED"); + private static final Struct TARGET_STATE_STOPPED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) + .put("state", "PAUSED") + .put("state.v2", "STOPPED"); + private static final List CONNECTOR_TASK_COUNT_RECORD_STRUCTS = Arrays.asList( + new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 6), + new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 9) + ); + + // The exact format doesn't matter here since both conversions are mocked + private static final List CONFIGS_SERIALIZED = Arrays.asList( + "config-bytes-1".getBytes(), "config-bytes-2".getBytes(), "config-bytes-3".getBytes(), + "config-bytes-4".getBytes(), "config-bytes-5".getBytes(), "config-bytes-6".getBytes(), + "config-bytes-7".getBytes(), "config-bytes-8".getBytes(), "config-bytes-9".getBytes() + ); + private static final Struct TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR + = new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2); + + private static final Struct TASKS_COMMIT_STRUCT_ZERO_TASK_CONNECTOR + = new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 0); + + private static final List TARGET_STATES_SERIALIZED = Arrays.asList( + "started".getBytes(), "paused".getBytes(), "stopped".getBytes() + ); + @Mock + private Converter 
converter; + @Mock + private ConfigBackingStore.UpdateListener configUpdateListener; + private Map props = new HashMap<>(DEFAULT_CONFIG_STORAGE_PROPS); + private DistributedConfig config; + @Mock + KafkaBasedLog configLog; + @Mock + Producer fencableProducer; + @Mock + Future producerFuture; + private KafkaConfigBackingStore configStorage; + + private final ArgumentCaptor capturedTopic = ArgumentCaptor.forClass(String.class); + @SuppressWarnings("unchecked") + private final ArgumentCaptor> capturedConsumerProps = ArgumentCaptor.forClass(Map.class); + @SuppressWarnings("unchecked") + private final ArgumentCaptor> capturedProducerProps = ArgumentCaptor.forClass(Map.class); + @SuppressWarnings("unchecked") + private final ArgumentCaptor> capturedAdminSupplier = ArgumentCaptor.forClass(Supplier.class); + private final ArgumentCaptor capturedNewTopic = ArgumentCaptor.forClass(NewTopic.class); + @SuppressWarnings("unchecked") + private final ArgumentCaptor>> capturedConsumedCallback = ArgumentCaptor.forClass(Callback.class); + + private final MockTime time = new MockTime(); + private long logOffset = 0; + + private void createStore() { + config = Mockito.spy(new DistributedConfig(props)); + doReturn("test-cluster").when(config).kafkaClusterId(); + configStorage = Mockito.spy( + new KafkaConfigBackingStore( + converter, config, null, () -> null, CLIENT_ID_BASE, time) + ); + configStorage.setConfigLog(configLog); + configStorage.setUpdateListener(configUpdateListener); + } + + @Before + public void setUp() { + createStore(); + } + + @Test + public void testStartStop() { + props.put("config.storage.min.insync.replicas", "3"); + props.put("config.storage.max.message.bytes", "1001"); + createStore(); + + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + + verifyConfigure(); + assertEquals(TOPIC, capturedTopic.getValue()); + assertEquals("org.apache.kafka.common.serialization.StringSerializer", 
capturedProducerProps.getValue().get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)); + assertEquals("org.apache.kafka.common.serialization.ByteArraySerializer", capturedProducerProps.getValue().get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)); + assertEquals("org.apache.kafka.common.serialization.StringDeserializer", capturedConsumerProps.getValue().get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG)); + assertEquals("org.apache.kafka.common.serialization.ByteArrayDeserializer", capturedConsumerProps.getValue().get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)); + + assertEquals(TOPIC, capturedNewTopic.getValue().name()); + assertEquals(1, capturedNewTopic.getValue().numPartitions()); + assertEquals(TOPIC_REPLICATION_FACTOR, capturedNewTopic.getValue().replicationFactor()); + assertEquals("3", capturedNewTopic.getValue().configs().get("min.insync.replicas")); + assertEquals("1001", capturedNewTopic.getValue().configs().get("max.message.bytes")); + + configStorage.start(); + configStorage.stop(); + + verify(configLog).start(); + verify(configLog).stop(); + } + + @Test + public void testSnapshotCannotMutateInternalState() { + props.put("config.storage.min.insync.replicas", "3"); + props.put("config.storage.max.message.bytes", "1001"); + createStore(); + + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + configStorage.start(); + ClusterConfigState snapshot = configStorage.snapshot(); + assertNotSame(snapshot.connectorTaskCounts, configStorage.connectorTaskCounts); + assertNotSame(snapshot.connectorConfigs, configStorage.connectorConfigs); + assertNotSame(snapshot.connectorTargetStates, configStorage.connectorTargetStates); + assertNotSame(snapshot.taskConfigs, configStorage.taskConfigs); + assertNotSame(snapshot.connectorTaskCountRecords, configStorage.connectorTaskCountRecords); + assertNotSame(snapshot.connectorTaskConfigGenerations, configStorage.connectorTaskConfigGenerations); + 
assertNotSame(snapshot.connectorsPendingFencing, configStorage.connectorsPendingFencing); + assertNotSame(snapshot.inconsistentConnectors, configStorage.inconsistent); + } + + @Test + public void testPutConnectorConfig() throws Exception { + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // Null before writing + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(-1, configState.offset()); + assertNull(configState.connectorConfig(CONNECTOR_IDS.get(0))); + assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); + + String configKey = CONNECTOR_CONFIG_KEYS.get(1); + String targetStateKey = TARGET_STATE_KEYS.get(1); + + doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) + .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)))) + // Config deletion + .doAnswer(expectReadToEnd(new LinkedHashMap() {{ + put(configKey, null); + put(targetStateKey, null); + }}) + ).when(configLog).readToEnd(); + + // Writing should block until it is written and read back from Kafka + expectConvertWriteRead( + CONNECTOR_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), + "properties", SAMPLE_CONFIGS.get(0)); + + configStorage.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null); + configState = configStorage.snapshot(); + + assertEquals(1, configState.offset()); + assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); + assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); + verify(configUpdateListener).onConnectorConfigUpdate(CONNECTOR_IDS.get(0)); + + // Second should also block and all configs should still be available + expectConvertWriteRead( + CONNECTOR_CONFIG_KEYS.get(1), KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, 
CONFIGS_SERIALIZED.get(1), + "properties", SAMPLE_CONFIGS.get(1)); + + configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1), null); + configState = configStorage.snapshot(); + + assertEquals(2, configState.offset()); + assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); + assertEquals(SAMPLE_CONFIGS.get(1), configState.connectorConfig(CONNECTOR_IDS.get(1))); + verify(configUpdateListener).onConnectorConfigUpdate(CONNECTOR_IDS.get(1)); + + // Config deletion + when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(null); + when(converter.toConnectData(TOPIC, null)).thenReturn(new SchemaAndValue(null, null)); + when(configLog.sendWithReceipt(AdditionalMatchers.or(Mockito.eq(configKey), Mockito.eq(targetStateKey)), + Mockito.isNull())).thenReturn(producerFuture); + + // Deletion should remove the second one we added + configStorage.removeConnectorConfig(CONNECTOR_IDS.get(1)); + configState = configStorage.snapshot(); + + assertEquals(4, configState.offset()); + assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); + assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); + assertNull(configState.targetState(CONNECTOR_IDS.get(1))); + verify(configUpdateListener).onConnectorConfigRemove(CONNECTOR_IDS.get(1)); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testPutConnectorConfigWithTargetState() throws Exception { + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // Null before writing + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(-1, configState.offset()); + assertNull(configState.connectorConfig(CONNECTOR_IDS.get(0))); + assertNull(configState.targetState(CONNECTOR_IDS.get(0))); + + doAnswer(expectReadToEnd(new LinkedHashMap() {{ + put(TARGET_STATE_KEYS.get(0), 
TARGET_STATES_SERIALIZED.get(2)); + put(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + }}) + ).when(configLog).readToEnd(); + + // We expect to write the target state first, followed by the config write and then a read to end + expectConvertWriteRead( + TARGET_STATE_KEYS.get(0), KafkaConfigBackingStore.TARGET_STATE_V1, TARGET_STATES_SERIALIZED.get(2), + "state.v2", TargetState.STOPPED.name()); + + expectConvertWriteRead( + CONNECTOR_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), + "properties", SAMPLE_CONFIGS.get(0)); + + // Writing should block until it is written and read back from Kafka + configStorage.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), TargetState.STOPPED); + configState = configStorage.snapshot(); + assertEquals(2, configState.offset()); + assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(0))); + assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); + + // We don't expect the config update listener's onConnectorTargetStateChange hook to be invoked + verify(configUpdateListener, never()).onConnectorTargetStateChange(anyString()); + + verify(configUpdateListener).onConnectorConfigUpdate(CONNECTOR_IDS.get(0)); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testPutConnectorConfigProducerError() throws Exception { + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + when(converter.fromConnectData(TOPIC, KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONNECTOR_CONFIG_STRUCTS.get(0))) + .thenReturn(CONFIGS_SERIALIZED.get(0)); + when(configLog.sendWithReceipt(anyString(), any(byte[].class))).thenReturn(producerFuture); + + // Verify initial state + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(-1, configState.offset()); + assertEquals(0, 
configState.connectors().size()); + + Exception thrownException = new ExecutionException(new TopicAuthorizationException(Collections.singleton("test"))); + when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenThrow(thrownException); + + // verify that the producer exception from KafkaBasedLog::send is propagated + ConnectException e = assertThrows(ConnectException.class, () -> configStorage.putConnectorConfig(CONNECTOR_IDS.get(0), + SAMPLE_CONFIGS.get(0), null)); + assertTrue(e.getMessage().contains("Error writing connector configuration to Kafka")); + assertEquals(thrownException, e.getCause()); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testRemoveConnectorConfigSlowProducer() throws Exception { + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + @SuppressWarnings("unchecked") + Future connectorConfigProducerFuture = mock(Future.class); + + @SuppressWarnings("unchecked") + Future targetStateProducerFuture = mock(Future.class); + + when(configLog.sendWithReceipt(anyString(), isNull())) + // tombstone for the connector config + .thenReturn(connectorConfigProducerFuture) + // tombstone for the connector target state + .thenReturn(targetStateProducerFuture); + + when(connectorConfigProducerFuture.get(eq(READ_WRITE_TOTAL_TIMEOUT_MS), any(TimeUnit.class))) + .thenAnswer((Answer) invocation -> { + time.sleep(READ_WRITE_TOTAL_TIMEOUT_MS - 1000); + return null; + }); + + // the future get timeout is expected to be reduced according to how long the previous Future::get took + when(targetStateProducerFuture.get(eq(1000L), any(TimeUnit.class))) + .thenAnswer((Answer) invocation -> { + time.sleep(1000); + return null; + }); + + @SuppressWarnings("unchecked") + Future future = mock(Future.class); + when(configLog.readToEnd()).thenReturn(future); + + // the Future::get calls on the previous two producer futures exhausted the 
overall timeout; so expect the + // timeout on the log read future to be 0 + when(future.get(eq(0L), any(TimeUnit.class))).thenReturn(null); + + configStorage.removeConnectorConfig("test-connector"); + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + @SuppressWarnings("unchecked") + public void testWritePrivileges() throws Exception { + // With exactly.once.source.support = preparing (or also, "enabled"), we need to use a transactional producer + // to write some types of messages to the config topic + props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); + createStore(); + + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // Try and fail to write a task count record to the config topic without write privileges + when(converter.fromConnectData(TOPIC, KafkaConfigBackingStore.TASK_COUNT_RECORD_V0, CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(0))) + .thenReturn(CONFIGS_SERIALIZED.get(0)); + + // Should fail the first time since we haven't claimed write privileges + assertThrows(IllegalStateException.class, () -> configStorage.putTaskCountRecord(CONNECTOR_IDS.get(0), 6)); + + // Claim write privileges + doReturn(fencableProducer).when(configStorage).createFencableProducer(); + // And write the task count record successfully + when(fencableProducer.send(any(ProducerRecord.class))).thenReturn(null); + doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) + .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)))) + .when(configLog).readToEnd(); + when(converter.toConnectData(TOPIC, CONFIGS_SERIALIZED.get(0))) + .thenReturn(new SchemaAndValue(null, structToMap(CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(0)))); + + // Should succeed now + configStorage.claimWritePrivileges(); + configStorage.putTaskCountRecord(CONNECTOR_IDS.get(0), 6); + + 
verify(fencableProducer).beginTransaction(); + verify(fencableProducer).commitTransaction(); + + // Try to write a connector config + when(converter.fromConnectData(TOPIC, KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONNECTOR_CONFIG_STRUCTS.get(0))) + .thenReturn(CONFIGS_SERIALIZED.get(1)); + // Get fenced out + doThrow(new ProducerFencedException("Better luck next time")) + .doNothing() + .when(fencableProducer).commitTransaction(); + + // Should fail again when we get fenced out + assertThrows(PrivilegedWriteException.class, () -> configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(0), null)); + + verify(fencableProducer, times(2)).beginTransaction(); + verify(fencableProducer).close(Duration.ZERO); + + // Should fail if we retry without reclaiming write privileges + assertThrows(IllegalStateException.class, () -> configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(0), null)); + + // In the meantime, write a target state (which doesn't require write privileges) + when(converter.fromConnectData(TOPIC, KafkaConfigBackingStore.TARGET_STATE_V1, TARGET_STATE_PAUSED)) + .thenReturn(CONFIGS_SERIALIZED.get(1)); + when(configLog.sendWithReceipt("target-state-" + CONNECTOR_IDS.get(1), CONFIGS_SERIALIZED.get(1))) + .thenReturn(producerFuture); + when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(null); + + // Should succeed even without write privileges (target states can be written by anyone) + configStorage.putTargetState(CONNECTOR_IDS.get(1), TargetState.PAUSED); + + // Reclaim write privileges and successfully write the config + when(converter.toConnectData(TOPIC, CONFIGS_SERIALIZED.get(2))) + .thenReturn(new SchemaAndValue(null, structToMap(CONNECTOR_CONFIG_STRUCTS.get(0)))); + + // Should succeed if we re-claim write privileges + configStorage.claimWritePrivileges(); + configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(0), null); + + verify(fencableProducer, 
times(3)).beginTransaction(); + verify(fencableProducer, times(3)).commitTransaction(); + verify(configUpdateListener).onConnectorConfigUpdate(CONNECTOR_IDS.get(1)); + + configStorage.stop(); + verify(configLog).stop(); + verify(configStorage, times(2)).createFencableProducer(); + verify(fencableProducer, times(2)).close(Duration.ZERO); + } + + @Test + public void testRestoreTargetStateUnexpectedDeletion() { + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(3), null); + deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + logOffset = 5; + + expectStart(existingRecords, deserialized); + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // The target state deletion should reset the 
state to STARTED + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(5, configState.offset()); // Should always be next to be read, even if uncommitted + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testRestoreTargetState() { + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(1), + CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + // A worker running an older version wrote this target state; make sure we can handle it correctly + deserialized.put(CONFIGS_SERIALIZED.get(3), TARGET_STATE_PAUSED_LEGACY); + 
deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + deserialized.put(CONFIGS_SERIALIZED.get(5), TARGET_STATE_STOPPED); + logOffset = 6; + + expectStart(existingRecords, deserialized); + + // Shouldn't see any callbacks since this is during startup + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // Should see a single connector with initial state paused + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(6, configState.offset()); // Should always be next to be read, even if uncommitted + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0))); + assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1))); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testRestore() { + // Restoring data should notify only of the latest values after loading is complete. This also validates + // that inconsistent state is ignored. 
+ + // Overwrite each type at least once to ensure we see the latest data after loading + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_TASK_COUNT_RECORD_KEYS.get(1), + CONFIGS_SERIALIZED.get(6), new RecordHeaders(), Optional.empty()), + // Connector after root update should make it through, task update shouldn't + new ConsumerRecord<>(TOPIC, 0, 7, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(7), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 8, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(8), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), CONNECTOR_CONFIG_STRUCTS.get(0)); + 
deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(3), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(4), CONNECTOR_CONFIG_STRUCTS.get(1)); + deserialized.put(CONFIGS_SERIALIZED.get(5), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + deserialized.put(CONFIGS_SERIALIZED.get(6), CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(1)); + deserialized.put(CONFIGS_SERIALIZED.get(7), CONNECTOR_CONFIG_STRUCTS.get(2)); + deserialized.put(CONFIGS_SERIALIZED.get(8), TASK_CONFIG_STRUCTS.get(1)); + logOffset = 9; + + expectStart(existingRecords, deserialized); + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // Should see a single connector and its config should be the last one seen anywhere in the log + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(logOffset, configState.offset()); // Should always be next to be read, even if uncommitted + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); + // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2] + assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0))); + // Should see 2 tasks for that connector. 
Only config updates before the root key update should be reflected + assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(CONNECTOR_IDS.get(0))); + // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] + assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); + assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(1))); + assertEquals(9, (int) configState.taskCountRecord(CONNECTOR_IDS.get(1))); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + assertEquals(Collections.singleton("connector1"), configState.connectorsPendingFencing); + + // Shouldn't see any callbacks since this is during startup + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testRestoreConnectorDeletion() { + // Restoring data should notify only of the latest values after loading is complete. This also validates + // that inconsistent state is ignored. + + // Overwrite each type at least once to ensure we see the latest data after loading + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(0), + CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 
0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty())); + + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(3), null); + deserialized.put(CONFIGS_SERIALIZED.get(4), null); + deserialized.put(CONFIGS_SERIALIZED.get(5), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + + logOffset = 6; + expectStart(existingRecords, deserialized); + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // Should see a single connector and its config should be the last one seen anywhere in the log + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(6, configState.offset()); // Should always be next to be read, even if uncommitted + assertTrue(configState.connectors().isEmpty()); + + // Shouldn't see any callbacks since this is during startup + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testRestoreZeroTasks() { + // Restoring data should notify only of the latest values after loading is complete. This also validates + // that inconsistent state is ignored. 
+ + // Overwrite each type at least once to ensure we see the latest data after loading + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), + // Connector after root update should make it through, task update shouldn't + new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(6), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 7, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(7), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(3), CONNECTOR_CONFIG_STRUCTS.get(1)); + deserialized.put(CONFIGS_SERIALIZED.get(4), 
TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + deserialized.put(CONFIGS_SERIALIZED.get(5), CONNECTOR_CONFIG_STRUCTS.get(2)); + deserialized.put(CONFIGS_SERIALIZED.get(6), TASK_CONFIG_STRUCTS.get(1)); + deserialized.put(CONFIGS_SERIALIZED.get(7), TASKS_COMMIT_STRUCT_ZERO_TASK_CONNECTOR); + logOffset = 8; + expectStart(existingRecords, deserialized); + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // Should see a single connector and its config should be the last one seen anywhere in the log + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(8, configState.offset()); // Should always be next to be read, even if uncommitted + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2] + assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0))); + // Should see 0 tasks for that connector. 
+ assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); + // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + + // Shouldn't see any callbacks since this is during startup + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testRecordToRestartRequest() { + ConsumerRecord record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()); + Struct struct = RESTART_REQUEST_STRUCTS.get(0); + SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct)); + RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue); + assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName()); + assertEquals(struct.getBoolean(INCLUDE_TASKS_FIELD_NAME), restartRequest.includeTasks()); + assertEquals(struct.getBoolean(ONLY_FAILED_FIELD_NAME), restartRequest.onlyFailed()); + } + + @Test + public void testRecordToRestartRequestOnlyFailedInconsistent() { + ConsumerRecord record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()); + Struct struct = ONLY_FAILED_MISSING_STRUCT; + SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct)); + RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue); + assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName()); + assertEquals(struct.getBoolean(INCLUDE_TASKS_FIELD_NAME), restartRequest.includeTasks()); + assertFalse(restartRequest.onlyFailed()); + } + + @Test + public void testRecordToRestartRequestIncludeTasksInconsistent() { + ConsumerRecord record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), + 
CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()); + Struct struct = INCLUDE_TASKS_MISSING_STRUCT; + SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct)); + RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue); + assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName()); + assertFalse(restartRequest.includeTasks()); + assertEquals(struct.getBoolean(ONLY_FAILED_FIELD_NAME), restartRequest.onlyFailed()); + } + + @Test + public void testFencableProducerPropertiesOverrideUserSuppliedValues() { + props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); + String groupId = "my-other-connect-cluster"; + props.put(GROUP_ID_CONFIG, groupId); + props.put(TRANSACTIONAL_ID_CONFIG, "my-custom-transactional-id"); + props.put(ENABLE_IDEMPOTENCE_CONFIG, "false"); + createStore(); + + Map fencableProducerProperties = configStorage.fencableProducerProps(config); + assertEquals("connect-cluster-" + groupId, fencableProducerProperties.get(TRANSACTIONAL_ID_CONFIG)); + assertEquals("true", fencableProducerProperties.get(ENABLE_IDEMPOTENCE_CONFIG)); + } + + @Test + public void testConsumerPropertiesDoNotOverrideUserSuppliedValuesWithoutExactlyOnceSourceEnabled() { + props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); + props.put(ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_UNCOMMITTED.toString()); + createStore(); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + assertEquals( + IsolationLevel.READ_UNCOMMITTED.toString(), + capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG) + ); + } + + @Test + public void testClientIds() { + props = new HashMap<>(DEFAULT_CONFIG_STORAGE_PROPS); + props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled"); + createStore(); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + Map fencableProducerProps = configStorage.fencableProducerProps(config); + + final String 
expectedClientId = CLIENT_ID_BASE + "configs"; + assertEquals(expectedClientId, capturedProducerProps.getValue().get(CLIENT_ID_CONFIG)); + assertEquals(expectedClientId, capturedConsumerProps.getValue().get(CLIENT_ID_CONFIG)); + assertEquals(expectedClientId + "-leader", fencableProducerProps.get(CLIENT_ID_CONFIG)); + } + + @Test + public void testExceptionOnStartWhenConfigTopicHasMultiplePartitions() { + when(configLog.partitionCount()).thenReturn(2); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + ConfigException e = assertThrows(ConfigException.class, () -> configStorage.start()); + assertTrue(e.getMessage().contains("required to have a single partition")); + } + + @Test + public void testFencableProducerPropertiesInsertedByDefault() { + props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); + String groupId = "my-connect-cluster"; + props.put(GROUP_ID_CONFIG, groupId); + props.remove(TRANSACTIONAL_ID_CONFIG); + props.remove(ENABLE_IDEMPOTENCE_CONFIG); + createStore(); + + Map fencableProducerProperties = configStorage.fencableProducerProps(config); + assertEquals("connect-cluster-" + groupId, fencableProducerProperties.get(TRANSACTIONAL_ID_CONFIG)); + assertEquals("true", fencableProducerProperties.get(ENABLE_IDEMPOTENCE_CONFIG)); + } + + @Test + public void testConsumerPropertiesInsertedByDefaultWithExactlyOnceSourceEnabled() { + props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled"); + props.remove(ISOLATION_LEVEL_CONFIG); + createStore(); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + assertEquals( + IsolationLevel.READ_COMMITTED.toString(), + capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG) + ); + } + + @Test + public void testConsumerPropertiesOverrideUserSuppliedValuesWithExactlyOnceSourceEnabled() { + props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled"); + props.put(ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_UNCOMMITTED.toString()); + createStore(); + + 
configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + assertEquals( + IsolationLevel.READ_COMMITTED.toString(), + capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG) + ); + } + + @Test + public void testConsumerPropertiesNotInsertedByDefaultWithoutExactlyOnceSourceEnabled() { + props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); + props.remove(ISOLATION_LEVEL_CONFIG); + createStore(); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + assertNull(capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG)); + } + + @Test + public void testBackgroundConnectorDeletion() throws Exception { + // verify that we handle connector deletions correctly when they come up through the log + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(1)); + deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + logOffset = 5; + + expectStart(existingRecords, deserialized); + when(configLog.partitionCount()).thenReturn(1); + + 
configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + configStorage.start(); + verify(configLog).start(); + + // Should see a single connector with initial state paused + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); + assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); + assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(new ConnectorTaskId(CONNECTOR_IDS.get(0), 0))); + assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(new ConnectorTaskId(CONNECTOR_IDS.get(0), 1))); + assertEquals(2, configState.taskCount(CONNECTOR_IDS.get(0))); + + LinkedHashMap serializedData = new LinkedHashMap<>(); + serializedData.put(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + serializedData.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(1)); + doAnswer(expectReadToEnd(serializedData)).when(configLog).readToEnd(); + + Map deserializedData = new HashMap<>(); + deserializedData.put(CONNECTOR_CONFIG_KEYS.get(0), null); + deserializedData.put(TARGET_STATE_KEYS.get(0), null); + expectRead(serializedData, deserializedData); + + configStorage.refresh(0, TimeUnit.SECONDS); + verify(configUpdateListener).onConnectorConfigRemove(CONNECTOR_IDS.get(0)); + + configState = configStorage.snapshot(); + // Connector should now be removed from the snapshot + assertFalse(configState.contains(CONNECTOR_IDS.get(0))); + assertEquals(0, configState.taskCount(CONNECTOR_IDS.get(0))); + // Ensure that the deleted connector's deferred task updates have been cleaned up + // in order to prevent unbounded growth of the map + assertEquals(Collections.emptyMap(), configStorage.deferredTaskUpdates); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testPutTaskConfigsDoesNotResolveAllInconsistencies() throws Exception { + // Test a case where a failure and compaction has left us in an 
inconsistent state when reading the log. + // We start out by loading an initial configuration where we started to write a task update, and then + // compaction cleaned up the earlier record. + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + // This is the record that has been compacted: + //new ConsumerRecord<>(TOPIC, 0, 1, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1)), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + deserialized.put(CONFIGS_SERIALIZED.get(5), TASK_CONFIG_STRUCTS.get(1)); + logOffset = 6; + expectStart(existingRecords, deserialized); + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // After reading the log, it should have been in an inconsistent state + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(6, configState.offset()); // Should always be next to be read, not last committed + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + // 
Inconsistent data should leave us with no tasks listed for the connector and an entry in the inconsistent list + assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); + // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] + assertNull(configState.taskConfig(TASK_IDS.get(0))); + assertNull(configState.taskConfig(TASK_IDS.get(1))); + assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configState.inconsistentConnectors()); + + // Records to be read by consumer as it reads to the end of the log + LinkedHashMap serializedConfigs = new LinkedHashMap<>(); + serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); + // Successful attempt to write new task config + doAnswer(expectReadToEnd(new LinkedHashMap<>())) + .doAnswer(expectReadToEnd(new LinkedHashMap<>())) + .doAnswer(expectReadToEnd(serializedConfigs)) + .when(configLog).readToEnd(); + expectConvertWriteRead( + TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), + "properties", SAMPLE_CONFIGS.get(0)); + expectConvertWriteRead( + COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), + "tasks", 1); // Updated to just 1 task + + // Next, issue a write that has everything that is needed and it should be accepted. Note that in this case + // we are going to shrink the number of tasks to 1 + configStorage.putTaskConfigs("connector1", Collections.singletonList(SAMPLE_CONFIGS.get(0))); + + // Validate updated config + configState = configStorage.snapshot(); + // This is only two more ahead of the last one because multiple calls fail, and so their configs are not written + // to the topic. Only the last call with 1 task config + 1 commit actually gets written. 
+ assertEquals(8, configState.offset()); + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(Collections.singletonList(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + + // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks + verify(configUpdateListener).onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(0))); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testPutRestartRequestOnlyFailed() throws Exception { + RestartRequest restartRequest = new RestartRequest(CONNECTOR_IDS.get(0), true, false); + testPutRestartRequest(restartRequest); + } + + @Test + public void testPutRestartRequestOnlyFailedIncludingTasks() throws Exception { + RestartRequest restartRequest = new RestartRequest(CONNECTOR_IDS.get(0), true, true); + testPutRestartRequest(restartRequest); + } + + private void testPutRestartRequest(RestartRequest restartRequest) throws Exception { + expectStart(Collections.emptyList(), Collections.emptyMap()); + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + configStorage.start(); + verify(configLog).start(); + + expectConvertWriteRead( + RESTART_CONNECTOR_KEYS.get(0), KafkaConfigBackingStore.RESTART_REQUEST_V0, CONFIGS_SERIALIZED.get(0), + ONLY_FAILED_FIELD_NAME, restartRequest.onlyFailed()); + + LinkedHashMap recordsToRead = new LinkedHashMap<>(); + recordsToRead.put(RESTART_CONNECTOR_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + doAnswer(expectReadToEnd(recordsToRead)).when(configLog).readToEnd(); + + // Writing should block until it is written and read back from Kafka + configStorage.putRestartRequest(restartRequest); + + final ArgumentCaptor 
restartRequestCaptor = ArgumentCaptor.forClass(RestartRequest.class); + verify(configUpdateListener).onRestartRequest(restartRequestCaptor.capture()); + + assertEquals(restartRequest.connectorName(), restartRequestCaptor.getValue().connectorName()); + assertEquals(restartRequest.onlyFailed(), restartRequestCaptor.getValue().onlyFailed()); + assertEquals(restartRequest.includeTasks(), restartRequestCaptor.getValue().includeTasks()); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testRestoreRestartRequestInconsistentState() { + // Restoring data should notify only of the latest values after loading is complete. This also validates + // that inconsistent state doesn't prevent startup. + // Overwrite each type at least once to ensure we see the latest data after loading + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(1), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(1), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), RESTART_REQUEST_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), RESTART_REQUEST_STRUCTS.get(1)); + deserialized.put(CONFIGS_SERIALIZED.get(2), RESTART_REQUEST_STRUCTS.get(2)); + deserialized.put(CONFIGS_SERIALIZED.get(3), null); + logOffset = 4; + expectStart(existingRecords, deserialized); + 
when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + configStorage.start(); + verify(configLog).start(); + + // Shouldn't see any callbacks since this is during startup + verify(configUpdateListener, never()).onConnectorConfigRemove(anyString()); + verify(configUpdateListener, never()).onConnectorConfigUpdate(anyString()); + verify(configUpdateListener, never()).onTaskConfigUpdate(anyCollection()); + verify(configUpdateListener, never()).onConnectorTargetStateChange(anyString()); + verify(configUpdateListener, never()).onSessionKeyUpdate(any(SessionKey.class)); + verify(configUpdateListener, never()).onRestartRequest(any(RestartRequest.class)); + verify(configUpdateListener, never()).onLoggingLevelUpdate(anyString(), anyString()); + + configStorage.stop(); + verify(configLog).stop(); + } + + @Test + public void testPutLogLevel() throws Exception { + final String logger1 = "org.apache.zookeeper"; + final String logger2 = "org.apache.cassandra"; + final String logger3 = "org.apache.kafka.clients"; + final String logger4 = "org.apache.kafka.connect"; + final String level1 = "ERROR"; + final String level3 = "WARN"; + final String level4 = "DEBUG"; + + final Struct existingLogLevel = new Struct(KafkaConfigBackingStore.LOGGER_LEVEL_V0) + .put("level", level1); + + // Pre-populate the config topic with a couple of logger level records; these should be ignored (i.e., + // not reported to the update listener) + List> existingRecords = Arrays.asList( + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, "logger-cluster-" + logger1, + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty() + ), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, "logger-cluster-" + logger2, + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty() + ) + ); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), 
existingLogLevel); + // Make sure we gracefully handle tombstones + deserialized.put(CONFIGS_SERIALIZED.get(1), null); + logOffset = 2; + + expectStart(existingRecords, deserialized); + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + + configStorage.start(); + verify(configLog).start(); + + expectConvertWriteRead( + "logger-cluster-" + logger3, KafkaConfigBackingStore.LOGGER_LEVEL_V0, CONFIGS_SERIALIZED.get(2), + "level", level3); + configStorage.putLoggerLevel(logger3, level3); + + expectConvertWriteRead( + "logger-cluster-" + logger4, KafkaConfigBackingStore.LOGGER_LEVEL_V0, CONFIGS_SERIALIZED.get(3), + "level", level4); + configStorage.putLoggerLevel(logger4, level4); + + LinkedHashMap newRecords = new LinkedHashMap<>(); + newRecords.put("logger-cluster-" + logger3, CONFIGS_SERIALIZED.get(2)); + newRecords.put("logger-cluster-" + logger4, CONFIGS_SERIALIZED.get(3)); + doAnswer(expectReadToEnd(newRecords)).when(configLog).readToEnd(); + + configStorage.refresh(0, TimeUnit.SECONDS); + verify(configUpdateListener).onLoggingLevelUpdate(logger3, level3); + verify(configUpdateListener).onLoggingLevelUpdate(logger4, level4); + + configStorage.stop(); + verify(configLog).stop(); + } + + private void verifyConfigure() { + verify(configStorage).createKafkaBasedLog(capturedTopic.capture(), capturedProducerProps.capture(), + capturedConsumerProps.capture(), capturedConsumedCallback.capture(), + capturedNewTopic.capture(), capturedAdminSupplier.capture(), + any(WorkerConfig.class), any(Time.class)); + } + + // If non-empty, deserializations should be a LinkedHashMap + private void expectStart(final List> preexistingRecords, + final Map deserializations) { + doAnswer(invocation -> { + for (ConsumerRecord rec : preexistingRecords) + capturedConsumedCallback.getValue().onCompletion(null, rec); + return null; + }).when(configLog).start(); + + for (Map.Entry deserializationEntry : 
deserializations.entrySet()) { + // Note null schema because default settings for internal serialization are schema-less + when(converter.toConnectData(TOPIC, deserializationEntry.getKey())) + .thenReturn(new SchemaAndValue(null, structToMap(deserializationEntry.getValue()))); + } + } + + // Expect a conversion & write to the underlying log, followed by a subsequent read when the data is consumed back + // from the log. Validate the data that is captured when the conversion is performed matches the specified data + // (by checking a single field's value) + private void expectConvertWriteRead(final String configKey, final Schema valueSchema, final byte[] serialized, + final String dataFieldName, final Object dataFieldValue) throws Exception { + final ArgumentCaptor capturedRecord = ArgumentCaptor.forClass(Struct.class); + when(converter.fromConnectData(eq(TOPIC), eq(valueSchema), capturedRecord.capture())).thenReturn(serialized); + when(configLog.sendWithReceipt(configKey, serialized)).thenReturn(producerFuture); + when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(null); + when(converter.toConnectData(TOPIC, serialized)).thenAnswer(invocation -> { + assertEquals(dataFieldValue, capturedRecord.getValue().get(dataFieldName)); + // Note null schema because default settings for internal serialization are schema-less + return new SchemaAndValue(null, structToMap(capturedRecord.getValue())); + }); + } + + private void expectRead(LinkedHashMap serializedValues, + Map deserializedValues) { + for (Map.Entry deserializedValueEntry : deserializedValues.entrySet()) { + byte[] serializedValue = serializedValues.get(deserializedValueEntry.getKey()); + when(converter.toConnectData(TOPIC, serializedValue)) + .thenReturn(new SchemaAndValue(null, structToMap(deserializedValueEntry.getValue()))); + } + } + + // This map needs to maintain ordering + private Answer> expectReadToEnd(final Map serializedConfigs) { + return invocation -> { + for (Map.Entry entry : 
serializedConfigs.entrySet()) { + capturedConsumedCallback.getValue().onCompletion(null, + new ConsumerRecord<>(TOPIC, 0, logOffset++, 0L, TimestampType.CREATE_TIME, 0, 0, + entry.getKey(), entry.getValue(), new RecordHeaders(), Optional.empty())); + } + CompletableFuture f = new CompletableFuture<>(); + f.complete(null); + return f; + }; + } + + // Generates a Map representation of Struct. Only does shallow traversal, so nested structs are not converted + private Map structToMap(Struct struct) { + if (struct == null) + return null; + Map result = new HashMap<>(); + for (Field field : struct.schema().fields()) result.put(field.name(), struct.get(field)); + return result; + } +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java index 847cf33b281c5..ae5f82cd3eeb2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java @@ -18,16 +18,8 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.NewTopic; -import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.IsolationLevel; -import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.errors.ProducerFencedException; -import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.MockTime; 
@@ -36,27 +28,26 @@ import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaAndValue; import org.apache.kafka.connect.data.Struct; -import org.apache.kafka.connect.errors.ConnectException; -import org.apache.kafka.connect.runtime.RestartRequest; -import org.apache.kafka.connect.runtime.SessionKey; import org.apache.kafka.connect.runtime.TargetState; import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.distributed.DistributedConfig; import org.apache.kafka.connect.util.Callback; import org.apache.kafka.connect.util.ConnectorTaskId; import org.apache.kafka.connect.util.KafkaBasedLog; +import org.apache.kafka.connect.util.TestFuture; import org.apache.kafka.connect.util.TopicAdmin; +import org.easymock.Capture; +import org.easymock.EasyMock; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.AdditionalMatchers; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import java.time.Duration; +import org.powermock.api.easymock.PowerMock; +import org.powermock.api.easymock.annotation.Mock; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; +import org.powermock.reflect.Whitebox; + import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -66,44 +57,21 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; -import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; -import static 
org.apache.kafka.clients.consumer.ConsumerConfig.ISOLATION_LEVEL_CONFIG; -import static org.apache.kafka.clients.producer.ProducerConfig.CLIENT_ID_CONFIG; -import static org.apache.kafka.clients.producer.ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG; -import static org.apache.kafka.clients.producer.ProducerConfig.TRANSACTIONAL_ID_CONFIG; -import static org.apache.kafka.connect.runtime.distributed.DistributedConfig.EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG; import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.INCLUDE_TASKS_FIELD_NAME; import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.ONLY_FAILED_FIELD_NAME; -import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.READ_WRITE_TOTAL_TIMEOUT_MS; import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.RESTART_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyCollection; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@RunWith(MockitoJUnitRunner.StrictStubs.class) + +@RunWith(PowerMockRunner.class) +@PrepareForTest({KafkaConfigBackingStore.class, WorkerConfig.class}) +@PowerMockIgnore({"javax.management.*", "javax.crypto.*"}) public class 
KafkaConfigBackingStoreTest { private static final String CLIENT_ID_BASE = "test-client-id-"; private static final String TOPIC = "connect-configs"; @@ -124,9 +92,9 @@ public class KafkaConfigBackingStoreTest { private static final List CONNECTOR_IDS = Arrays.asList("connector1", "connector2"); private static final List CONNECTOR_CONFIG_KEYS = Arrays.asList("connector-connector1", "connector-connector2"); private static final List COMMIT_TASKS_CONFIG_KEYS = Arrays.asList("commit-connector1", "commit-connector2"); - - private static final List TARGET_STATE_KEYS = Arrays.asList("target-state-connector1", "target-state-connector2"); + private static final List TARGET_STATE_KEYS = Arrays.asList("target-state-connector1", "target-state-connector2"); private static final List CONNECTOR_TASK_COUNT_RECORD_KEYS = Arrays.asList("tasks-fencing-connector1", "tasks-fencing-connector2"); + private static final String CONNECTOR_1_NAME = "connector1"; private static final String CONNECTOR_2_NAME = "connector2"; private static final List RESTART_CONNECTOR_KEYS = Arrays.asList(RESTART_KEY(CONNECTOR_1_NAME), RESTART_KEY(CONNECTOR_2_NAME)); @@ -138,41 +106,39 @@ public class KafkaConfigBackingStoreTest { new ConnectorTaskId("connector2", 0) ); private static final List TASK_CONFIG_KEYS = Arrays.asList("task-connector1-0", "task-connector1-1", "task-connector2-0"); + // Need some placeholders -- the contents don't matter here, just that they are restored properly private static final List> SAMPLE_CONFIGS = Arrays.asList( Collections.singletonMap("config-key-one", "config-value-one"), Collections.singletonMap("config-key-two", "config-value-two"), Collections.singletonMap("config-key-three", "config-value-three") ); - private static final List TASK_CONFIG_STRUCTS = Arrays.asList( - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)) - ); - 
private static final Struct ONLY_FAILED_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(INCLUDE_TASKS_FIELD_NAME, false); - private static final Struct INCLUDE_TASKS_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true); - private static final List RESTART_REQUEST_STRUCTS = Arrays.asList( - new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true).put(INCLUDE_TASKS_FIELD_NAME, false), - ONLY_FAILED_MISSING_STRUCT, - INCLUDE_TASKS_MISSING_STRUCT); - private static final List CONNECTOR_CONFIG_STRUCTS = Arrays.asList( new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2)) ); + private static final List TASK_CONFIG_STRUCTS = Arrays.asList( + new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), + new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)) + ); private static final Struct TARGET_STATE_STARTED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V0).put("state", "STARTED"); private static final Struct TARGET_STATE_PAUSED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) .put("state", "PAUSED") .put("state.v2", "PAUSED"); - private static final Struct TARGET_STATE_PAUSED_LEGACY = new Struct(KafkaConfigBackingStore.TARGET_STATE_V0) - .put("state", "PAUSED"); private static final Struct TARGET_STATE_STOPPED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) .put("state", "PAUSED") .put("state.v2", "STOPPED"); - private static final List CONNECTOR_TASK_COUNT_RECORD_STRUCTS = Arrays.asList( - new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 6), - new 
Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 9) - ); + + private static final Struct TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR + = new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2); + + private static final Struct ONLY_FAILED_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(INCLUDE_TASKS_FIELD_NAME, false); + private static final Struct INCLUDE_TASKS_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true); + private static final List RESTART_REQUEST_STRUCTS = Arrays.asList( + new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true).put(INCLUDE_TASKS_FIELD_NAME, false), + ONLY_FAILED_MISSING_STRUCT, + INCLUDE_TASKS_MISSING_STRUCT); // The exact format doesn't matter here since both conversions are mocked private static final List CONFIGS_SERIALIZED = Arrays.asList( @@ -180,52 +146,49 @@ public class KafkaConfigBackingStoreTest { "config-bytes-4".getBytes(), "config-bytes-5".getBytes(), "config-bytes-6".getBytes(), "config-bytes-7".getBytes(), "config-bytes-8".getBytes(), "config-bytes-9".getBytes() ); - private static final Struct TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR - = new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2); - - private static final Struct TASKS_COMMIT_STRUCT_ZERO_TASK_CONNECTOR - = new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 0); - private static final List TARGET_STATES_SERIALIZED = Arrays.asList( - "started".getBytes(), "paused".getBytes(), "stopped".getBytes() - ); @Mock private Converter converter; @Mock private ConfigBackingStore.UpdateListener configUpdateListener; - private Map props = new HashMap<>(DEFAULT_CONFIG_STORAGE_PROPS); + private final Map props = new HashMap<>(DEFAULT_CONFIG_STORAGE_PROPS); private DistributedConfig config; @Mock - KafkaBasedLog configLog; - @Mock - Producer fencableProducer; + KafkaBasedLog storeLog; 
@Mock Future producerFuture; private KafkaConfigBackingStore configStorage; - private final ArgumentCaptor capturedTopic = ArgumentCaptor.forClass(String.class); - @SuppressWarnings("unchecked") - private final ArgumentCaptor> capturedConsumerProps = ArgumentCaptor.forClass(Map.class); - @SuppressWarnings("unchecked") - private final ArgumentCaptor> capturedProducerProps = ArgumentCaptor.forClass(Map.class); - @SuppressWarnings("unchecked") - private final ArgumentCaptor> capturedAdminSupplier = ArgumentCaptor.forClass(Supplier.class); - private final ArgumentCaptor capturedNewTopic = ArgumentCaptor.forClass(NewTopic.class); - @SuppressWarnings("unchecked") - private final ArgumentCaptor>> capturedConsumedCallback = ArgumentCaptor.forClass(Callback.class); - + private final Capture capturedTopic = EasyMock.newCapture(); + private final Capture> capturedProducerProps = EasyMock.newCapture(); + private final Capture> capturedConsumerProps = EasyMock.newCapture(); + private final Capture> capturedAdminSupplier = EasyMock.newCapture(); + private final Capture capturedNewTopic = EasyMock.newCapture(); + private final Capture>> capturedConsumedCallback = EasyMock.newCapture(); private final MockTime time = new MockTime(); + private long logOffset = 0; private void createStore() { - config = Mockito.spy(new DistributedConfig(props)); - doReturn("test-cluster").when(config).kafkaClusterId(); - configStorage = Mockito.spy( - new KafkaConfigBackingStore( - converter, config, null, () -> null, CLIENT_ID_BASE, time) - ); - configStorage.setConfigLog(configLog); + config = PowerMock.createPartialMock( + DistributedConfig.class, + new String[]{"kafkaClusterId"}, + props); + EasyMock.expect(config.kafkaClusterId()).andReturn("test-cluster").anyTimes(); + // The kafkaClusterId is used in the constructor for KafkaConfigBackingStore + // So temporarily enter replay mode in order to mock that call + EasyMock.replay(config); + Supplier topicAdminSupplier = () -> null; + configStorage 
= PowerMock.createPartialMock( + KafkaConfigBackingStore.class, + new String[]{"createKafkaBasedLog", "createFencableProducer"}, + converter, config, null, topicAdminSupplier, CLIENT_ID_BASE, time); + Whitebox.setInternalState(configStorage, "configLog", storeLog); configStorage.setUpdateListener(configUpdateListener); + // The mock must be reset and re-mocked for the remainder of the test. + // TODO: Once this migrates to Mockito, just use a spy() + EasyMock.reset(config); + EasyMock.expect(config.kafkaClusterId()).andReturn("test-cluster").anyTimes(); } @Before @@ -234,337 +197,296 @@ public void setUp() { } @Test - public void testStartStop() { - props.put("config.storage.min.insync.replicas", "3"); - props.put("config.storage.max.message.bytes", "1001"); - createStore(); - - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - - verifyConfigure(); - assertEquals(TOPIC, capturedTopic.getValue()); - assertEquals("org.apache.kafka.common.serialization.StringSerializer", capturedProducerProps.getValue().get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)); - assertEquals("org.apache.kafka.common.serialization.ByteArraySerializer", capturedProducerProps.getValue().get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)); - assertEquals("org.apache.kafka.common.serialization.StringDeserializer", capturedConsumerProps.getValue().get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG)); - assertEquals("org.apache.kafka.common.serialization.ByteArrayDeserializer", capturedConsumerProps.getValue().get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)); + public void testTaskCountRecordsAndGenerations() throws Exception { + expectConfigure(); + expectStart(Collections.emptyList(), Collections.emptyMap()); - assertEquals(TOPIC, capturedNewTopic.getValue().name()); - assertEquals(1, capturedNewTopic.getValue().numPartitions()); - assertEquals(TOPIC_REPLICATION_FACTOR, capturedNewTopic.getValue().replicationFactor()); - 
assertEquals("3", capturedNewTopic.getValue().configs().get("min.insync.replicas")); - assertEquals("1001", capturedNewTopic.getValue().configs().get("max.message.bytes")); + // Task configs should read to end, write to the log, read to end, write root, then read to end again + expectReadToEnd(new LinkedHashMap<>()); + expectConvertWriteRead( + TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), + "properties", SAMPLE_CONFIGS.get(0)); + expectConvertWriteRead( + TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), + "properties", SAMPLE_CONFIGS.get(1)); + expectReadToEnd(new LinkedHashMap<>()); + expectConvertWriteRead( + COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), + "tasks", 2); // Starts with 0 tasks, after update has 2 + // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks + configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); + EasyMock.expectLastCall(); - configStorage.start(); - configStorage.stop(); + // Records to be read by consumer as it reads to the end of the log + LinkedHashMap serializedConfigs = new LinkedHashMap<>(); + serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + serializedConfigs.put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); + serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); + expectReadToEnd(serializedConfigs); - verify(configLog).start(); - verify(configLog).stop(); - } + // Task count records are read back after writing as well + expectConvertWriteRead( + CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), KafkaConfigBackingStore.TASK_COUNT_RECORD_V0, CONFIGS_SERIALIZED.get(3), + "task-count", 4); + serializedConfigs = new LinkedHashMap<>(); + serializedConfigs.put(CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), CONFIGS_SERIALIZED.get(3)); + 
expectReadToEnd(serializedConfigs); - @Test - public void testSnapshotCannotMutateInternalState() { - props.put("config.storage.min.insync.replicas", "3"); - props.put("config.storage.max.message.bytes", "1001"); - createStore(); + expectPartitionCount(1); + expectStop(); - when(configLog.partitionCount()).thenReturn(1); + PowerMock.replayAll(); configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - ClusterConfigState snapshot = configStorage.snapshot(); - assertNotSame(snapshot.connectorTaskCounts, configStorage.connectorTaskCounts); - assertNotSame(snapshot.connectorConfigs, configStorage.connectorConfigs); - assertNotSame(snapshot.connectorTargetStates, configStorage.connectorTargetStates); - assertNotSame(snapshot.taskConfigs, configStorage.taskConfigs); - assertNotSame(snapshot.connectorTaskCountRecords, configStorage.connectorTaskCountRecords); - assertNotSame(snapshot.connectorTaskConfigGenerations, configStorage.connectorTaskConfigGenerations); - assertNotSame(snapshot.connectorsPendingFencing, configStorage.connectorsPendingFencing); - assertNotSame(snapshot.inconsistentConnectors, configStorage.inconsistent); - } - @Test - public void testPutConnectorConfig() throws Exception { - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); + // Bootstrap as if we had already added the connector, but no tasks had been added yet + whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - // Null before writing + // Before anything is written + String connectorName = CONNECTOR_IDS.get(0); ClusterConfigState configState = configStorage.snapshot(); - assertEquals(-1, configState.offset()); - assertNull(configState.connectorConfig(CONNECTOR_IDS.get(0))); - assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); - - String configKey = CONNECTOR_CONFIG_KEYS.get(1); - String 
targetStateKey = TARGET_STATE_KEYS.get(1); - - doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) - .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)))) - // Config deletion - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ - put(configKey, null); - put(targetStateKey, null); - }}) - ).when(configLog).readToEnd(); - - // Writing should block until it is written and read back from Kafka - expectConvertWriteRead( - CONNECTOR_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), - "properties", SAMPLE_CONFIGS.get(0)); - - configStorage.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null); - configState = configStorage.snapshot(); - - assertEquals(1, configState.offset()); - assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); - assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); - verify(configUpdateListener).onConnectorConfigUpdate(CONNECTOR_IDS.get(0)); + assertFalse(configState.pendingFencing(connectorName)); + assertNull(configState.taskCountRecord(connectorName)); + assertNull(configState.taskConfigGeneration(connectorName)); - // Second should also block and all configs should still be available - expectConvertWriteRead( - CONNECTOR_CONFIG_KEYS.get(1), KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), - "properties", SAMPLE_CONFIGS.get(1)); + // Writing task configs should block until all the writes have been performed and the root record update + // has completed + List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + configStorage.putTaskConfigs("connector1", taskConfigs); - configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1), null); configState = configStorage.snapshot(); + assertEquals(3, configState.offset()); + 
assertTrue(configState.pendingFencing(connectorName)); + assertNull(configState.taskCountRecord(connectorName)); + assertEquals(0, (long) configState.taskConfigGeneration(connectorName)); - assertEquals(2, configState.offset()); - assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); - assertEquals(SAMPLE_CONFIGS.get(1), configState.connectorConfig(CONNECTOR_IDS.get(1))); - verify(configUpdateListener).onConnectorConfigUpdate(CONNECTOR_IDS.get(1)); - - // Config deletion - when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(null); - when(converter.toConnectData(TOPIC, null)).thenReturn(new SchemaAndValue(null, null)); - when(configLog.sendWithReceipt(AdditionalMatchers.or(Mockito.eq(configKey), Mockito.eq(targetStateKey)), - Mockito.isNull())).thenReturn(producerFuture); + configStorage.putTaskCountRecord(connectorName, 4); - // Deletion should remove the second one we added - configStorage.removeConnectorConfig(CONNECTOR_IDS.get(1)); configState = configStorage.snapshot(); - assertEquals(4, configState.offset()); - assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); - assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); - assertNull(configState.targetState(CONNECTOR_IDS.get(1))); - verify(configUpdateListener).onConnectorConfigRemove(CONNECTOR_IDS.get(1)); + assertFalse(configState.pendingFencing(connectorName)); + assertEquals(4, (long) configState.taskCountRecord(connectorName)); + assertEquals(0, (long) configState.taskConfigGeneration(connectorName)); configStorage.stop(); - verify(configLog).stop(); + + PowerMock.verifyAll(); } @Test - public void testPutConnectorConfigWithTargetState() throws Exception { - when(configLog.partitionCount()).thenReturn(1); + public void testPutTaskConfigs() throws Exception { + expectConfigure(); + expectStart(Collections.emptyList(), Collections.emptyMap()); + + // Task configs should read to end, write to the log, read to end, 
write root, then read to end again + expectReadToEnd(new LinkedHashMap<>()); + expectConvertWriteRead( + TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), + "properties", SAMPLE_CONFIGS.get(0)); + expectConvertWriteRead( + TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), + "properties", SAMPLE_CONFIGS.get(1)); + expectReadToEnd(new LinkedHashMap<>()); + expectConvertWriteRead( + COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), + "tasks", 2); // Starts with 0 tasks, after update has 2 + // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks + configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); + EasyMock.expectLastCall(); + + // Records to be read by consumer as it reads to the end of the log + LinkedHashMap serializedConfigs = new LinkedHashMap<>(); + serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + serializedConfigs.put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); + serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); + expectReadToEnd(serializedConfigs); + + expectPartitionCount(1); + expectStop(); + + PowerMock.replayAll(); configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); configStorage.start(); + // Bootstrap as if we had already added the connector, but no tasks had been added yet + whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); + // Null before writing ClusterConfigState configState = configStorage.snapshot(); assertEquals(-1, configState.offset()); - assertNull(configState.connectorConfig(CONNECTOR_IDS.get(0))); - assertNull(configState.targetState(CONNECTOR_IDS.get(0))); + assertNull(configState.taskConfig(TASK_IDS.get(0))); + 
assertNull(configState.taskConfig(TASK_IDS.get(1))); + + // Writing task configs should block until all the writes have been performed and the root record update + // has completed + List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + configStorage.putTaskConfigs("connector1", taskConfigs); + + // Validate root config by listing all connectors and tasks + configState = configStorage.snapshot(); + assertEquals(3, configState.offset()); + String connectorName = CONNECTOR_IDS.get(0); + assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); + assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName)); + assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); + assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + + configStorage.stop(); - doAnswer(expectReadToEnd(new LinkedHashMap() {{ - put(TARGET_STATE_KEYS.get(0), TARGET_STATES_SERIALIZED.get(2)); - put(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - }}) - ).when(configLog).readToEnd(); + PowerMock.verifyAll(); + } - // We expect to write the target state first, followed by the config write and then a read to end - // We expect to write the target state first, followed by the config write and then a read to end - expectConvertWriteRead( - TARGET_STATE_KEYS.get(0), KafkaConfigBackingStore.TARGET_STATE_V1, TARGET_STATES_SERIALIZED.get(2), - "state.v2", TargetState.STOPPED.name()); + @Test + public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception { + expectConfigure(); + expectStart(Collections.emptyList(), Collections.emptyMap()); + // Task configs should read to end, write to the log, read to end, write root, then read to end again + expectReadToEnd(new LinkedHashMap<>()); expectConvertWriteRead( - CONNECTOR_CONFIG_KEYS.get(0), 
KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), + TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), "properties", SAMPLE_CONFIGS.get(0)); + expectConvertWriteRead( + TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), + "properties", SAMPLE_CONFIGS.get(1)); + expectReadToEnd(new LinkedHashMap<>()); + expectConvertWriteRead( + COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), + "tasks", 2); // Starts with 0 tasks, after update has 2 + // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks + configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); + EasyMock.expectLastCall(); - // Writing should block until it is written and read back from Kafka - configStorage.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), TargetState.STOPPED); - configState = configStorage.snapshot(); - assertEquals(2, configState.offset()); - assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(0))); - assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); + // Records to be read by consumer as it reads to the end of the log + LinkedHashMap serializedConfigs = new LinkedHashMap<>(); + serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + serializedConfigs.put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); + serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); + expectReadToEnd(serializedConfigs); - // We don't expect the config update listener's onConnectorTargetStateChange hook to be invoked - verify(configUpdateListener, never()).onConnectorTargetStateChange(anyString()); + // Task configs should read to end, write to the log, read to end, write root, then read to end again + expectReadToEnd(new 
LinkedHashMap<>()); + expectConvertWriteRead( + TASK_CONFIG_KEYS.get(2), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(3), + "properties", SAMPLE_CONFIGS.get(2)); + expectReadToEnd(new LinkedHashMap<>()); + expectConvertWriteRead( + COMMIT_TASKS_CONFIG_KEYS.get(1), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(4), + "tasks", 1); // Starts with 2 tasks, after update has 3 - verify(configUpdateListener).onConnectorConfigUpdate(CONNECTOR_IDS.get(0)); + // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks + configUpdateListener.onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(2))); + EasyMock.expectLastCall(); - configStorage.stop(); - verify(configLog).stop(); - } + // Records to be read by consumer as it reads to the end of the log + serializedConfigs = new LinkedHashMap<>(); + serializedConfigs.put(TASK_CONFIG_KEYS.get(2), CONFIGS_SERIALIZED.get(3)); + serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(4)); + expectReadToEnd(serializedConfigs); - @Test - public void testPutConnectorConfigProducerError() throws Exception { - when(configLog.partitionCount()).thenReturn(1); + expectPartitionCount(1); + expectStop(); + + PowerMock.replayAll(); configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); configStorage.start(); - when(converter.fromConnectData(TOPIC, KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONNECTOR_CONFIG_STRUCTS.get(0))) - .thenReturn(CONFIGS_SERIALIZED.get(0)); - when(configLog.sendWithReceipt(anyString(), any(byte[].class))).thenReturn(producerFuture); + // Bootstrap as if we had already added the connector, but no tasks had been added yet + whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); + whiteboxAddConnector(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1), Collections.emptyList()); - // Verify initial state + // Null before writing 
ClusterConfigState configState = configStorage.snapshot(); assertEquals(-1, configState.offset()); - assertEquals(0, configState.connectors().size()); + assertNull(configState.taskConfig(TASK_IDS.get(0))); + assertNull(configState.taskConfig(TASK_IDS.get(1))); - Exception thrownException = new ExecutionException(new TopicAuthorizationException(Collections.singleton("test"))); - when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenThrow(thrownException); + // Writing task configs should block until all the writes have been performed and the root record update + // has completed + List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + configStorage.putTaskConfigs("connector1", taskConfigs); + taskConfigs = Collections.singletonList(SAMPLE_CONFIGS.get(2)); + configStorage.putTaskConfigs("connector2", taskConfigs); - // verify that the producer exception from KafkaBasedLog::send is propagated - ConnectException e = assertThrows(ConnectException.class, () -> configStorage.putConnectorConfig(CONNECTOR_IDS.get(0), - SAMPLE_CONFIGS.get(0), null)); - assertTrue(e.getMessage().contains("Error writing connector configuration to Kafka")); - assertEquals(thrownException, e.getCause()); + // Validate root config by listing all connectors and tasks + configState = configStorage.snapshot(); + assertEquals(5, configState.offset()); + String connectorName1 = CONNECTOR_IDS.get(0); + String connectorName2 = CONNECTOR_IDS.get(1); + assertEquals(Arrays.asList(connectorName1, connectorName2), new ArrayList<>(configState.connectors())); + assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName1)); + assertEquals(Collections.singletonList(TASK_IDS.get(2)), configState.tasks(connectorName2)); + assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); + assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); + assertEquals(SAMPLE_CONFIGS.get(2), 
configState.taskConfig(TASK_IDS.get(2))); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); configStorage.stop(); - verify(configLog).stop(); + + PowerMock.verifyAll(); } @Test - public void testRemoveConnectorConfigSlowProducer() throws Exception { - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - - @SuppressWarnings("unchecked") - Future connectorConfigProducerFuture = mock(Future.class); - - @SuppressWarnings("unchecked") - Future targetStateProducerFuture = mock(Future.class); - - when(configLog.sendWithReceipt(anyString(), isNull())) - // tombstone for the connector config - .thenReturn(connectorConfigProducerFuture) - // tombstone for the connector target state - .thenReturn(targetStateProducerFuture); + public void testPutTaskConfigsZeroTasks() throws Exception { + expectConfigure(); + expectStart(Collections.emptyList(), Collections.emptyMap()); - when(connectorConfigProducerFuture.get(eq(READ_WRITE_TOTAL_TIMEOUT_MS), any(TimeUnit.class))) - .thenAnswer((Answer) invocation -> { - time.sleep(READ_WRITE_TOTAL_TIMEOUT_MS - 1000); - return null; - }); + // Task configs should read to end, write to the log, read to end, write root. 
+ expectReadToEnd(new LinkedHashMap<>()); + expectConvertWriteRead( + COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0), + "tasks", 0); // We have 0 tasks + // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks + configUpdateListener.onTaskConfigUpdate(Collections.emptyList()); + EasyMock.expectLastCall(); - // the future get timeout is expected to be reduced according to how long the previous Future::get took - when(targetStateProducerFuture.get(eq(1000L), any(TimeUnit.class))) - .thenAnswer((Answer) invocation -> { - time.sleep(1000); - return null; - }); + // Records to be read by consumer as it reads to the end of the log + LinkedHashMap serializedConfigs = new LinkedHashMap<>(); + serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + expectReadToEnd(serializedConfigs); - @SuppressWarnings("unchecked") - Future future = mock(Future.class); - when(configLog.readToEnd()).thenReturn(future); + expectPartitionCount(1); + expectStop(); - // the Future::get calls on the previous two producer futures exhausted the overall timeout; so expect the - // timeout on the log read future to be 0 - when(future.get(eq(0L), any(TimeUnit.class))).thenReturn(null); + PowerMock.replayAll(); - configStorage.removeConnectorConfig("test-connector"); - configStorage.stop(); - verify(configLog).stop(); - } + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + configStorage.start(); - @Test - @SuppressWarnings("unchecked") - public void testWritePrivileges() throws Exception { - // With exactly.once.source.support = preparing (or also, "enabled"), we need to use a transactional producer - // to write some types of messages to the config topic - props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); - createStore(); + // Bootstrap as if we had already added the connector, but no tasks had been added yet + 
whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - when(configLog.partitionCount()).thenReturn(1); + // Null before writing + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(-1, configState.offset()); - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); + // Writing task configs should block until all the writes have been performed and the root record update + // has completed + List> taskConfigs = Collections.emptyList(); + configStorage.putTaskConfigs("connector1", taskConfigs); - // Try and fail to write a task count record to the config topic without write privileges - when(converter.fromConnectData(TOPIC, KafkaConfigBackingStore.TASK_COUNT_RECORD_V0, CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(0))) - .thenReturn(CONFIGS_SERIALIZED.get(0)); - - // Should fail the first time since we haven't claimed write privileges - assertThrows(IllegalStateException.class, () -> configStorage.putTaskCountRecord(CONNECTOR_IDS.get(0), 6)); - - // Claim write privileges - doReturn(fencableProducer).when(configStorage).createFencableProducer(); - // And write the task count record successfully - when(fencableProducer.send(any(ProducerRecord.class))).thenReturn(null); - doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) - .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)))) - .when(configLog).readToEnd(); - when(converter.toConnectData(TOPIC, CONFIGS_SERIALIZED.get(0))) - .thenReturn(new SchemaAndValue(null, structToMap(CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(0)))); - - // Should succeed now - configStorage.claimWritePrivileges(); - configStorage.putTaskCountRecord(CONNECTOR_IDS.get(0), 6); - - verify(fencableProducer).beginTransaction(); - verify(fencableProducer).commitTransaction(); - - // Try to write a connector config - 
when(converter.fromConnectData(TOPIC, KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONNECTOR_CONFIG_STRUCTS.get(0))) - .thenReturn(CONFIGS_SERIALIZED.get(1)); - // Get fenced out - doThrow(new ProducerFencedException("Better luck next time")) - .doNothing() - .when(fencableProducer).commitTransaction(); - - // Should fail again when we get fenced out - assertThrows(PrivilegedWriteException.class, () -> configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(0), null)); - - verify(fencableProducer, times(2)).beginTransaction(); - verify(fencableProducer).close(Duration.ZERO); - - // Should fail if we retry without reclaiming write privileges - assertThrows(IllegalStateException.class, () -> configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(0), null)); - - // In the meantime, write a target state (which doesn't require write privileges) - when(converter.fromConnectData(TOPIC, KafkaConfigBackingStore.TARGET_STATE_V1, TARGET_STATE_PAUSED)) - .thenReturn(CONFIGS_SERIALIZED.get(1)); - when(configLog.sendWithReceipt("target-state-" + CONNECTOR_IDS.get(1), CONFIGS_SERIALIZED.get(1))) - .thenReturn(producerFuture); - when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(null); - - // Should succeed even without write privileges (target states can be written by anyone) - configStorage.putTargetState(CONNECTOR_IDS.get(1), TargetState.PAUSED); - - // Reclaim write privileges and successfully write the config - when(converter.toConnectData(TOPIC, CONFIGS_SERIALIZED.get(2))) - .thenReturn(new SchemaAndValue(null, structToMap(CONNECTOR_CONFIG_STRUCTS.get(0)))); - - // Should succeed if we re-claim write privileges - configStorage.claimWritePrivileges(); - configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(0), null); - - verify(fencableProducer, times(3)).beginTransaction(); - verify(fencableProducer, times(3)).commitTransaction(); - 
verify(configUpdateListener).onConnectorConfigUpdate(CONNECTOR_IDS.get(1)); + // Validate root config by listing all connectors and tasks + configState = configStorage.snapshot(); + assertEquals(1, configState.offset()); + String connectorName = CONNECTOR_IDS.get(0); + assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); + assertEquals(Collections.emptyList(), configState.tasks(connectorName)); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); configStorage.stop(); - verify(configLog).stop(); - verify(configStorage, times(2)).createFencableProducer(); - verify(fencableProducer, times(2)).close(Duration.ZERO); + + PowerMock.verifyAll(); } @Test - public void testRestoreTargetStateUnexpectedDeletion() { + public void testBackgroundUpdateTargetState() throws Exception { + // verify that we handle target state changes correctly when they come up through the log + + expectConfigure(); List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), @@ -572,731 +494,69 @@ public void testRestoreTargetStateUnexpectedDeletion() { CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - 
deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(3), null); - deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserializedOnStartup = new LinkedHashMap<>(); + deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); + deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); + deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); logOffset = 5; - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); + expectStart(existingRecords, deserializedOnStartup); - // The target state deletion should reset the state to STARTED - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(5, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); - assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); + LinkedHashMap serializedAfterStartup = new LinkedHashMap<>(); + serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); + serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testRestoreTargetState() { - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, 
TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(1), - CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - // A worker running an older version wrote this target state; make sure we can handle it correctly - deserialized.put(CONFIGS_SERIALIZED.get(3), TARGET_STATE_PAUSED_LEGACY); - deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - deserialized.put(CONFIGS_SERIALIZED.get(5), TARGET_STATE_STOPPED); - logOffset = 6; - - expectStart(existingRecords, deserialized); - - // Shouldn't see any callbacks since this is during startup - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - - // Should see a single connector with initial state paused - ClusterConfigState configState = 
configStorage.snapshot(); - assertEquals(6, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); - assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0))); - assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1))); - - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testRestore() { - // Restoring data should notify only of the latest values after loading is complete. This also validates - // that inconsistent state is ignored. - - // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_TASK_COUNT_RECORD_KEYS.get(1), - CONFIGS_SERIALIZED.get(6), new RecordHeaders(), Optional.empty()), - // 
Connector after root update should make it through, task update shouldn't - new ConsumerRecord<>(TOPIC, 0, 7, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(7), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 8, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(8), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(3), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(4), CONNECTOR_CONFIG_STRUCTS.get(1)); - deserialized.put(CONFIGS_SERIALIZED.get(5), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - deserialized.put(CONFIGS_SERIALIZED.get(6), CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(1)); - deserialized.put(CONFIGS_SERIALIZED.get(7), CONNECTOR_CONFIG_STRUCTS.get(2)); - deserialized.put(CONFIGS_SERIALIZED.get(8), TASK_CONFIG_STRUCTS.get(1)); - logOffset = 9; - - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - - // Should see a single connector and its config should be the last one seen anywhere in the log - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(logOffset, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); - assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); - // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2] - assertEquals(SAMPLE_CONFIGS.get(2), 
configState.connectorConfig(CONNECTOR_IDS.get(0))); - // Should see 2 tasks for that connector. Only config updates before the root key update should be reflected - assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(CONNECTOR_IDS.get(0))); - // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] - assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); - assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(1))); - assertEquals(9, (int) configState.taskCountRecord(CONNECTOR_IDS.get(1))); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); - assertEquals(Collections.singleton("connector1"), configState.connectorsPendingFencing); - - // Shouldn't see any callbacks since this is during startup - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testRestoreConnectorDeletion() { - // Restoring data should notify only of the latest values after loading is complete. This also validates - // that inconsistent state is ignored. 
- - // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, TARGET_STATE_KEYS.get(0), - CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty())); - - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(3), null); - deserialized.put(CONFIGS_SERIALIZED.get(4), null); - deserialized.put(CONFIGS_SERIALIZED.get(5), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - - logOffset = 6; - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - - // Should see a single connector and its config should be the last one seen anywhere in the log - ClusterConfigState configState = 
configStorage.snapshot(); - assertEquals(6, configState.offset()); // Should always be next to be read, even if uncommitted - assertTrue(configState.connectors().isEmpty()); - - // Shouldn't see any callbacks since this is during startup - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testRestoreZeroTasks() { - // Restoring data should notify only of the latest values after loading is complete. This also validates - // that inconsistent state is ignored. - - // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), - // Connector after root update should make it through, task update shouldn't - new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(6), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 7, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - 
CONFIGS_SERIALIZED.get(7), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(3), CONNECTOR_CONFIG_STRUCTS.get(1)); - deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - deserialized.put(CONFIGS_SERIALIZED.get(5), CONNECTOR_CONFIG_STRUCTS.get(2)); - deserialized.put(CONFIGS_SERIALIZED.get(6), TASK_CONFIG_STRUCTS.get(1)); - deserialized.put(CONFIGS_SERIALIZED.get(7), TASKS_COMMIT_STRUCT_ZERO_TASK_CONNECTOR); - logOffset = 8; - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - - // Should see a single connector and its config should be the last one seen anywhere in the log - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(8, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); - // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2] - assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0))); - // Should see 0 tasks for that connector. 
- assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); - // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); - - // Shouldn't see any callbacks since this is during startup - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testRecordToRestartRequest() { - ConsumerRecord record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()); - Struct struct = RESTART_REQUEST_STRUCTS.get(0); - SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct)); - RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue); - assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName()); - assertEquals(struct.getBoolean(INCLUDE_TASKS_FIELD_NAME), restartRequest.includeTasks()); - assertEquals(struct.getBoolean(ONLY_FAILED_FIELD_NAME), restartRequest.onlyFailed()); - } - - @Test - public void testRecordToRestartRequestOnlyFailedInconsistent() { - ConsumerRecord record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()); - Struct struct = ONLY_FAILED_MISSING_STRUCT; - SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct)); - RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue); - assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName()); - assertEquals(struct.getBoolean(INCLUDE_TASKS_FIELD_NAME), restartRequest.includeTasks()); - assertFalse(restartRequest.onlyFailed()); - } - - @Test - public void testRecordToRestartRequestIncludeTasksInconsistent() { - ConsumerRecord record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), - 
CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()); - Struct struct = INCLUDE_TASKS_MISSING_STRUCT; - SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct)); - RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue); - assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName()); - assertFalse(restartRequest.includeTasks()); - assertEquals(struct.getBoolean(ONLY_FAILED_FIELD_NAME), restartRequest.onlyFailed()); - } - - @Test - public void testFencableProducerPropertiesOverrideUserSuppliedValues() { - props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); - String groupId = "my-other-connect-cluster"; - props.put(GROUP_ID_CONFIG, groupId); - props.put(TRANSACTIONAL_ID_CONFIG, "my-custom-transactional-id"); - props.put(ENABLE_IDEMPOTENCE_CONFIG, "false"); - createStore(); - - Map fencableProducerProperties = configStorage.fencableProducerProps(config); - assertEquals("connect-cluster-" + groupId, fencableProducerProperties.get(TRANSACTIONAL_ID_CONFIG)); - assertEquals("true", fencableProducerProperties.get(ENABLE_IDEMPOTENCE_CONFIG)); - } - - @Test - public void testConsumerPropertiesDoNotOverrideUserSuppliedValuesWithoutExactlyOnceSourceEnabled() { - props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); - props.put(ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_UNCOMMITTED.toString()); - createStore(); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - assertEquals( - IsolationLevel.READ_UNCOMMITTED.toString(), - capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG) - ); - } - - @Test - public void testClientIds() { - props = new HashMap<>(DEFAULT_CONFIG_STORAGE_PROPS); - props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled"); - createStore(); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - Map fencableProducerProps = configStorage.fencableProducerProps(config); - - final String 
expectedClientId = CLIENT_ID_BASE + "configs"; - assertEquals(expectedClientId, capturedProducerProps.getValue().get(CLIENT_ID_CONFIG)); - assertEquals(expectedClientId, capturedConsumerProps.getValue().get(CLIENT_ID_CONFIG)); - assertEquals(expectedClientId + "-leader", fencableProducerProps.get(CLIENT_ID_CONFIG)); - } - - @Test - public void testExceptionOnStartWhenConfigTopicHasMultiplePartitions() { - when(configLog.partitionCount()).thenReturn(2); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - ConfigException e = assertThrows(ConfigException.class, () -> configStorage.start()); - assertTrue(e.getMessage().contains("required to have a single partition")); - } - - @Test - public void testFencableProducerPropertiesInsertedByDefault() { - props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); - String groupId = "my-connect-cluster"; - props.put(GROUP_ID_CONFIG, groupId); - props.remove(TRANSACTIONAL_ID_CONFIG); - props.remove(ENABLE_IDEMPOTENCE_CONFIG); - createStore(); - - Map fencableProducerProperties = configStorage.fencableProducerProps(config); - assertEquals("connect-cluster-" + groupId, fencableProducerProperties.get(TRANSACTIONAL_ID_CONFIG)); - assertEquals("true", fencableProducerProperties.get(ENABLE_IDEMPOTENCE_CONFIG)); - } - - @Test - public void testConsumerPropertiesInsertedByDefaultWithExactlyOnceSourceEnabled() { - props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled"); - props.remove(ISOLATION_LEVEL_CONFIG); - createStore(); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - assertEquals( - IsolationLevel.READ_COMMITTED.toString(), - capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG) - ); - } - - @Test - public void testConsumerPropertiesOverrideUserSuppliedValuesWithExactlyOnceSourceEnabled() { - props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled"); - props.put(ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_UNCOMMITTED.toString()); - createStore(); - - 
configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - assertEquals( - IsolationLevel.READ_COMMITTED.toString(), - capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG) - ); - } - - @Test - public void testConsumerPropertiesNotInsertedByDefaultWithoutExactlyOnceSourceEnabled() { - props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing"); - props.remove(ISOLATION_LEVEL_CONFIG); - createStore(); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - assertNull(capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG)); - } - - @Test - public void testBackgroundConnectorDeletion() throws Exception { - // verify that we handle connector deletions correctly when they come up through the log - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(1)); - deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - logOffset = 5; - - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - 
configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - configStorage.start(); - verify(configLog).start(); - - // Should see a single connector with initial state paused - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); - assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); - assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(new ConnectorTaskId(CONNECTOR_IDS.get(0), 0))); - assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(new ConnectorTaskId(CONNECTOR_IDS.get(0), 1))); - assertEquals(2, configState.taskCount(CONNECTOR_IDS.get(0))); - - LinkedHashMap serializedData = new LinkedHashMap<>(); - serializedData.put(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - serializedData.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(1)); - doAnswer(expectReadToEnd(serializedData)).when(configLog).readToEnd(); - - Map deserializedData = new HashMap<>(); - deserializedData.put(CONNECTOR_CONFIG_KEYS.get(0), null); - deserializedData.put(TARGET_STATE_KEYS.get(0), null); - expectRead(serializedData, deserializedData); - - configStorage.refresh(0, TimeUnit.SECONDS); - verify(configUpdateListener).onConnectorConfigRemove(CONNECTOR_IDS.get(0)); - - configState = configStorage.snapshot(); - // Connector should now be removed from the snapshot - assertFalse(configState.contains(CONNECTOR_IDS.get(0))); - assertEquals(0, configState.taskCount(CONNECTOR_IDS.get(0))); - // Ensure that the deleted connector's deferred task updates have been cleaned up - // in order to prevent unbounded growth of the map - assertEquals(Collections.emptyMap(), configStorage.deferredTaskUpdates); - - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testPutTaskConfigsDoesNotResolveAllInconsistencies() throws Exception { - // Test a case where a failure and compaction has left us in an 
inconsistent state when reading the log. - // We start out by loading an initial configuration where we started to write a task update, and then - // compaction cleaned up the earlier record. - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - // This is the record that has been compacted: - //new ConsumerRecord<>(TOPIC, 0, 1, TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(1)), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(4), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(5), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(4), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - deserialized.put(CONFIGS_SERIALIZED.get(5), TASK_CONFIG_STRUCTS.get(1)); - logOffset = 6; - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - - // After reading the log, it should have been in an inconsistent state - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(6, configState.offset()); // Should always be next to be read, not last committed - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); - // 
Inconsistent data should leave us with no tasks listed for the connector and an entry in the inconsistent list - assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); - // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] - assertNull(configState.taskConfig(TASK_IDS.get(0))); - assertNull(configState.taskConfig(TASK_IDS.get(1))); - assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configState.inconsistentConnectors()); - - // Records to be read by consumer as it reads to the end of the log - LinkedHashMap serializedConfigs = new LinkedHashMap<>(); - serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); - // Successful attempt to write new task config - doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(serializedConfigs)) - .when(configLog).readToEnd(); - expectConvertWriteRead( - TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), - "properties", SAMPLE_CONFIGS.get(0)); - expectConvertWriteRead( - COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), - "tasks", 1); // Updated to just 1 task - - // Next, issue a write that has everything that is needed and it should be accepted. Note that in this case - // we are going to shrink the number of tasks to 1 - configStorage.putTaskConfigs("connector1", Collections.singletonList(SAMPLE_CONFIGS.get(0))); - - // Validate updated config - configState = configStorage.snapshot(); - // This is only two more ahead of the last one because multiple calls fail, and so their configs are not written - // to the topic. Only the last call with 1 task config + 1 commit actually gets written. 
- assertEquals(8, configState.offset()); - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); - assertEquals(Collections.singletonList(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0))); - assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); - - // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(0))); - - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testPutRestartRequestOnlyFailed() throws Exception { - RestartRequest restartRequest = new RestartRequest(CONNECTOR_IDS.get(0), true, false); - testPutRestartRequest(restartRequest); - } - - @Test - public void testPutRestartRequestOnlyFailedIncludingTasks() throws Exception { - RestartRequest restartRequest = new RestartRequest(CONNECTOR_IDS.get(0), true, true); - testPutRestartRequest(restartRequest); - } - - private void testPutRestartRequest(RestartRequest restartRequest) throws Exception { - expectStart(Collections.emptyList(), Collections.emptyMap()); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - configStorage.start(); - verify(configLog).start(); - - LinkedHashMap recordsToRead = new LinkedHashMap<>(); - recordsToRead.put(RESTART_CONNECTOR_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - doAnswer(expectReadToEnd(recordsToRead)).when(configLog).readToEnd(); - - expectConvertWriteRead2( - RESTART_CONNECTOR_KEYS.get(0), KafkaConfigBackingStore.RESTART_REQUEST_V0, CONFIGS_SERIALIZED.get(0), - new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, restartRequest.onlyFailed()).put(INCLUDE_TASKS_FIELD_NAME, restartRequest.includeTasks())); - - // Writing should block until it 
is written and read back from Kafka - configStorage.putRestartRequest(restartRequest); - - final ArgumentCaptor restartRequestCaptor = ArgumentCaptor.forClass(RestartRequest.class); - verify(configUpdateListener).onRestartRequest(restartRequestCaptor.capture()); - - assertEquals(restartRequest.connectorName(), restartRequestCaptor.getValue().connectorName()); - assertEquals(restartRequest.onlyFailed(), restartRequestCaptor.getValue().onlyFailed()); - assertEquals(restartRequest.includeTasks(), restartRequestCaptor.getValue().includeTasks()); - - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testRestoreRestartRequestInconsistentState() { - // Restoring data should notify only of the latest values after loading is complete. This also validates - // that inconsistent state doesn't prevent startup. - // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(1), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(1), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), RESTART_REQUEST_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), RESTART_REQUEST_STRUCTS.get(1)); - deserialized.put(CONFIGS_SERIALIZED.get(2), RESTART_REQUEST_STRUCTS.get(2)); - deserialized.put(CONFIGS_SERIALIZED.get(3), 
null); - logOffset = 4; - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - configStorage.start(); - verify(configLog).start(); - - // Shouldn't see any callbacks since this is during startup - verify(configUpdateListener, never()).onConnectorConfigRemove(anyString()); - verify(configUpdateListener, never()).onConnectorConfigUpdate(anyString()); - verify(configUpdateListener, never()).onTaskConfigUpdate(anyCollection()); - verify(configUpdateListener, never()).onConnectorTargetStateChange(anyString()); - verify(configUpdateListener, never()).onSessionKeyUpdate(any(SessionKey.class)); - verify(configUpdateListener, never()).onRestartRequest(any(RestartRequest.class)); - verify(configUpdateListener, never()).onLoggingLevelUpdate(anyString(), anyString()); - - configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testPutTaskConfigsZeroTasks() throws Exception { - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - verify(configLog).start(); - - // Records to be read by consumer as it reads to the end of the log - doAnswer(expectReadToEnd(new LinkedHashMap<>())). 
- doAnswer(expectReadToEnd(Collections.singletonMap(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) - .when(configLog).readToEnd(); - - expectConvertWriteRead( - COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0), - "tasks", 0); // We have 0 tasks - - // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - - - // Null before writing - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(-1, configState.offset()); - - // Writing task configs should block until all the writes have been performed and the root record update - // has completed - List> taskConfigs = Collections.emptyList(); - configStorage.putTaskConfigs("connector1", taskConfigs); - - // Validate root config by listing all connectors and tasks - configState = configStorage.snapshot(); - assertEquals(1, configState.offset()); - String connectorName = CONNECTOR_IDS.get(0); - assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); - assertEquals(Collections.emptyList(), configState.tasks(connectorName)); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + Map deserializedAfterStartup = new HashMap<>(); + deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED); + deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED); - // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Collections.emptyList()); + expectRead(serializedAfterStartup, deserializedAfterStartup); - configStorage.stop(); - verify(configLog).stop(); - } + configUpdateListener.onConnectorTargetStateChange(CONNECTOR_IDS.get(0)); + EasyMock.expectLastCall(); - @Test - public void testBackgroundUpdateTargetState() throws Exception { 
- // verify that we handle target state changes correctly when they come up through the log - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserializedOnStartup = new LinkedHashMap<>(); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - logOffset = 5; + expectPartitionCount(1); + expectStop(); - expectStart(existingRecords, deserializedOnStartup); - when(configLog.partitionCount()).thenReturn(1); + PowerMock.replayAll(); configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); configStorage.start(); - verify(configLog).start(); // Should see a single connector with initial state started ClusterConfigState configState = configStorage.snapshot(); assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet()); assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); - LinkedHashMap serializedAfterStartup = new LinkedHashMap<>(); - serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), 
CONFIGS_SERIALIZED.get(0)); - serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); - doAnswer(expectReadToEnd(serializedAfterStartup)).when(configLog).readToEnd(); - - Map deserializedAfterStartup = new HashMap<>(); - deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED); - deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED); - expectRead(serializedAfterStartup, deserializedAfterStartup); - // Should see two connectors now, one paused and one stopped configStorage.refresh(0, TimeUnit.SECONDS); - verify(configUpdateListener).onConnectorTargetStateChange(CONNECTOR_IDS.get(0)); configState = configStorage.snapshot(); - assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet()); assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0))); assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1))); configStorage.stop(); - verify(configStorage).stop(); + + PowerMock.verifyAll(); } @Test public void testSameTargetState() throws Exception { // verify that we handle target state changes correctly when they come up through the log + + expectConfigure(); List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); + new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 
0, CONNECTOR_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), + CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), + CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); LinkedHashMap deserialized = new LinkedHashMap<>(); deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); @@ -1306,355 +566,73 @@ public void testSameTargetState() throws Exception { expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - verify(configLog).start(); - - ClusterConfigState configState = configStorage.snapshot(); - expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED); - // Should see a single connector with initial state paused - assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); - - expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED); // on resume update listener shouldn't be called - verify(configUpdateListener, never()).onConnectorTargetStateChange(anyString()); - - configStorage.stop(); - verify(configStorage).stop(); - } - - - @Test - public void testPutLogLevel() throws Exception { - final String logger1 = "org.apache.zookeeper"; - final String logger2 = "org.apache.cassandra"; - final String logger3 = "org.apache.kafka.clients"; - final String logger4 = "org.apache.kafka.connect"; - final String level1 = 
"ERROR"; - final String level3 = "WARN"; - final String level4 = "DEBUG"; - - final Struct existingLogLevel = new Struct(KafkaConfigBackingStore.LOGGER_LEVEL_V0) - .put("level", level1); - - // Pre-populate the config topic with a couple of logger level records; these should be ignored (i.e., - // not reported to the update listener) - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, "logger-cluster-" + logger1, - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty() - ), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, "logger-cluster-" + logger2, - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty() - ) - ); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), existingLogLevel); - // Make sure we gracefully handle tombstones - deserialized.put(CONFIGS_SERIALIZED.get(1), null); - logOffset = 2; - - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - - configStorage.start(); - verify(configLog).start(); - - expectConvertWriteRead( - "logger-cluster-" + logger3, KafkaConfigBackingStore.LOGGER_LEVEL_V0, CONFIGS_SERIALIZED.get(2), - "level", level3); - configStorage.putLoggerLevel(logger3, level3); - - expectConvertWriteRead( - "logger-cluster-" + logger4, KafkaConfigBackingStore.LOGGER_LEVEL_V0, CONFIGS_SERIALIZED.get(3), - "level", level4); - configStorage.putLoggerLevel(logger4, level4); + configUpdateListener.onConnectorTargetStateChange(EasyMock.anyString()); + EasyMock.expectLastCall().andStubThrow(new AssertionError("unexpected call to onConnectorTargetStateChange")); - LinkedHashMap newRecords = new LinkedHashMap<>(); - newRecords.put("logger-cluster-" + logger3, CONFIGS_SERIALIZED.get(2)); - newRecords.put("logger-cluster-" + logger4, CONFIGS_SERIALIZED.get(3)); - 
doAnswer(expectReadToEnd(newRecords)).when(configLog).readToEnd(); + expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED); - configStorage.refresh(0, TimeUnit.SECONDS); - verify(configUpdateListener).onLoggingLevelUpdate(logger3, level3); - verify(configUpdateListener).onLoggingLevelUpdate(logger4, level4); + expectPartitionCount(1); + expectStop(); - configStorage.stop(); - verify(configLog).stop(); - } + PowerMock.replayAll(); - @Test - public void testTaskCountRecordsAndGenerations() throws Exception { configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); configStorage.start(); - verify(configLog).start(); - - // Records to be read by consumer as it reads to the end of the log - LinkedHashMap serializedConfigs = new LinkedHashMap<>(); - serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - serializedConfigs.put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); - serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); - - - // Records to be read by consumer as it reads to the end of the log - doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(serializedConfigs)) - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ - put(CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), CONFIGS_SERIALIZED.get(3)); - }}) - ) - .when(configLog).readToEnd(); - - // Task configs should read to end, write to the log, read to end, write root, then read to end again - expectConvertWriteRead2(TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0))); - - expectConvertWriteRead2( - TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", 
SAMPLE_CONFIGS.get(1))); - expectConvertWriteRead2( - COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), - new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2)); - // Starts with 0 tasks, after update has 2 - - // Task count records are read back after writing as well - expectConvertWriteRead2( - CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), KafkaConfigBackingStore.TASK_COUNT_RECORD_V0, CONFIGS_SERIALIZED.get(3), - new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 4)); - - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - // Before anything is written - String connectorName = CONNECTOR_IDS.get(0); + // Should see a single connector with initial state paused ClusterConfigState configState = configStorage.snapshot(); - assertFalse(configState.pendingFencing(connectorName)); - assertNull(configState.taskCountRecord(connectorName)); - assertNull(configState.taskConfigGeneration(connectorName)); - - // Writing task configs should block until all the writes have been performed and the root record update - // has completed - List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); - configStorage.putTaskConfigs("connector1", taskConfigs); - - configState = configStorage.snapshot(); - assertEquals(3, configState.offset()); - assertTrue(configState.pendingFencing(connectorName)); - assertNull(configState.taskCountRecord(connectorName)); - assertEquals(0, (long) configState.taskConfigGeneration(connectorName)); - - configStorage.putTaskCountRecord(connectorName, 4); - - configState = configStorage.snapshot(); - assertEquals(4, configState.offset()); - assertFalse(configState.pendingFencing(connectorName)); - assertEquals(4, (long) configState.taskCountRecord(connectorName)); - assertEquals(0, (long) configState.taskConfigGeneration(connectorName)); + assertEquals(TargetState.STARTED, 
configState.targetState(CONNECTOR_IDS.get(0))); - // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); + configStorage.refresh(0, TimeUnit.SECONDS); configStorage.stop(); - verify(configLog).stop(); - } - - @Test - public void testPutTaskConfigs() throws Exception { - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - verify(configLog).start(); - - doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ - put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); - put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); - }}) - ) - .when(configLog).readToEnd(); - - // Task configs should read to end, write to the log, read to end, write root, then read to end again - expectConvertWriteRead2( - TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0))); - expectConvertWriteRead2( - TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1))); - expectConvertWriteRead2( - COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), - new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2)); // Starts with 0 tasks, after update has 2 - // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - // Null before writing - ClusterConfigState 
configState = configStorage.snapshot(); - assertEquals(-1, configState.offset()); - assertNull(configState.taskConfig(TASK_IDS.get(0))); - assertNull(configState.taskConfig(TASK_IDS.get(1))); - - // Writing task configs should block until all the writes have been performed and the root record update - // has completed - List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); - configStorage.putTaskConfigs("connector1", taskConfigs); - - // Validate root config by listing all connectors and tasks - configState = configStorage.snapshot(); - assertEquals(3, configState.offset()); - String connectorName = CONNECTOR_IDS.get(0); - assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); - assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName)); - assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); - assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); - - // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); - - configStorage.stop(); - verify(configLog).stop(); + PowerMock.verifyAll(); } - @Test - public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception { - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - verify(configLog).start(); - - doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ - put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); - put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); - }}) - ) - .doAnswer(expectReadToEnd(new 
LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ - put(TASK_CONFIG_KEYS.get(2), CONFIGS_SERIALIZED.get(3)); - put(COMMIT_TASKS_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(4)); - }}) - ) - .when(configLog).readToEnd(); - - expectConvertWriteRead2( - TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0))); - expectConvertWriteRead2( - TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1))); - expectConvertWriteRead2( - COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), - new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2)); // Starts with 0 tasks, after update has 2 - - // Null before writing - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(-1, configState.offset()); - assertNull(configState.taskConfig(TASK_IDS.get(0))); - assertNull(configState.taskConfig(TASK_IDS.get(1))); - - // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); - configStorage.putTaskConfigs("connector1", taskConfigs); - - expectConvertWriteRead2( - TASK_CONFIG_KEYS.get(2), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(3), - new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2))); - expectConvertWriteRead2( - COMMIT_TASKS_CONFIG_KEYS.get(1), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(4), - new 
Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 1)); // Starts with 2 tasks, after update has 3 - - addConnector(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1), Collections.emptyList()); - taskConfigs = Collections.singletonList(SAMPLE_CONFIGS.get(2)); - configStorage.putTaskConfigs("connector2", taskConfigs); - - // Validate root config by listing all connectors and tasks - configState = configStorage.snapshot(); - assertEquals(5, configState.offset()); - String connectorName1 = CONNECTOR_IDS.get(0); - String connectorName2 = CONNECTOR_IDS.get(1); - assertEquals(Arrays.asList(connectorName1, connectorName2), new ArrayList<>(configState.connectors())); - assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName1)); - assertEquals(Collections.singletonList(TASK_IDS.get(2)), configState.tasks(connectorName2)); - assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); - assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); - assertEquals(SAMPLE_CONFIGS.get(2), configState.taskConfig(TASK_IDS.get(2))); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); - - // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); - verify(configUpdateListener).onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(2))); - - configStorage.stop(); - verify(configLog).stop(); + private void expectConfigure() throws Exception { + PowerMock.expectPrivate(configStorage, "createKafkaBasedLog", + EasyMock.capture(capturedTopic), EasyMock.capture(capturedProducerProps), + EasyMock.capture(capturedConsumerProps), EasyMock.capture(capturedConsumedCallback), + EasyMock.capture(capturedNewTopic), EasyMock.capture(capturedAdminSupplier), + EasyMock.anyObject(WorkerConfig.class), EasyMock.anyObject(Time.class)) + 
.andReturn(storeLog); } - private void verifyConfigure() { - verify(configStorage).createKafkaBasedLog(capturedTopic.capture(), capturedProducerProps.capture(), - capturedConsumerProps.capture(), capturedConsumedCallback.capture(), - capturedNewTopic.capture(), capturedAdminSupplier.capture(), - any(WorkerConfig.class), any(Time.class)); + private void expectPartitionCount(int partitionCount) { + EasyMock.expect(storeLog.partitionCount()) + .andReturn(partitionCount); } // If non-empty, deserializations should be a LinkedHashMap private void expectStart(final List> preexistingRecords, final Map deserializations) { - doAnswer(invocation -> { + storeLog.start(); + PowerMock.expectLastCall().andAnswer(() -> { for (ConsumerRecord rec : preexistingRecords) capturedConsumedCallback.getValue().onCompletion(null, rec); return null; - }).when(configLog).start(); - + }); for (Map.Entry deserializationEntry : deserializations.entrySet()) { // Note null schema because default settings for internal serialization are schema-less - when(converter.toConnectData(TOPIC, deserializationEntry.getKey())) - .thenReturn(new SchemaAndValue(null, structToMap(deserializationEntry.getValue()))); + EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(deserializationEntry.getKey()))) + .andReturn(new SchemaAndValue(null, structToMap(deserializationEntry.getValue()))); } } - // Expect a conversion & write to the underlying log, followed by a subsequent read when the data is consumed back - // from the log. 
Validate the data that is captured when the conversion is performed matches the specified data - // (by checking a single field's value) - private void expectConvertWriteRead2(final String configKey, final Schema valueSchema, final byte[] serialized, - final Struct value) throws Exception { - doReturn(serialized).when(converter).fromConnectData(eq(TOPIC), eq(valueSchema), eq(value)); - doReturn(producerFuture).when(configLog).sendWithReceipt(eq(configKey), eq(serialized)); - doReturn(null).when(producerFuture).get(anyLong(), any(TimeUnit.class)); - doReturn(new SchemaAndValue(null, structToMap(value))).when(converter).toConnectData(eq(TOPIC), eq(serialized)); - } - - // Expect a conversion & write to the underlying log, followed by a subsequent read when the data is consumed back - // from the log. Validate the data that is captured when the conversion is performed matches the specified data - // (by checking a single field's value) - private void expectConvertWriteRead(final String configKey, final Schema valueSchema, final byte[] serialized, - final String dataFieldName, final Object dataFieldValue) throws Exception { - final ArgumentCaptor capturedRecord = ArgumentCaptor.forClass(Struct.class); - when(converter.fromConnectData(eq(TOPIC), eq(valueSchema), capturedRecord.capture())).thenReturn(serialized); - when(configLog.sendWithReceipt(configKey, serialized)).thenReturn(producerFuture); - when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(null); - when(converter.toConnectData(TOPIC, serialized)).thenAnswer(invocation -> { - assertEquals(dataFieldValue, capturedRecord.getValue().get(dataFieldName)); - // Note null schema because default settings for internal serialization are schema-less - return new SchemaAndValue(null, structToMap(capturedRecord.getValue())); - }); + private void expectStop() { + storeLog.stop(); + PowerMock.expectLastCall(); } private void expectRead(LinkedHashMap serializedValues, Map deserializedValues) { + 
expectReadToEnd(serializedValues); for (Map.Entry deserializedValueEntry : deserializedValues.entrySet()) { byte[] serializedValue = serializedValues.get(deserializedValueEntry.getKey()); - when(converter.toConnectData(TOPIC, serializedValue)) - .thenReturn(new SchemaAndValue(null, structToMap(deserializedValueEntry.getValue()))); + EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(serializedValue))) + .andReturn(new SchemaAndValue(null, structToMap(deserializedValueEntry.getValue()))); } } @@ -1664,33 +642,66 @@ private void expectRead(final String key, final byte[] serializedValue, Struct d expectRead(serializedData, Collections.singletonMap(key, deserializedValue)); } + // Expect a conversion & write to the underlying log, followed by a subsequent read when the data is consumed back + // from the log. Validate the data that is captured when the conversion is performed matches the specified data + // (by checking a single field's value) + private void expectConvertWriteRead(final String configKey, final Schema valueSchema, final byte[] serialized, + final String dataFieldName, final Object dataFieldValue) throws Exception { + final Capture capturedRecord = EasyMock.newCapture(); + if (serialized != null) + EasyMock.expect(converter.fromConnectData(EasyMock.eq(TOPIC), EasyMock.eq(valueSchema), EasyMock.capture(capturedRecord))) + .andReturn(serialized); + + storeLog.sendWithReceipt(EasyMock.eq(configKey), EasyMock.aryEq(serialized)); + EasyMock.expectLastCall().andReturn(producerFuture); + + producerFuture.get(EasyMock.anyLong(), EasyMock.anyObject()); + EasyMock.expectLastCall().andReturn(null); + + EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(serialized))) + .andAnswer(() -> { + if (dataFieldName != null) + assertEquals(dataFieldValue, capturedRecord.getValue().get(dataFieldName)); + // Note null schema because default settings for internal serialization are schema-less + return new SchemaAndValue(null, 
serialized == null ? null : structToMap(capturedRecord.getValue())); + }); + } + // This map needs to maintain ordering - private Answer> expectReadToEnd(final Map serializedConfigs) { - return invocation -> { - for (Map.Entry entry : serializedConfigs.entrySet()) { - capturedConsumedCallback.getValue().onCompletion(null, - new ConsumerRecord<>(TOPIC, 0, logOffset++, 0L, TimestampType.CREATE_TIME, 0, 0, + private void expectReadToEnd(final LinkedHashMap serializedConfigs) { + EasyMock.expect(storeLog.readToEnd()) + .andAnswer(() -> { + TestFuture future = new TestFuture<>(); + for (Map.Entry entry : serializedConfigs.entrySet()) { + capturedConsumedCallback.getValue().onCompletion(null, + new ConsumerRecord<>(TOPIC, 0, logOffset++, 0L, TimestampType.CREATE_TIME, 0, 0, entry.getKey(), entry.getValue(), new RecordHeaders(), Optional.empty())); - } - CompletableFuture f = new CompletableFuture<>(); - f.complete(null); - return f; - }; + } + future.resolveOnGet((Void) null); + return future; + }); + } + + // Manually insert a connector into config storage, updating the task configs, connector config, and root config + private void whiteboxAddConnector(String connectorName, Map connectorConfig, List> taskConfigs) { + Map> storageTaskConfigs = Whitebox.getInternalState(configStorage, "taskConfigs"); + for (int i = 0; i < taskConfigs.size(); i++) + storageTaskConfigs.put(new ConnectorTaskId(connectorName, i), taskConfigs.get(i)); + + Map> connectorConfigs = Whitebox.getInternalState(configStorage, "connectorConfigs"); + connectorConfigs.put(connectorName, connectorConfig); + + Whitebox.>getInternalState(configStorage, "connectorTaskCounts").put(connectorName, taskConfigs.size()); } // Generates a Map representation of Struct. 
Only does shallow traversal, so nested structs are not converted private Map structToMap(Struct struct) { if (struct == null) return null; - Map result = new HashMap<>(); - for (Field field : struct.schema().fields()) result.put(field.name(), struct.get(field)); - return result; - } - private void addConnector(String connectorName, Map connectorConfig, List> taskConfigs) { - for (int i = 0; i < taskConfigs.size(); i++) - configStorage.taskConfigs.put(new ConnectorTaskId(connectorName, i), taskConfigs.get(i)); - configStorage.connectorConfigs.put(connectorName, connectorConfig); - configStorage.connectorTaskCounts.put(connectorName, taskConfigs.size()); + HashMap result = new HashMap<>(); + for (Field field : struct.schema().fields()) + result.put(field.name(), struct.get(field)); + return result; } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java index fe97476a7128e..f959d225377e9 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java @@ -50,6 +50,7 @@ import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.errors.ConnectException; @@ -111,7 +112,7 @@ public class EmbeddedKafkaCluster { // Kafka Config private final KafkaServer[] brokers; private final Properties brokerConfig; - private final Time time = Time.SYSTEM; + private final Time time = new MockTime(); private final int[] currentBrokerPorts; private final String[] currentBrokerLogDirs; private final 
boolean hasListenerConfig; @@ -610,19 +611,6 @@ public ConsumerRecords consumeAll( return new ConsumerRecords<>(records); } - public long endOffset(TopicPartition topicPartition) throws TimeoutException, InterruptedException, ExecutionException { - try (Admin admin = createAdminClient()) { - Map offsets = Collections.singletonMap( - topicPartition, OffsetSpec.latest() - ); - return admin.listOffsets(offsets) - .partitionResult(topicPartition) - // Hardcode duration for now; if necessary, we can add a parameter for it later - .get(10, TimeUnit.SECONDS) - .offset(); - } - } - /** * List all the known partitions for the given {@link Collection} of topics * @param maxDurationMs the max duration to wait for while fetching metadata from Kafka (in milliseconds). diff --git a/core/src/main/java/kafka/server/MetadataVersionConfigValidator.java b/core/src/main/java/kafka/server/MetadataVersionConfigValidator.java deleted file mode 100644 index 042ac09452f5e..0000000000000 --- a/core/src/main/java/kafka/server/MetadataVersionConfigValidator.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server; - -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.image.loader.LoaderManifest; -import org.apache.kafka.image.publisher.MetadataPublisher; -import org.apache.kafka.server.common.MetadataVersion; -import org.apache.kafka.server.fault.FaultHandler; - -public class MetadataVersionConfigValidator implements MetadataPublisher { - private final String name; - private final KafkaConfig config; - private final FaultHandler faultHandler; - - public MetadataVersionConfigValidator( - KafkaConfig config, - FaultHandler faultHandler - ) { - int id = config.brokerId(); - this.name = "MetadataVersionPublisher(id=" + id + ")"; - this.config = config; - this.faultHandler = faultHandler; - } - - @Override - public String name() { - return name; - } - - @Override - public void onMetadataUpdate( - MetadataDelta delta, - MetadataImage newImage, - LoaderManifest manifest - ) { - if (delta.featuresDelta() != null) { - if (delta.metadataVersionChanged().isPresent()) { - onMetadataVersionChanged(newImage.features().metadataVersion()); - } - } - } - - private void onMetadataVersionChanged(MetadataVersion metadataVersion) { - try { - this.config.validateWithMetadataVersion(metadataVersion); - } catch (Throwable t) { - RuntimeException exception = this.faultHandler.handleFault( - "Broker configuration does not support the cluster MetadataVersion", t); - if (exception != null) { - throw exception; - } - } - } -} diff --git a/core/src/main/java/kafka/server/SharePartition.java b/core/src/main/java/kafka/server/SharePartition.java deleted file mode 100644 index ddbdb89930c5c..0000000000000 --- a/core/src/main/java/kafka/server/SharePartition.java +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.server; - -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords; -import org.apache.kafka.common.utils.Time; -import org.apache.kafka.server.util.timer.Timer; -import org.apache.kafka.storage.internals.log.FetchPartitionData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.NavigableMap; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * The SharePartition is used to track the state of a partition that is shared between multiple - * consumers. The class maintains the state of the records that have been fetched from the leader - * and are in-flight. - */ -public class SharePartition { - - private final static Logger log = LoggerFactory.getLogger(SharePartition.class); - - /** - * empty member id used to indicate when a record is not acquired by any member. - */ - final static String EMPTY_MEMBER_ID = Uuid.ZERO_UUID.toString(); - - /** - * The RecordState is used to track the state of a record that has been fetched from the leader. 
- * The state of the records determines if the records should be re-delivered, move the next fetch - * offset, or be state persisted to disk. - */ - public enum RecordState { - AVAILABLE((byte) 0), - ACQUIRED((byte) 1), - ACKNOWLEDGED((byte) 2), - ARCHIVED((byte) 4); - - public final byte id; - - RecordState(byte id) { - this.id = id; - } - - /** - * Validates that the newState is one of the valid transition from the current - * {@code RecordState}. - * - * @param newState State into which requesting to transition; must be non-null - * - * @return {@code RecordState} newState if validation succeeds. Returning - * newState helps state assignment chaining. - * - * @throws IllegalStateException if the state transition validation fails. - */ - public RecordState validateTransition(RecordState newState) throws IllegalStateException { - Objects.requireNonNull(newState, "newState cannot be null"); - if (this == newState) { - throw new IllegalStateException("The state transition is invalid as the new state is" - + "the same as the current state"); - } - - if (this == ACKNOWLEDGED || this == ARCHIVED) { - throw new IllegalStateException("The state transition is invalid from the current state: " + this); - } - - if (this == AVAILABLE && newState != ACQUIRED) { - throw new IllegalStateException("The state can only be transitioned to ACQUIRED from AVAILABLE"); - } - - // Either the transition is from Available -> Acquired or from Acquired -> Available/ - // Acknowledged/Archived. - return newState; - } - - public static RecordState forId(byte id) { - switch (id) { - case 0: - return AVAILABLE; - case 1: - return ACQUIRED; - case 2: - return ACKNOWLEDGED; - case 4: - return ARCHIVED; - default: - throw new IllegalArgumentException("Unknown record state id: " + id); - } - } - } - - /** - * The group id of the share partition belongs to. - */ - private final String groupId; - - /** - * The topic id partition of the share partition. 
- */ - private final TopicIdPartition topicIdPartition; - - /** - * The in-flight record is used to track the state of a record that has been fetched from the - * leader. The state of the record is used to determine if the record should be re-fetched or if it - * can be acknowledged or archived. Once share partition start offset is moved then the in-flight - * records prior to the start offset are removed from the cache. The cache holds data against the - * first offset of the in-flight batch. - */ - private final NavigableMap cachedState; - - /** - * The lock is used to synchronize access to the in-flight records. The lock is used to ensure that - * the in-flight records are accessed in a thread-safe manner. - */ - private final ReadWriteLock lock; - - /** - * The find next fetch offset is used to indicate if the next fetch offset should be recomputed. - */ - private final AtomicBoolean findNextFetchOffset; - - /** - * The lock to ensure that the same share partition does not enter a fetch queue - * while another one is being fetched within the queue. - */ - private final AtomicBoolean fetchLock; - - /** - * The max in-flight messages is used to limit the number of records that can be in-flight at any - * given time. The max in-flight messages is used to prevent the consumer from fetching too many - * records from the leader and running out of memory. - */ - private final int maxInFlightMessages; - - /** - * The max delivery count is used to limit the number of times a record can be delivered to the - * consumer. The max delivery count is used to prevent the consumer re-delivering the same record - * indefinitely. - */ - private final int maxDeliveryCount; - - /** - * The record lock duration is used to limit the duration for which a consumer can acquire a record. - * Once this time period is elapsed, the record will be made available or archived depending on the delivery count. 
- */ - private final int recordLockDurationMs; - - /** - * Timer is used to implement acquisition lock on records that guarantees the movement of records from - * acquired to available/archived state upon timeout - */ - private final Timer timer; - - /** - * Time is used to get the currentTime. - */ - private final Time time; - - /** - * The share partition start offset specifies the partition start offset from which the records - * are cached in the cachedState of the sharePartition. - */ - private long startOffset; - - /** - * The share partition end offset specifies the partition end offset from which the records - * are already fetched. - */ - private long endOffset; - - /** - * The state epoch is used to track the version of the state of the share partition. - */ - private int stateEpoch; - - /** - * The replica manager is used to get the earliest offset of the share partition, so we can adjust the start offset. - */ - private final ReplicaManager replicaManager; - - SharePartition( - String groupId, - TopicIdPartition topicIdPartition, - int maxInFlightMessages, - int maxDeliveryCount, - int recordLockDurationMs, - Timer timer, - Time time, - ReplicaManager replicaManager - ) { - this.groupId = groupId; - this.topicIdPartition = topicIdPartition; - this.maxInFlightMessages = maxInFlightMessages; - this.maxDeliveryCount = maxDeliveryCount; - this.cachedState = new ConcurrentSkipListMap<>(); - this.lock = new ReentrantReadWriteLock(); - this.findNextFetchOffset = new AtomicBoolean(false); - this.fetchLock = new AtomicBoolean(false); - this.recordLockDurationMs = recordLockDurationMs; - this.timer = timer; - this.time = time; - this.replicaManager = replicaManager; - // Initialize the partition. - initialize(); - } - - /** - * The next fetch offset is used to determine the next offset that should be fetched from the leader. 
- * The offset should be the next offset after the last fetched batch but there could be batches/ - * offsets that are either released by acknowledgement API or lock timed out hence the next fetch - * offset might be different from the last batch next offset. Hence, method checks if the next - * fetch offset should be recomputed else returns the last computed next fetch offset. - * - * @return The next fetch offset that should be fetched from the leader. - */ - public long nextFetchOffset() { - // TODO: Implement the logic to compute the next fetch offset. - return 0; - } - - /** - * Acquire the fetched records for the share partition. The acquired records are added to the - * in-flight records and the next fetch offset is updated to the next offset that should be - * fetched from the leader. - * - * @param memberId The member id of the client that is fetching the record. - * @param fetchPartitionData The fetched records for the share partition. - * - * @return A future which is completed when the records are acquired. - */ - public CompletableFuture> acquire( - String memberId, - FetchPartitionData fetchPartitionData - ) { - log.trace("Received acquire request for share partition: {}-{}", memberId, fetchPartitionData); - - CompletableFuture> future = new CompletableFuture<>(); - future.completeExceptionally(new UnsupportedOperationException("Not implemented")); - - return future; - } - - /** - * Acknowledge the fetched records for the share partition. The accepted batches are removed - * from the in-flight records once persisted. The next fetch offset is updated to the next offset - * that should be fetched from the leader, if required. - * - * @param memberId The member id of the client that is fetching the record. - * @param acknowledgementBatch The acknowledgement batch list for the share partition. - * - * @return A future which is completed when the records are acknowledged. 
- */ - public CompletableFuture> acknowledge( - String memberId, - List acknowledgementBatch - ) { - log.trace("Acknowledgement batch request for share partition: {}-{}", groupId, topicIdPartition); - - CompletableFuture> future = new CompletableFuture<>(); - future.completeExceptionally(new UnsupportedOperationException("Not implemented")); - - return future; - } - - /** - * Release the acquired records for the share partition. The next fetch offset is updated to the next offset - * that should be fetched from the leader. - * - * @param memberId The member id of the client whose records shall be released. - * - * @return A future which is completed when the records are released. - */ - public CompletableFuture> releaseAcquiredRecords(String memberId) { - log.trace("Release acquired records request for share partition: {}-{}-{}", groupId, memberId, topicIdPartition); - - CompletableFuture> future = new CompletableFuture<>(); - future.completeExceptionally(new UnsupportedOperationException("Not implemented")); - - return future; - } - - private void initialize() { - // Initialize the partition. - log.debug("Initializing share partition: {}-{}", groupId, topicIdPartition); - - // TODO: Provide implementation to initialize the share partition. - } - - /** - * The InFlightBatch maintains the in-memory state of the fetched records i.e. in-flight records. - */ - private static class InFlightBatch { - /** - * The offset of the first record in the batch that is fetched from the log. - */ - private final long firstOffset; - /** - * The last offset of the batch that is fetched from the log. - */ - private final long lastOffset; - /** - * The in-flight state of the fetched records. If the offset state map is empty then inflightState - * determines the state of the complete batch else individual offset determines the state of - * the respective records. 
- */ - private InFlightState inFlightState; - - InFlightBatch(String memberId, long firstOffset, long lastOffset, RecordState state, int deliveryCount) { - this.firstOffset = firstOffset; - this.lastOffset = lastOffset; - this.inFlightState = new InFlightState(state, deliveryCount, memberId); - } - - @Override - public String toString() { - return "InFlightBatch(" + - " firstOffset=" + firstOffset + - ", lastOffset=" + lastOffset + - ", inFlightState=" + inFlightState + - ")"; - } - } - - /** - * The InFlightState is used to track the state and delivery count of a record that has been - * fetched from the leader. The state of the record is used to determine if the record should - * be re-deliver or if it can be acknowledged or archived. - */ - private static class InFlightState { - /** - * The state of the fetch batch records. - */ - private RecordState state; - /** - * The number of times the records has been delivered to the client. - */ - private int deliveryCount; - /** - * The member id of the client that is fetching/acknowledging the record. - */ - private String memberId; - - InFlightState(RecordState state, int deliveryCount, String memberId) { - this.state = state; - this.deliveryCount = deliveryCount; - this.memberId = memberId; - } - - @Override - public int hashCode() { - return Objects.hash(state, deliveryCount, memberId); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - InFlightState that = (InFlightState) o; - return state == that.state && deliveryCount == that.deliveryCount && memberId.equals(that.memberId); - } - - @Override - public String toString() { - return "InFlightState(" + - " state=" + state.toString() + - ", deliveryCount=" + deliveryCount + - ", memberId=" + memberId + - ")"; - } - } - - /** - * The AcknowledgementBatch containing the fields required to acknowledge the fetched records. 
- */ - public static class AcknowledgementBatch { - - private final long firstOffset; - private final long lastOffset; - private final List acknowledgeTypes; - - public AcknowledgementBatch(long firstOffset, long lastOffset, List acknowledgeTypes) { - this.firstOffset = firstOffset; - this.lastOffset = lastOffset; - this.acknowledgeTypes = acknowledgeTypes; - } - - public long firstOffset() { - return firstOffset; - } - - public long lastOffset() { - return lastOffset; - } - - public List acknowledgeTypes() { - return acknowledgeTypes; - } - - @Override - public String toString() { - return "AcknowledgementBatch(" + - " firstOffset=" + firstOffset + - ", lastOffset=" + lastOffset + - ", acknowledgeTypes=" + ((acknowledgeTypes == null) ? "" : acknowledgeTypes) + - ")"; - } - } -} diff --git a/core/src/main/java/kafka/server/SharePartitionManager.java b/core/src/main/java/kafka/server/SharePartitionManager.java deleted file mode 100644 index ec4af0265adb3..0000000000000 --- a/core/src/main/java/kafka/server/SharePartitionManager.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.server; - -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.message.ShareAcknowledgeResponseData; -import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.utils.Time; -import org.apache.kafka.storage.internals.log.FetchParams; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * The SharePartitionManager is responsible for managing the SharePartitions and ShareSessions. - * It is responsible for fetching messages from the log and acknowledging the messages. - */ -public class SharePartitionManager implements AutoCloseable { - - private final static Logger log = LoggerFactory.getLogger(SharePartitionManager.class); - - /** - * The partition cache map is used to store the SharePartition objects for each share group topic-partition. - */ - private final Map partitionCacheMap; - - /** - * The replica manager is used to fetch messages from the log. - */ - private final ReplicaManager replicaManager; - - /** - * The time instance is used to get the current time. - */ - private final Time time; - - /** - * The share session cache stores the share sessions. - */ - private final ShareSessionCache cache; - - /** - * The fetch queue stores the share fetch requests that are waiting to be processed. - */ - private final ConcurrentLinkedQueue fetchQueue; - - /** - * The process fetch queue lock is used to ensure that only one thread is processing the fetch queue at a time. - */ - private final AtomicBoolean processFetchQueueLock; - - /** - * The record lock duration is the time in milliseconds that a record lock is held for. 
- */ - private final int recordLockDurationMs; - - /** - * The max in flight messages is the maximum number of messages that can be in flight at any one time per share-partition. - */ - private final int maxInFlightMessages; - - /** - * The max delivery count is the maximum number of times a message can be delivered before it is considered to be archived. - */ - private final int maxDeliveryCount; - - public SharePartitionManager( - ReplicaManager replicaManager, - Time time, - ShareSessionCache cache, - int recordLockDurationMs, - int maxDeliveryCount, - int maxInFlightMessages - ) { - this(replicaManager, time, cache, new ConcurrentHashMap<>(), recordLockDurationMs, maxDeliveryCount, maxInFlightMessages); - } - - SharePartitionManager( - ReplicaManager replicaManager, - Time time, - ShareSessionCache cache, - Map partitionCacheMap, - int recordLockDurationMs, - int maxDeliveryCount, - int maxInFlightMessages - ) { - this.replicaManager = replicaManager; - this.time = time; - this.cache = cache; - this.partitionCacheMap = partitionCacheMap; - this.fetchQueue = new ConcurrentLinkedQueue<>(); - this.processFetchQueueLock = new AtomicBoolean(false); - this.recordLockDurationMs = recordLockDurationMs; - this.maxDeliveryCount = maxDeliveryCount; - this.maxInFlightMessages = maxInFlightMessages; - } - - /** - * The fetch messages method is used to fetch messages from the log for the specified topic-partitions. - * The method returns a future that will be completed with the fetched messages. - * - * @param groupId The group id, this is used to identify the share group. - * @param memberId The member id, generated by the group-coordinator, this is used to identify the client. - * @param fetchParams The fetch parameters from the share fetch request. - * @param topicIdPartitions The topic-partitions to fetch messages for. - * @param partitionMaxBytes The maximum number of bytes to fetch for each partition. 
- * - * @return A future that will be completed with the fetched messages. - */ - public CompletableFuture> fetchMessages( - String groupId, - String memberId, - FetchParams fetchParams, - List topicIdPartitions, - Map partitionMaxBytes - ) { - log.trace("Fetch request for topicIdPartitions: {} with groupId: {} fetch params: {}", - topicIdPartitions, groupId, fetchParams); - - CompletableFuture> future = new CompletableFuture<>(); - future.completeExceptionally(new UnsupportedOperationException("Not implemented yet")); - - return future; - } - - /** - * The acknowledge method is used to acknowledge the messages that have been fetched. - * The method returns a future that will be completed with the acknowledge response. - * - * @param memberId The member id, generated by the group-coordinator, this is used to identify the client. - * @param groupId The group id, this is used to identify the share group. - * @param acknowledgeTopics The acknowledge topics and their corresponding acknowledge batches. - * - * @return A future that will be completed with the acknowledge response. - */ - public CompletableFuture> acknowledge( - String memberId, - String groupId, - Map> acknowledgeTopics - ) { - log.trace("Acknowledge request for topicIdPartitions: {} with groupId: {}", - acknowledgeTopics.keySet(), groupId); - - CompletableFuture> future = new CompletableFuture<>(); - future.completeExceptionally(new UnsupportedOperationException("Not implemented yet")); - - return future; - } - - /** - * The release acquired records method is used to release the acquired records for the specified topic-partitions. - * The method returns a future that will be completed with the release response. - * - * @param groupId The group id, this is used to identify the share group. - * @param memberId The member id, generated by the group-coordinator, this is used to identify the client. - * @param topicIdPartitions The topic-partitions to release the acquired records for. 
- * - * @return A future that will be completed with the release response. - */ - public CompletableFuture> releaseAcquiredRecords( - String groupId, - String memberId, - List topicIdPartitions - ) { - log.trace("Release acquired records request for topicIdPartitions: {} with groupId: {}", - topicIdPartitions, groupId); - - CompletableFuture> future = new CompletableFuture<>(); - future.completeExceptionally(new UnsupportedOperationException("Not implemented yet")); - - return future; - } - - @Override - public void close() throws Exception { - // TODO: Provide Implementation - } - - /** - * The SharePartitionKey is used to uniquely identify a share partition. The key is made up of the - * share group id, the topic id and the partition id. The key is used to store the SharePartition - * objects in the partition cache map. - */ - private static class SharePartitionKey { - - } - - /** - * Caches share sessions. - *

- * See tryEvict for an explanation of the cache eviction strategy. - *

- * The ShareSessionCache is thread-safe because all of its methods are synchronized. - * Note that individual share sessions have their own locks which are separate from the - * ShareSessionCache lock. In order to avoid deadlock, the ShareSessionCache lock - * must never be acquired while an individual ShareSession lock is already held. - */ - public static class ShareSessionCache { - // TODO: Provide Implementation - } - - /** - * The ShareFetchPartitionData class is used to store the fetch parameters for a share fetch request. - */ - private static class ShareFetchPartitionData { - // TODO: Provide Implementation - } -} diff --git a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java index 5660772360418..6ffd741f4fc64 100644 --- a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java +++ b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java @@ -179,7 +179,7 @@ public KafkaApis build() { if (metrics == null) throw new RuntimeException("You must set metrics"); if (quotas == null) throw new RuntimeException("You must set quotas"); if (fetchManager == null) throw new RuntimeException("You must set fetchManager"); - if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().isRemoteStorageSystemEnabled()); + if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled()); if (apiVersionManager == null) throw new RuntimeException("You must set apiVersionManager"); return new KafkaApis(requestChannel, diff --git a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java index 7cac33200d2c0..82aa75909abba 100644 --- a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java @@ -185,7 +185,7 @@ public ReplicaManager build() 
{ if (metadataCache == null) throw new RuntimeException("You must set metadataCache"); if (logDirFailureChannel == null) throw new RuntimeException("You must set logDirFailureChannel"); if (alterPartitionManager == null) throw new RuntimeException("You must set alterIsrManager"); - if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().isRemoteStorageSystemEnabled()); + if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled()); // Initialize metrics in the end just before passing it to ReplicaManager to ensure ReplicaManager closes the // metrics correctly. There might be a resource leak if it is initialized and an exception occurs between // its initialization and creation of ReplicaManager. diff --git a/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala b/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala index b5fddac6f4070..e89876e443ce3 100644 --- a/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala +++ b/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala @@ -27,7 +27,7 @@ import java.util.concurrent.ConcurrentHashMap import com.yammer.metrics.core.Gauge import kafka.common.OffsetAndMetadata import kafka.coordinator.group.GroupMetadataManager.maybeConvertOffsetCommitError -import kafka.server.{ReplicaManager, RequestLocal} +import kafka.server.{LogAppendResult, ReplicaManager, RequestLocal} import kafka.utils.CoreUtils.inLock import kafka.utils.Implicits._ import kafka.utils._ @@ -377,13 +377,14 @@ class GroupMetadataManager(brokerId: Int, } private def createPutCacheCallback(isTxnOffsetCommit: Boolean, - group: GroupMetadata, - consumerId: String, - offsetMetadata: immutable.Map[TopicIdPartition, OffsetAndMetadata], - filteredOffsetMetadata: Map[TopicIdPartition, OffsetAndMetadata], - responseCallback: immutable.Map[TopicIdPartition, Errors] => Unit, - producerId: Long, - records: 
Map[TopicPartition, MemoryRecords]): Map[TopicPartition, PartitionResponse] => Unit = { + group: GroupMetadata, + consumerId: String, + offsetMetadata: immutable.Map[TopicIdPartition, OffsetAndMetadata], + filteredOffsetMetadata: Map[TopicIdPartition, OffsetAndMetadata], + responseCallback: immutable.Map[TopicIdPartition, Errors] => Unit, + producerId: Long, + records: Map[TopicPartition, MemoryRecords], + preAppendErrors: Map[TopicPartition, LogAppendResult] = Map.empty): Map[TopicPartition, PartitionResponse] => Unit = { val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partitionFor(group.groupId)) // set the callback function to insert offsets into cache after log append completed def putCacheCallback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { @@ -434,6 +435,8 @@ class GroupMetadataManager(brokerId: Int, val commitStatus = offsetMetadata.map { case (topicIdPartition, offsetAndMetadata) => if (!validateOffsetMetadataLength(offsetAndMetadata.metadata)) (topicIdPartition, Errors.OFFSET_METADATA_TOO_LARGE) + else if (preAppendErrors.contains(topicIdPartition.topicPartition)) + (topicIdPartition, preAppendErrors(topicIdPartition.topicPartition).error) else (topicIdPartition, responseError) } diff --git a/core/src/main/scala/kafka/log/LogLoader.scala b/core/src/main/scala/kafka/log/LogLoader.scala index b3b0ec2c63362..b0f1fdd0e1ca2 100644 --- a/core/src/main/scala/kafka/log/LogLoader.scala +++ b/core/src/main/scala/kafka/log/LogLoader.scala @@ -173,14 +173,14 @@ class LogLoader( } } - leaderEpochCache.ifPresent(_.truncateFromEndAsyncFlush(nextOffset)) + leaderEpochCache.ifPresent(_.truncateFromEnd(nextOffset)) val newLogStartOffset = if (isRemoteLogEnabled) { logStartOffsetCheckpoint } else { math.max(logStartOffsetCheckpoint, segments.firstSegment.get.baseOffset) } // The earliest leader epoch may not be flushed during a hard failure. Recover it here. 
- leaderEpochCache.ifPresent(_.truncateFromStartAsyncFlush(logStartOffsetCheckpoint)) + leaderEpochCache.ifPresent(_.truncateFromStart(logStartOffsetCheckpoint)) // Any segment loading or recovery code must not use producerStateManager, so that we can build the full state here // from scratch. diff --git a/core/src/main/scala/kafka/log/LogManager.scala b/core/src/main/scala/kafka/log/LogManager.scala index 8908aadf45930..3bc6533117cba 100755 --- a/core/src/main/scala/kafka/log/LogManager.scala +++ b/core/src/main/scala/kafka/log/LogManager.scala @@ -1562,7 +1562,7 @@ object LogManager { keepPartitionMetadataFile: Boolean): LogManager = { val defaultProps = config.extractLogConfigMap - LogConfig.validateBrokerLogConfigValues(defaultProps, config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validateBrokerLogConfigValues(defaultProps, config.isRemoteLogStorageSystemEnabled) val defaultLogConfig = new LogConfig(defaultProps) val cleanerConfig = LogCleaner.cleanerConfig(config) @@ -1586,7 +1586,7 @@ object LogManager { time = time, keepPartitionMetadataFile = keepPartitionMetadataFile, interBrokerProtocolVersion = config.interBrokerProtocolVersion, - remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), + remoteStorageSystemEnable = config.remoteLogManagerConfig.enableRemoteStorageSystem(), initialTaskDelayMs = config.logInitialTaskDelayMs) } @@ -1604,16 +1604,11 @@ object LogManager { newTopicsImage: TopicsImage, log: UnifiedLog ): Boolean = { - if (log.topicId.isEmpty) { - // Missing topic ID could result from storage failure or unclean shutdown after topic creation but before flushing - // data to the `partition.metadata` file. And before appending data to the log, the `partition.metadata` is always - // flushed to disk. So if the topic ID is missing, it mostly means no data was appended, and we can treat this as - // a stray log. 
- info(s"The topicId does not exist in $log, treat it as a stray log") - return true + val topicId = log.topicId.getOrElse { + throw new RuntimeException(s"The log dir $log does not have a topic ID, " + + "which is not allowed when running in KRaft mode.") } - val topicId = log.topicId.get val partitionId = log.topicPartition.partition() Option(newTopicsImage.getPartition(topicId, partitionId)) match { case Some(partition) => diff --git a/core/src/main/scala/kafka/log/UnifiedLog.scala b/core/src/main/scala/kafka/log/UnifiedLog.scala index bef18806b0dc8..ba1c8656a8848 100644 --- a/core/src/main/scala/kafka/log/UnifiedLog.scala +++ b/core/src/main/scala/kafka/log/UnifiedLog.scala @@ -521,8 +521,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, } private def initializeLeaderEpochCache(): Unit = lock synchronized { - leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - dir, topicPartition, logDirFailureChannel, recordVersion, logIdent, leaderEpochCache, scheduler) + leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(dir, topicPartition, logDirFailureChannel, recordVersion, logIdent) } private def updateHighWatermarkWithLogEndOffset(): Unit = { @@ -1016,7 +1015,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, updatedLogStartOffset = true updateLogStartOffset(newLogStartOffset) info(s"Incremented log start offset to $newLogStartOffset due to $reason") - leaderEpochCache.foreach(_.truncateFromStartAsyncFlush(logStartOffset)) + leaderEpochCache.foreach(_.truncateFromStart(logStartOffset)) producerStateManager.onLogStartOffsetIncremented(newLogStartOffset) maybeIncrementFirstUnstableOffset() } @@ -1814,7 +1813,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, // and inserted the first start offset entry, but then failed to append any entries // before another leader was elected. 
lock synchronized { - leaderEpochCache.foreach(_.truncateFromEndAsyncFlush(logEndOffset)) + leaderEpochCache.foreach(_.truncateFromEnd(logEndOffset)) } false @@ -1827,7 +1826,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, } else { val deletedSegments = localLog.truncateTo(targetOffset) deleteProducerSnapshots(deletedSegments, asyncDelete = true) - leaderEpochCache.foreach(_.truncateFromEndAsyncFlush(targetOffset)) + leaderEpochCache.foreach(_.truncateFromEnd(targetOffset)) logStartOffset = math.min(targetOffset, logStartOffset) rebuildProducerState(targetOffset, producerStateManager) if (highWatermark >= localLog.logEndOffset) @@ -2012,17 +2011,12 @@ object UnifiedLog extends Logging { Files.createDirectories(dir.toPath) val topicPartition = UnifiedLog.parseTopicPartitionName(dir) val segments = new LogSegments(topicPartition) - // The created leaderEpochCache will be truncated by LogLoader if necessary - // so it is guaranteed that the epoch entries will be correct even when on-disk - // checkpoint was stale (due to async nature of LeaderEpochFileCache#truncateFromStart/End). val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( dir, topicPartition, logDirFailureChannel, config.recordVersion, - s"[UnifiedLog partition=$topicPartition, dir=${dir.getParent}] ", - None, - scheduler) + s"[UnifiedLog partition=$topicPartition, dir=${dir.getParent}] ") val producerStateManager = new ProducerStateManager(topicPartition, dir, maxTransactionTimeoutMs, producerStateManagerConfig, time) val isRemoteLogEnabled = UnifiedLog.isRemoteLogEnabled(remoteStorageSystemEnable, config, topicPartition.topic) @@ -2109,8 +2103,7 @@ object UnifiedLog extends Logging { } /** - * If the recordVersion is >= RecordVersion.V2, create a new LeaderEpochFileCache instance. - * Loading the epoch entries from the backing checkpoint file or the provided currentCache if not empty. + * If the recordVersion is >= RecordVersion.V2, then create and return a LeaderEpochFileCache. 
* Otherwise, the message format is considered incompatible and the existing LeaderEpoch file * is deleted. * @@ -2119,29 +2112,33 @@ object UnifiedLog extends Logging { * @param logDirFailureChannel The LogDirFailureChannel to asynchronously handle log dir failure * @param recordVersion The record version * @param logPrefix The logging prefix - * @param currentCache The current LeaderEpochFileCache instance (if any) - * @param scheduler The scheduler for executing asynchronous tasks * @return The new LeaderEpochFileCache instance (if created), none otherwise */ def maybeCreateLeaderEpochCache(dir: File, topicPartition: TopicPartition, logDirFailureChannel: LogDirFailureChannel, recordVersion: RecordVersion, - logPrefix: String, - currentCache: Option[LeaderEpochFileCache], - scheduler: Scheduler): Option[LeaderEpochFileCache] = { + logPrefix: String): Option[LeaderEpochFileCache] = { val leaderEpochFile = LeaderEpochCheckpointFile.newFile(dir) + def newLeaderEpochFileCache(): LeaderEpochFileCache = { + val checkpointFile = new LeaderEpochCheckpointFile(leaderEpochFile, logDirFailureChannel) + new LeaderEpochFileCache(topicPartition, checkpointFile) + } + if (recordVersion.precedes(RecordVersion.V2)) { - if (leaderEpochFile.exists()) { + val currentCache = if (leaderEpochFile.exists()) + Some(newLeaderEpochFileCache()) + else + None + + if (currentCache.exists(_.nonEmpty)) warn(s"${logPrefix}Deleting non-empty leader epoch cache due to incompatible message format $recordVersion") - } + Files.deleteIfExists(leaderEpochFile.toPath) None } else { - val checkpointFile = new LeaderEpochCheckpointFile(leaderEpochFile, logDirFailureChannel) - currentCache.map(_.withCheckpoint(checkpointFile)) - .orElse(Some(new LeaderEpochFileCache(topicPartition, checkpointFile, scheduler))) + Some(newLeaderEpochFileCache()) } } diff --git a/core/src/main/scala/kafka/metrics/LinuxIoMetricsCollector.scala b/core/src/main/scala/kafka/metrics/LinuxIoMetricsCollector.scala new file mode 
100644 index 0000000000000..3b4950948b3d2 --- /dev/null +++ b/core/src/main/scala/kafka/metrics/LinuxIoMetricsCollector.scala @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.metrics + +import java.nio.file.{Files, Path, Paths} +import org.apache.kafka.common.utils.Time +import org.slf4j.Logger + +import java.nio.charset.StandardCharsets +import scala.jdk.CollectionConverters._ + +/** + * Retrieves Linux /proc/self/io metrics. + */ +class LinuxIoMetricsCollector(procRoot: String, val time: Time, val logger: Logger) { + import LinuxIoMetricsCollector._ + private var lastUpdateMs = -1L + private var cachedReadBytes = 0L + private var cachedWriteBytes = 0L + val path: Path = Paths.get(procRoot, "self", "io") + + def readBytes(): Long = this.synchronized { + val curMs = time.milliseconds() + if (curMs != lastUpdateMs) { + updateValues(curMs) + } + cachedReadBytes + } + + def writeBytes(): Long = this.synchronized { + val curMs = time.milliseconds() + if (curMs != lastUpdateMs) { + updateValues(curMs) + } + cachedWriteBytes + } + + /** + * Read /proc/self/io. + * + * Generally, each line in this file contains a prefix followed by a colon and a number. 
+ * + * For example, it might contain this: + * rchar: 4052 + * wchar: 0 + * syscr: 13 + * syscw: 0 + * read_bytes: 0 + * write_bytes: 0 + * cancelled_write_bytes: 0 + */ + private def updateValues(now: Long): Boolean = this.synchronized { + try { + cachedReadBytes = -1 + cachedWriteBytes = -1 + val lines = Files.readAllLines(path, StandardCharsets.UTF_8).asScala + lines.foreach(line => { + if (line.startsWith(READ_BYTES_PREFIX)) { + cachedReadBytes = line.substring(READ_BYTES_PREFIX.length).toLong + } else if (line.startsWith(WRITE_BYTES_PREFIX)) { + cachedWriteBytes = line.substring(WRITE_BYTES_PREFIX.length).toLong + } + }) + lastUpdateMs = now + true + } catch { + case t: Throwable => + logger.warn("Unable to update IO metrics", t) + false + } + } + + def usable(): Boolean = { + if (path.toFile.exists()) { + updateValues(time.milliseconds()) + } else { + logger.debug(s"disabling IO metrics collection because $path does not exist.") + false + } + } +} + +object LinuxIoMetricsCollector { + private val READ_BYTES_PREFIX = "read_bytes: " + private val WRITE_BYTES_PREFIX = "write_bytes: " +} diff --git a/core/src/main/scala/kafka/network/RequestConvertToJson.scala b/core/src/main/scala/kafka/network/RequestConvertToJson.scala index b4883f0b08b61..54986f52c85a3 100644 --- a/core/src/main/scala/kafka/network/RequestConvertToJson.scala +++ b/core/src/main/scala/kafka/network/RequestConvertToJson.scala @@ -103,9 +103,6 @@ object RequestConvertToJson { case req: UpdateMetadataRequest => UpdateMetadataRequestDataJsonConverter.write(req.data, request.version) case req: VoteRequest => VoteRequestDataJsonConverter.write(req.data, request.version) case req: WriteTxnMarkersRequest => WriteTxnMarkersRequestDataJsonConverter.write(req.data, request.version) - case req: AddRaftVoterRequest => AddRaftVoterRequestDataJsonConverter.write(req.data, request.version) - case req: RemoveRaftVoterRequest => RemoveRaftVoterRequestDataJsonConverter.write(req.data, request.version) - case 
req: UpdateRaftVoterRequest => UpdateRaftVoterRequestDataJsonConverter.write(req.data, request.version) case _ => throw new IllegalStateException(s"ApiKey ${request.apiKey} is not currently handled in `request`, the " + "code should be updated to do so.") } @@ -189,9 +186,6 @@ object RequestConvertToJson { case res: UpdateMetadataResponse => UpdateMetadataResponseDataJsonConverter.write(res.data, version) case res: VoteResponse => VoteResponseDataJsonConverter.write(res.data, version) case res: WriteTxnMarkersResponse => WriteTxnMarkersResponseDataJsonConverter.write(res.data, version) - case res: AddRaftVoterResponse => AddRaftVoterResponseDataJsonConverter.write(res.data, version) - case res: RemoveRaftVoterResponse => RemoveRaftVoterResponseDataJsonConverter.write(res.data, version) - case res: UpdateRaftVoterResponse => UpdateRaftVoterResponseDataJsonConverter.write(res.data, version) case _ => throw new IllegalStateException(s"ApiKey ${response.apiKey} is not currently handled in `response`, the " + "code should be updated to do so.") } diff --git a/core/src/main/scala/kafka/raft/RaftManager.scala b/core/src/main/scala/kafka/raft/RaftManager.scala index 48646e0b4d7d4..6bf8bd893ba70 100644 --- a/core/src/main/scala/kafka/raft/RaftManager.scala +++ b/core/src/main/scala/kafka/raft/RaftManager.scala @@ -23,7 +23,6 @@ import java.nio.file.Paths import java.util.OptionalInt import java.util.concurrent.CompletableFuture import java.util.{Map => JMap} -import java.util.{Collection => JCollection} import kafka.log.LogManager import kafka.log.UnifiedLog import kafka.server.KafkaConfig @@ -31,10 +30,7 @@ import kafka.utils.CoreUtils import kafka.utils.FileLock import kafka.utils.Logging import org.apache.kafka.clients.{ApiVersions, ManualMetadataUpdater, NetworkClient} -import org.apache.kafka.common.KafkaException -import org.apache.kafka.common.Node -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.Uuid +import 
org.apache.kafka.common.{KafkaException, Node, TopicPartition, Uuid} import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.{ChannelBuilders, ListenerName, NetworkReceive, Selectable, Selector} @@ -137,7 +133,7 @@ trait RaftManager[T] { def replicatedLog: ReplicatedLog - def voterNode(id: Int, listener: ListenerName): Option[Node] + def voterNode(id: Int, listener: String): Option[Node] } class KafkaRaftManager[T]( @@ -151,7 +147,6 @@ class KafkaRaftManager[T]( metrics: Metrics, threadNamePrefixOpt: Option[String], val controllerQuorumVotersFuture: CompletableFuture[JMap[Integer, InetSocketAddress]], - bootstrapServers: JCollection[InetSocketAddress], fatalFaultHandler: FaultHandler ) extends RaftManager[T] with Logging { @@ -190,6 +185,7 @@ class KafkaRaftManager[T]( def startup(): Unit = { client.initialize( controllerQuorumVotersFuture.get(), + config.controllerListenerNames.head, new FileQuorumStateStore(new File(dataDir, FileQuorumStateStore.DEFAULT_FILE_NAME)), metrics ) @@ -232,15 +228,14 @@ class KafkaRaftManager[T]( expirationService, logContext, clusterId, - bootstrapServers, raftConfig ) client } private def buildNetworkChannel(): KafkaNetworkChannel = { - val (listenerName, netClient) = buildNetworkClient() - new KafkaNetworkChannel(time, listenerName, netClient, config.quorumRequestTimeoutMs, threadNamePrefix) + val netClient = buildNetworkClient() + new KafkaNetworkChannel(time, netClient, config.quorumRequestTimeoutMs, threadNamePrefix) } private def createDataDir(): File = { @@ -259,7 +254,7 @@ class KafkaRaftManager[T]( ) } - private def buildNetworkClient(): (ListenerName, NetworkClient) = { + private def buildNetworkClient(): NetworkClient = { val controllerListenerName = new ListenerName(config.controllerListenerNames.head) val controllerSecurityProtocol = config.effectiveListenerSecurityProtocolMap.getOrElse( controllerListenerName, @@ -297,7 +292,7 @@ class 
KafkaRaftManager[T]( val reconnectBackoffMsMs = 500 val discoverBrokerVersions = true - val networkClient = new NetworkClient( + new NetworkClient( selector, new ManualMetadataUpdater(), clientId, @@ -314,15 +309,13 @@ class KafkaRaftManager[T]( apiVersions, logContext ) - - (controllerListenerName, networkClient) } override def leaderAndEpoch: LeaderAndEpoch = { client.leaderAndEpoch } - override def voterNode(id: Int, listener: ListenerName): Option[Node] = { + override def voterNode(id: Int, listener: String): Option[Node] = { client.voterNode(id, listener).toScala } } diff --git a/core/src/main/scala/kafka/server/BrokerServer.scala b/core/src/main/scala/kafka/server/BrokerServer.scala index 31db58c077889..112a03c50a9a4 100644 --- a/core/src/main/scala/kafka/server/BrokerServer.scala +++ b/core/src/main/scala/kafka/server/BrokerServer.scala @@ -37,7 +37,7 @@ import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{ClusterResource, TopicPartition, Uuid} import org.apache.kafka.coordinator.group.metrics.{GroupCoordinatorMetrics, GroupCoordinatorRuntimeMetrics} import org.apache.kafka.coordinator.group.{CoordinatorRecord, GroupCoordinator, GroupCoordinatorConfig, GroupCoordinatorService, CoordinatorRecordSerde} -import org.apache.kafka.image.publisher.{BrokerRegistrationTracker, MetadataPublisher} +import org.apache.kafka.image.publisher.MetadataPublisher import org.apache.kafka.metadata.{BrokerState, ListenerInfo, VersionRange} import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.{AssignmentsManager, ClientMetricsManager, NodeToControllerChannelManager} @@ -139,8 +139,6 @@ class BrokerServer( var brokerMetadataPublisher: BrokerMetadataPublisher = _ - var brokerRegistrationTracker: BrokerRegistrationTracker = _ - val brokerFeatures: BrokerFeatures = BrokerFeatures.createDefault(config.unstableFeatureVersionsEnabled) def kafkaYammerMetrics: KafkaYammerMetrics = KafkaYammerMetrics.INSTANCE @@ -186,7 +184,7 
@@ class BrokerServer( kafkaScheduler.startup() /* register broker metrics */ - brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled) quotaManagers = QuotaFactory.instantiate(config, metrics, time, s"broker-${config.nodeId}-") @@ -447,7 +445,6 @@ class BrokerServer( rlm.startup() } - metadataPublishers.add(new MetadataVersionConfigValidator(config, sharedServer.metadataPublishingFaultHandler)) brokerMetadataPublisher = new BrokerMetadataPublisher(config, metadataCache, logManager, @@ -481,13 +478,10 @@ class BrokerServer( authorizer ), sharedServer.initialBrokerMetadataLoadFaultHandler, - sharedServer.metadataPublishingFaultHandler + sharedServer.metadataPublishingFaultHandler, + lifecycleManager ) metadataPublishers.add(brokerMetadataPublisher) - brokerRegistrationTracker = new BrokerRegistrationTracker(config.brokerId, - () => lifecycleManager.resendBrokerRegistrationUnlessZkMode()) - metadataPublishers.add(brokerRegistrationTracker) - // Register parts of the broker that can be reconfigured via dynamic configs. This needs to // be done before we publish the dynamic configs, so that we don't miss anything. 
@@ -619,7 +613,7 @@ class BrokerServer( } protected def createRemoteLogManager(): Option[RemoteLogManager] = { - if (config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) { + if (config.remoteLogManagerConfig.enableRemoteStorageSystem()) { Some(new RemoteLogManager(config.remoteLogManagerConfig, config.brokerId, config.logDirs.head, clusterId, time, (tp: TopicPartition) => logManager.getLog(tp).asJava, (tp: TopicPartition, remoteLogStartOffset: java.lang.Long) => { diff --git a/core/src/main/scala/kafka/server/ConfigHandler.scala b/core/src/main/scala/kafka/server/ConfigHandler.scala index e441b30a3bd27..1d5702e76e49d 100644 --- a/core/src/main/scala/kafka/server/ConfigHandler.scala +++ b/core/src/main/scala/kafka/server/ConfigHandler.scala @@ -70,7 +70,7 @@ class TopicConfigHandler(private val replicaManager: ReplicaManager, val logs = logManager.logsByTopic(topic) val wasRemoteLogEnabledBeforeUpdate = logs.exists(_.remoteLogEnabled()) - logManager.updateTopicConfig(topic, props, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + logManager.updateTopicConfig(topic, props, kafkaConfig.isRemoteLogStorageSystemEnabled) maybeBootstrapRemoteLogComponents(topic, logs, wasRemoteLogEnabledBeforeUpdate) } diff --git a/core/src/main/scala/kafka/server/ControllerApis.scala b/core/src/main/scala/kafka/server/ControllerApis.scala index f4c0ba89f81f3..e81c37f96b74f 100644 --- a/core/src/main/scala/kafka/server/ControllerApis.scala +++ b/core/src/main/scala/kafka/server/ControllerApis.scala @@ -129,9 +129,6 @@ class ControllerApis( case ApiKeys.DESCRIBE_CLUSTER => handleDescribeCluster(request) case ApiKeys.CONTROLLER_REGISTRATION => handleControllerRegistration(request) case ApiKeys.ASSIGN_REPLICAS_TO_DIRS => handleAssignReplicasToDirs(request) - case ApiKeys.ADD_RAFT_VOTER => handleAddRaftVoter(request) - case ApiKeys.REMOVE_RAFT_VOTER => handleRemoveRaftVoter(request) - case ApiKeys.UPDATE_RAFT_VOTER => handleUpdateRaftVoter(request) case _ => throw 
new ApiException(s"Unsupported ApiKey ${request.context.header.apiKey}") } @@ -1083,19 +1080,4 @@ class ControllerApis( requestThrottleMs => new AssignReplicasToDirsResponse(reply.setThrottleTimeMs(requestThrottleMs))) } } - - def handleAddRaftVoter(request: RequestChannel.Request): CompletableFuture[Unit] = { - authHelper.authorizeClusterOperation(request, ALTER) - throw new UnsupportedVersionException("handleAddRaftVoter is not supported yet.") - } - - def handleRemoveRaftVoter(request: RequestChannel.Request): CompletableFuture[Unit] = { - authHelper.authorizeClusterOperation(request, ALTER) - throw new UnsupportedVersionException("handleRemoveRaftVoter is not supported yet.") - } - - def handleUpdateRaftVoter(request: RequestChannel.Request): CompletableFuture[Unit] = { - authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - throw new UnsupportedVersionException("handleUpdateRaftVoter is not supported yet.") - } } diff --git a/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala b/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala index b99065b573ee8..15eb1eff04aa3 100644 --- a/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala +++ b/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala @@ -107,8 +107,7 @@ class ControllerConfigurationValidator(kafkaConfig: KafkaConfig) extends Configu throw new InvalidConfigurationException("Null value not supported for topic configs: " + nullTopicConfigs.mkString(",")) } - LogConfig.validate(properties, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(properties, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) case BROKER => validateBrokerName(resource.name()) case CLIENT_METRICS => val properties = new Properties() diff --git a/core/src/main/scala/kafka/server/ControllerServer.scala 
b/core/src/main/scala/kafka/server/ControllerServer.scala index 7c1f694b08030..58a033ac63839 100644 --- a/core/src/main/scala/kafka/server/ControllerServer.scala +++ b/core/src/main/scala/kafka/server/ControllerServer.scala @@ -17,6 +17,7 @@ package kafka.server +import kafka.metrics.LinuxIoMetricsCollector import kafka.migration.MigrationPropagator import kafka.network.{DataPlaneAcceptor, SocketServer} import kafka.raft.KafkaRaftManager @@ -47,7 +48,7 @@ import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.config.ServerLogConfigs.{ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG} import org.apache.kafka.server.common.ApiMessageAndVersion import org.apache.kafka.server.config.ConfigType -import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} +import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import org.apache.kafka.server.network.{EndpointReadyFutures, KafkaAuthorizerServerInfo} import org.apache.kafka.server.policy.{AlterConfigPolicy, CreateTopicPolicy} import org.apache.kafka.server.util.{Deadline, FutureUtils} @@ -153,7 +154,7 @@ class ControllerServer( metricsGroup.newGauge("ClusterId", () => clusterId) metricsGroup.newGauge("yammer-metrics-count", () => KafkaYammerMetrics.defaultRegistry.allMetrics.size) - linuxIoMetricsCollector = new LinuxIoMetricsCollector("/proc", time) + linuxIoMetricsCollector = new LinuxIoMetricsCollector("/proc", time, logger.underlying) if (linuxIoMetricsCollector.usable()) { metricsGroup.newGauge("linux-disk-read-bytes", () => linuxIoMetricsCollector.readBytes()) metricsGroup.newGauge("linux-disk-write-bytes", () => linuxIoMetricsCollector.writeBytes()) diff --git a/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala b/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala index 58a866aa4a63f..00d6afb89ffe2 100644 --- a/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala +++ 
b/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala @@ -35,13 +35,12 @@ import scala.collection._ class DelayedRemoteFetch(remoteFetchTask: Future[Void], remoteFetchResult: CompletableFuture[RemoteLogReadResult], remoteFetchInfo: RemoteStorageFetchInfo, - remoteFetchMaxWaitMs: Long, fetchPartitionStatus: Seq[(TopicIdPartition, FetchPartitionStatus)], fetchParams: FetchParams, localReadResults: Seq[(TopicIdPartition, LogReadResult)], replicaManager: ReplicaManager, responseCallback: Seq[(TopicIdPartition, FetchPartitionData)] => Unit) - extends DelayedOperation(remoteFetchMaxWaitMs) { + extends DelayedOperation(fetchParams.maxWaitMs) { if (fetchParams.isFromFollower) { throw new IllegalStateException(s"The follower should not invoke remote fetch. Fetch params are: $fetchParams") diff --git a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala index 22576bdceb6fd..c9bb2e3b4ffda 100755 --- a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala +++ b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala @@ -1203,7 +1203,6 @@ class DynamicRemoteLogConfig(server: KafkaBroker) extends BrokerReconfigurable w object DynamicRemoteLogConfig { val ReconfigurableConfigs = Set( - RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP, - RemoteLogManagerConfig.REMOTE_FETCH_MAX_WAIT_MS_PROP + RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP ) } diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 5fbf62ea4db2f..b76ebff59cb78 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -256,10 +256,6 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => handleGetTelemetrySubscriptionsRequest(request) case ApiKeys.PUSH_TELEMETRY => handlePushTelemetryRequest(request) case 
ApiKeys.LIST_CLIENT_METRICS_RESOURCES => handleListClientMetricsResources(request) - case ApiKeys.ADD_RAFT_VOTER => forwardToControllerOrFail(request) - case ApiKeys.REMOVE_RAFT_VOTER => forwardToControllerOrFail(request) - case ApiKeys.SHARE_FETCH => handleShareFetchRequest(request) - case ApiKeys.SHARE_ACKNOWLEDGE => handleShareAcknowledgeRequest(request) case _ => throw new IllegalStateException(s"No handler for request api key ${request.header.apiKey}") } } catch { @@ -3833,7 +3829,6 @@ class KafkaApis(val requestChannel: RequestChannel, def handleConsumerGroupDescribe(request: RequestChannel.Request): CompletableFuture[Unit] = { val consumerGroupDescribeRequest = request.body[ConsumerGroupDescribeRequest] - val includeAuthorizedOperations = consumerGroupDescribeRequest.data.includeAuthorizedOperations if (!isConsumerGroupProtocolEnabled()) { // The API is not supported by the "old" group coordinator (the default). If the @@ -3862,17 +3857,6 @@ class KafkaApis(val requestChannel: RequestChannel, if (exception != null) { requestHelper.sendMaybeThrottle(request, consumerGroupDescribeRequest.getErrorResponse(exception)) } else { - if (includeAuthorizedOperations) { - results.forEach { groupResult => - if (groupResult.errorCode == Errors.NONE.code) { - groupResult.setAuthorizedOperations(authHelper.authorizedOperations( - request, - new Resource(ResourceType.GROUP, groupResult.groupId) - )) - } - } - } - if (response.groups.isEmpty) { // If the response is empty, we can directly reuse the results. 
response.setGroups(results) @@ -3943,20 +3927,6 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def handleShareFetchRequest(request: RequestChannel.Request): Unit = { - val shareFetchRequest = request.body[ShareFetchRequest] - // TODO: Implement the ShareFetchRequest handling - requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - CompletableFuture.completedFuture[Unit](()) - } - - def handleShareAcknowledgeRequest(request: RequestChannel.Request): Unit = { - val shareAcknowledgeRequest = request.body[ShareAcknowledgeRequest] - // TODO: Implement the ShareAcknowledgeRequest handling - requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - CompletableFuture.completedFuture[Unit](()) - } - private def updateRecordConversionStats(request: RequestChannel.Request, tp: TopicPartition, conversionStats: RecordValidationStats): Unit = { diff --git a/core/src/main/scala/kafka/server/KafkaBroker.scala b/core/src/main/scala/kafka/server/KafkaBroker.scala index 9e1ee3d694195..b88a56e378d5e 100644 --- a/core/src/main/scala/kafka/server/KafkaBroker.scala +++ b/core/src/main/scala/kafka/server/KafkaBroker.scala @@ -20,6 +20,7 @@ package kafka.server import com.yammer.metrics.core.MetricName import kafka.log.LogManager import kafka.log.remote.RemoteLogManager +import kafka.metrics.LinuxIoMetricsCollector import kafka.network.SocketServer import kafka.utils.Logging import org.apache.kafka.common.ClusterResource @@ -33,7 +34,7 @@ import org.apache.kafka.metadata.BrokerState import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.NodeToControllerChannelManager import org.apache.kafka.server.authorizer.Authorizer -import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} +import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import 
org.apache.kafka.server.util.Scheduler import java.time.Duration @@ -114,7 +115,7 @@ trait KafkaBroker extends Logging { metricsGroup.newGauge("ClusterId", () => clusterId) metricsGroup.newGauge("yammer-metrics-count", () => KafkaYammerMetrics.defaultRegistry.allMetrics.size) - private val linuxIoMetricsCollector = new LinuxIoMetricsCollector("/proc", Time.SYSTEM) + private val linuxIoMetricsCollector = new LinuxIoMetricsCollector("/proc", Time.SYSTEM, logger.underlying) if (linuxIoMetricsCollector.usable()) { metricsGroup.newGauge("linux-disk-read-bytes", () => linuxIoMetricsCollector.readBytes()) diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 309f182cad919..77a0a8554bc6d 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -37,7 +37,7 @@ import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.group.ConsumerGroupMigrationPolicy import org.apache.kafka.coordinator.group.Group.GroupType import org.apache.kafka.coordinator.group.GroupCoordinatorConfig -import org.apache.kafka.coordinator.group.api.assignor.ConsumerGroupPartitionAssignor +import org.apache.kafka.coordinator.group.assignor.ConsumerGroupPartitionAssignor import org.apache.kafka.coordinator.transaction.{TransactionLogConfigs, TransactionStateManagerConfigs} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.QuorumConfig @@ -456,7 +456,6 @@ object KafkaConfig { /** ********* Raft Quorum Configuration *********/ .define(QuorumConfig.QUORUM_VOTERS_CONFIG, LIST, QuorumConfig.DEFAULT_QUORUM_VOTERS, new QuorumConfig.ControllerQuorumVotersValidator(), HIGH, QuorumConfig.QUORUM_VOTERS_DOC) - .define(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, LIST, QuorumConfig.DEFAULT_QUORUM_BOOTSTRAP_SERVERS, new QuorumConfig.ControllerQuorumBootstrapServersValidator(), HIGH, QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_DOC) 
.define(QuorumConfig.QUORUM_ELECTION_TIMEOUT_MS_CONFIG, INT, QuorumConfig.DEFAULT_QUORUM_ELECTION_TIMEOUT_MS, null, HIGH, QuorumConfig.QUORUM_ELECTION_TIMEOUT_MS_DOC) .define(QuorumConfig.QUORUM_FETCH_TIMEOUT_MS_CONFIG, INT, QuorumConfig.DEFAULT_QUORUM_FETCH_TIMEOUT_MS, null, HIGH, QuorumConfig.QUORUM_FETCH_TIMEOUT_MS_DOC) .define(QuorumConfig.QUORUM_ELECTION_BACKOFF_MAX_MS_CONFIG, INT, QuorumConfig.DEFAULT_QUORUM_ELECTION_BACKOFF_MAX_MS, null, HIGH, QuorumConfig.QUORUM_ELECTION_BACKOFF_MAX_MS_DOC) @@ -472,7 +471,7 @@ object KafkaConfig { } /** ********* Remote Log Management Configuration *********/ - RemoteLogManagerConfig.configDef().configKeys().values().forEach(key => configDef.define(key)) + RemoteLogManagerConfig.CONFIG_DEF.configKeys().values().forEach(key => configDef.define(key)) def configNames: Seq[String] = configDef.names.asScala.toBuffer.sorted private[server] def defaultValues: Map[String, _] = configDef.defaultValues.asScala @@ -607,7 +606,7 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami val zkEnableSecureAcls: Boolean = getBoolean(ZkConfigs.ZK_ENABLE_SECURE_ACLS_CONFIG) val zkMaxInFlightRequests: Int = getInt(ZkConfigs.ZK_MAX_IN_FLIGHT_REQUESTS_CONFIG) - private val _remoteLogManagerConfig = new RemoteLogManagerConfig(props) + private val _remoteLogManagerConfig = new RemoteLogManagerConfig(this) def remoteLogManagerConfig = _remoteLogManagerConfig private def zkBooleanConfigOrSystemPropertyWithDefaultValue(propKey: String): Boolean = { @@ -1089,7 +1088,6 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami /** ********* Raft Quorum Configuration *********/ val quorumVoters = getList(QuorumConfig.QUORUM_VOTERS_CONFIG) - val quorumBootstrapServers = getList(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG) val quorumElectionTimeoutMs = getInt(QuorumConfig.QUORUM_ELECTION_TIMEOUT_MS_CONFIG) val quorumFetchTimeoutMs = getInt(QuorumConfig.QUORUM_FETCH_TIMEOUT_MS_CONFIG) val 
quorumElectionBackoffMs = getInt(QuorumConfig.QUORUM_ELECTION_BACKOFF_MAX_MS_CONFIG) @@ -1246,8 +1244,6 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami def logLocalRetentionMs: java.lang.Long = getLong(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_MS_PROP) - def remoteFetchMaxWaitMs = getInt(RemoteLogManagerConfig.REMOTE_FETCH_MAX_WAIT_MS_PROP) - validateValues() @nowarn("cat=deprecation") @@ -1527,18 +1523,6 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami } } - /** - * Validate some configurations for new MetadataVersion. A new MetadataVersion can take place when - * a FeatureLevelRecord for "metadata.version" is read from the cluster metadata. - */ - def validateWithMetadataVersion(metadataVersion: MetadataVersion): Unit = { - if (processRoles.contains(ProcessRole.BrokerRole) && logDirs.size > 1) { - require(metadataVersion.isDirectoryAssignmentSupported, - s"Multiple log directories (aka JBOD) are not supported in the current MetadataVersion ${metadataVersion}. " + - s"Need ${MetadataVersion.IBP_3_7_IV2} or higher") - } - } - /** * Copy the subset of properties that are relevant to Logs. The individual properties * are listed here since the names are slightly different in each Config class... 
diff --git a/core/src/main/scala/kafka/server/KafkaRaftServer.scala b/core/src/main/scala/kafka/server/KafkaRaftServer.scala index ecb757c1a89ba..d3200149f7a39 100644 --- a/core/src/main/scala/kafka/server/KafkaRaftServer.scala +++ b/core/src/main/scala/kafka/server/KafkaRaftServer.scala @@ -71,7 +71,6 @@ class KafkaRaftServer( time, metrics, CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(config.quorumVoters)), - QuorumConfig.parseBootstrapServers(config.quorumBootstrapServers), new StandardFaultHandlerFactory(), ) diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 5b6e04e5a0e34..933a5df536a5f 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -70,9 +70,9 @@ import java.net.{InetAddress, SocketTimeoutException} import java.nio.file.{Files, Paths} import java.time.Duration import java.util +import java.util.{Optional, OptionalInt, OptionalLong} import java.util.concurrent._ import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} -import java.util.{Optional, OptionalInt, OptionalLong} import scala.collection.{Map, Seq} import scala.compat.java8.OptionConverters.RichOptionForJava8 import scala.jdk.CollectionConverters._ @@ -276,7 +276,7 @@ class KafkaServer( createCurrentControllerIdMetric() /* register broker metrics */ - _brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + _brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled) quotaManagers = QuotaFactory.instantiate(config, metrics, time, threadNamePrefix.getOrElse("")) KafkaBroker.notifyClusterListeners(clusterId, kafkaMetricsReporters ++ metrics.reporters.asScala) @@ -439,7 +439,6 @@ class KafkaServer( metrics, threadNamePrefix, CompletableFuture.completedFuture(quorumVoters), - QuorumConfig.parseBootstrapServers(config.quorumBootstrapServers), fatalFaultHandler 
= new LoggingFaultHandler("raftManager", () => shutdown()) ) quorumControllerNodeProvider = RaftControllerNodeProvider(raftManager, config) @@ -690,7 +689,7 @@ class KafkaServer( } protected def createRemoteLogManager(): Option[RemoteLogManager] = { - if (config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) { + if (config.remoteLogManagerConfig.enableRemoteStorageSystem()) { Some(new RemoteLogManager(config.remoteLogManagerConfig, config.brokerId, config.logDirs.head, clusterId, time, (tp: TopicPartition) => logManager.getLog(tp).asJava, (tp: TopicPartition, remoteLogStartOffset: java.lang.Long) => { diff --git a/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala b/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala index a0e4bbbc463e6..0017a5876af13 100644 --- a/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala +++ b/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala @@ -112,7 +112,7 @@ class RaftControllerNodeProvider( val saslMechanism: String ) extends ControllerNodeProvider with Logging { - private def idToNode(id: Int): Option[Node] = raftManager.voterNode(id, listenerName) + private def idToNode(id: Int): Option[Node] = raftManager.voterNode(id, listenerName.value()) override def getControllerInfo(): ControllerInformation = ControllerInformation(raftManager.leaderAndEpoch.leaderId.asScala.flatMap(idToNode), diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index ec6005890c8df..aa56269a2f40d 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -1476,9 +1476,9 @@ class ReplicaManager(val config: KafkaConfig, return Some(createLogReadResult(e)) } - val remoteFetchMaxWaitMs = config.remoteFetchMaxWaitMs.toLong - val remoteFetch = new DelayedRemoteFetch(remoteFetchTask, remoteFetchResult, remoteFetchInfo, remoteFetchMaxWaitMs, + val 
remoteFetch = new DelayedRemoteFetch(remoteFetchTask, remoteFetchResult, remoteFetchInfo, fetchPartitionStatus, params, logReadResults, this, responseCallback) + delayedRemoteFetchPurgatory.tryCompleteElseWatch(remoteFetch, Seq(key)) None } @@ -1754,25 +1754,12 @@ class ReplicaManager(val config: KafkaConfig, createLogReadResult(highWatermark, leaderLogStartOffset, leaderLogEndOffset, new OffsetMovedToTieredStorageException("Given offset" + offset + " is moved to tiered storage")) } else { - val fetchDataInfo = if (remoteLogManager.get.isRemoteLogFetchQuotaExceeded) { - // We do not want to send an exception in a LogReadResult response (like we do in other cases when we send - // UnknownOffsetMetadata), because it is classified as an error in reading the data, and a response is - // immediately sent back to the client. Instead, we want to serve data for the other topic partitions of the - // fetch request via delayed fetch if required (when sending immediate response, we skip delayed fetch). - new FetchDataInfo( - LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, - MemoryRecords.EMPTY, - false, - Optional.empty(), - Optional.empty() - ) - } else { - // For consume fetch requests, create a dummy FetchDataInfo with the remote storage fetch information. - // For the first topic-partition that needs remote data, we will use this information to read the data in another thread. - new FetchDataInfo(new LogOffsetMetadata(offset), MemoryRecords.EMPTY, false, Optional.empty(), - Optional.of(new RemoteStorageFetchInfo(adjustedMaxBytes, minOneMessage, tp.topicPartition(), - fetchInfo, params.isolation, params.hardMaxBytesLimit()))) - } + // For consume fetch requests, create a dummy FetchDataInfo with the remote storage fetch information. + // For the first topic-partition that needs remote data, we will use this information to read the data in another thread. 
+ val fetchDataInfo = + new FetchDataInfo(new LogOffsetMetadata(offset), MemoryRecords.EMPTY, false, Optional.empty(), + Optional.of(new RemoteStorageFetchInfo(adjustedMaxBytes, minOneMessage, tp.topicPartition(), + fetchInfo, params.isolation, params.hardMaxBytesLimit()))) LogReadResult(fetchDataInfo, divergingEpoch = None, diff --git a/core/src/main/scala/kafka/server/SharedServer.scala b/core/src/main/scala/kafka/server/SharedServer.scala index ea92dd61f5fc4..215208f9f631a 100644 --- a/core/src/main/scala/kafka/server/SharedServer.scala +++ b/core/src/main/scala/kafka/server/SharedServer.scala @@ -41,7 +41,6 @@ import java.util.Arrays import java.util.Optional import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.{CompletableFuture, TimeUnit} -import java.util.{Collection => JCollection} import java.util.{Map => JMap} @@ -95,7 +94,6 @@ class SharedServer( val time: Time, private val _metrics: Metrics, val controllerQuorumVotersFuture: CompletableFuture[JMap[Integer, InetSocketAddress]], - val bootstrapServers: JCollection[InetSocketAddress], val faultHandlerFactory: FaultHandlerFactory ) extends Logging { private val logContext: LogContext = new LogContext(s"[SharedServer id=${sharedServerConfig.nodeId}] ") @@ -267,7 +265,6 @@ class SharedServer( metrics, Some(s"kafka-${sharedServerConfig.nodeId}-raft"), // No dash expected at the end controllerQuorumVotersFuture, - bootstrapServers, raftManagerFaultHandler ) raftManager = _raftManager diff --git a/core/src/main/scala/kafka/server/checkpoints/OffsetCheckpointFile.scala b/core/src/main/scala/kafka/server/checkpoints/OffsetCheckpointFile.scala index 084e46c5ef266..de3283d21fd42 100644 --- a/core/src/main/scala/kafka/server/checkpoints/OffsetCheckpointFile.scala +++ b/core/src/main/scala/kafka/server/checkpoints/OffsetCheckpointFile.scala @@ -68,7 +68,7 @@ class OffsetCheckpointFile(val file: File, logDirFailureChannel: LogDirFailureCh def write(offsets: Map[TopicPartition, Long]): Unit = { 
val list: java.util.List[(TopicPartition, Long)] = new java.util.ArrayList[(TopicPartition, Long)](offsets.size) offsets.foreach(x => list.add(x)) - checkpoint.write(list) + checkpoint.write(list, true) } def read(): Map[TopicPartition, Long] = { diff --git a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala index e986f6e61ed13..048a665757b74 100644 --- a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala @@ -20,7 +20,7 @@ package kafka.server.metadata import java.util.{OptionalInt, Properties} import kafka.coordinator.transaction.TransactionCoordinator import kafka.log.LogManager -import kafka.server.{KafkaConfig, ReplicaManager, RequestLocal} +import kafka.server.{BrokerLifecycleManager, KafkaConfig, ReplicaManager, RequestLocal} import kafka.utils.Logging import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.TimeoutException @@ -74,6 +74,7 @@ class BrokerMetadataPublisher( aclPublisher: AclPublisher, fatalFaultHandler: FaultHandler, metadataPublishingFaultHandler: FaultHandler, + brokerLifecycleManager: BrokerLifecycleManager, ) extends MetadataPublisher with Logging { logIdent = s"[BrokerMetadataPublisher id=${config.nodeId}] " diff --git a/core/src/main/scala/kafka/tools/StorageTool.scala b/core/src/main/scala/kafka/tools/StorageTool.scala index c9f5139585225..c79548761d082 100644 --- a/core/src/main/scala/kafka/tools/StorageTool.scala +++ b/core/src/main/scala/kafka/tools/StorageTool.scala @@ -45,117 +45,89 @@ import scala.jdk.CollectionConverters._ import scala.collection.mutable.ArrayBuffer object StorageTool extends Logging { - def main(args: Array[String]): Unit = { - var exitCode: Integer = 0 - var message: Option[String] = None try { - exitCode = execute(args) - } catch { - case e: TerseFailure => - exitCode = 1 - message = 
Some(e.getMessage) - } - message.foreach(System.err.println) - Exit.exit(exitCode, message) - } + val namespace = parseArguments(args) + val command = namespace.getString("command") + val config = Option(namespace.getString("config")).flatMap( + p => Some(new KafkaConfig(Utils.loadProps(p)))) + command match { + case "info" => + val directories = configToLogDirectories(config.get) + val selfManagedMode = configToSelfManagedMode(config.get) + Exit.exit(infoCommand(System.out, selfManagedMode, directories)) + + case "format" => + val directories = configToLogDirectories(config.get) + val clusterId = namespace.getString("cluster_id") + val metaProperties = new MetaProperties.Builder(). + setVersion(MetaPropertiesVersion.V1). + setClusterId(clusterId). + setNodeId(config.get.nodeId). + build() + val metadataRecords : ArrayBuffer[ApiMessageAndVersion] = ArrayBuffer() + val specifiedFeatures: util.List[String] = namespace.getList("feature") + val releaseVersionFlagSpecified = namespace.getString("release_version") != null + if (releaseVersionFlagSpecified && specifiedFeatures != null) { + throw new TerseFailure("Both --release-version and --feature were set. 
Only one of the two flags can be set.") + } + val featureNamesAndLevelsMap = featureNamesAndLevels(Option(specifiedFeatures).getOrElse(Collections.emptyList).asScala.toList) + val metadataVersion = getMetadataVersion(namespace, featureNamesAndLevelsMap, + Option(config.get.originals.get(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG)).map(_.toString)) + validateMetadataVersion(metadataVersion, config) + // Get all other features, validate, and create records for them + // Use latest default for features if --release-version is not specified + generateFeatureRecords( + metadataRecords, + metadataVersion, + featureNamesAndLevelsMap, + Features.PRODUCTION_FEATURES.asScala.toList, + config.get.unstableFeatureVersionsEnabled, + releaseVersionFlagSpecified + ) + getUserScramCredentialRecords(namespace).foreach(userScramCredentialRecords => { + if (!metadataVersion.isScramSupported) { + throw new TerseFailure(s"SCRAM is only supported in metadata.version ${MetadataVersion.IBP_3_5_IV2} or later.") + } + for (record <- userScramCredentialRecords) { + metadataRecords.append(new ApiMessageAndVersion(record, 0.toShort)) + } + }) - /** - * Executes the command according to the given arguments and returns the appropriate exit code. 
- * @param args The command line arguments - * @return The exit code - */ - def execute(args: Array[String]): Int = { - val namespace = parseArguments(args) - val command = namespace.getString("command") - val config = Option(namespace.getString("config")).flatMap( - p => Some(new KafkaConfig(Utils.loadProps(p)))) - command match { - case "info" => - val directories = configToLogDirectories(config.get) - val selfManagedMode = configToSelfManagedMode(config.get) - infoCommand(System.out, selfManagedMode, directories) - - case "format" => - runFormatCommand(namespace, config.get) - - case "random-uuid" => - System.out.println(Uuid.randomUuid) - 0 - case _ => - throw new RuntimeException(s"Unknown command $command") - } - } + val bootstrapMetadata = buildBootstrapMetadata(metadataVersion, Some(metadataRecords), "format command") + val ignoreFormatted = namespace.getBoolean("ignore_formatted") + if (!configToSelfManagedMode(config.get)) { + throw new TerseFailure("The kafka configuration file appears to be for " + + "a legacy cluster. Formatting is only supported for clusters in KRaft mode.") + } + Exit.exit(formatCommand(System.out, directories, metaProperties, bootstrapMetadata, + metadataVersion,ignoreFormatted)) - /** - * Validates arguments, configuration, prepares bootstrap metadata and delegates to {{@link formatCommand}}. - * Visible for testing. - * @param namespace Arguments - * @param config The server configuration - * @return The exit code - */ - def runFormatCommand(namespace: Namespace, config: KafkaConfig) = { - val directories = configToLogDirectories(config) - val clusterId = namespace.getString("cluster_id") - val metaProperties = new MetaProperties.Builder(). - setVersion(MetaPropertiesVersion.V1). - setClusterId(clusterId). - setNodeId(config.nodeId). 
- build() - val metadataRecords : ArrayBuffer[ApiMessageAndVersion] = ArrayBuffer() - val specifiedFeatures: util.List[String] = namespace.getList("feature") - val releaseVersionFlagSpecified = namespace.getString("release_version") != null - if (releaseVersionFlagSpecified && specifiedFeatures != null) { - throw new TerseFailure("Both --release-version and --feature were set. Only one of the two flags can be set.") - } - val featureNamesAndLevelsMap = featureNamesAndLevels(Option(specifiedFeatures).getOrElse(Collections.emptyList).asScala.toList) - val metadataVersion = getMetadataVersion(namespace, featureNamesAndLevelsMap, - Option(config.originals.get(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG)).map(_.toString)) - validateMetadataVersion(metadataVersion, config) - // Get all other features, validate, and create records for them - // Use latest default for features if --release-version is not specified - generateFeatureRecords( - metadataRecords, - metadataVersion, - featureNamesAndLevelsMap, - Features.PRODUCTION_FEATURES.asScala.toList, - config.unstableFeatureVersionsEnabled, - releaseVersionFlagSpecified - ) - getUserScramCredentialRecords(namespace).foreach(userScramCredentialRecords => { - if (!metadataVersion.isScramSupported) { - throw new TerseFailure(s"SCRAM is only supported in metadata.version ${MetadataVersion.IBP_3_5_IV2} or later.") - } - for (record <- userScramCredentialRecords) { - metadataRecords.append(new ApiMessageAndVersion(record, 0.toShort)) + case "random-uuid" => + System.out.println(Uuid.randomUuid) + Exit.exit(0) + + case _ => + throw new RuntimeException(s"Unknown command $command") } - }) - val bootstrapMetadata = buildBootstrapMetadata(metadataVersion, Some(metadataRecords), "format command") - val ignoreFormatted = namespace.getBoolean("ignore_formatted") - if (!configToSelfManagedMode(config)) { - throw new TerseFailure("The kafka configuration file appears to be for " + - "a legacy cluster. 
Formatting is only supported for clusters in KRaft mode.") + } catch { + case e: TerseFailure => + System.err.println(e.getMessage) + Exit.exit(1, Some(e.getMessage)) } - formatCommand(System.out, directories, metaProperties, bootstrapMetadata, - metadataVersion,ignoreFormatted) } - private def validateMetadataVersion(metadataVersion: MetadataVersion, config: KafkaConfig): Unit = { + private def validateMetadataVersion(metadataVersion: MetadataVersion, config: Option[KafkaConfig]): Unit = { if (!metadataVersion.isKRaftSupported) { throw new TerseFailure(s"Must specify a valid KRaft metadata.version of at least ${MetadataVersion.IBP_3_0_IV0}.") } if (!metadataVersion.isProduction) { - if (config.unstableFeatureVersionsEnabled) { + if (config.get.unstableFeatureVersionsEnabled) { System.out.println(s"WARNING: using pre-production metadata.version $metadataVersion.") } else { throw new TerseFailure(s"The metadata.version $metadataVersion is not ready for production use yet.") } } - try { - config.validateWithMetadataVersion(metadataVersion) - } catch { - case e: IllegalArgumentException => throw new TerseFailure(s"Invalid configuration for metadata version: ${e.getMessage}") - } } private[tools] def generateFeatureRecords(metadataRecords: ArrayBuffer[ApiMessageAndVersion], @@ -530,7 +502,7 @@ object StorageTool extends Logging { metaPropertiesEnsemble.verify(metaProperties.clusterId(), metaProperties.nodeId(), util.EnumSet.noneOf(classOf[VerificationFlag])) - stream.println(s"metaPropertiesEnsemble=$metaPropertiesEnsemble") + System.out.println(s"metaPropertiesEnsemble=$metaPropertiesEnsemble") val copier = new MetaPropertiesEnsemble.Copier(metaPropertiesEnsemble) if (!(ignoreFormatted || copier.logDirProps().isEmpty)) { val firstLogDir = copier.logDirProps().keySet().iterator().next() diff --git a/core/src/main/scala/kafka/tools/TestRaftServer.scala b/core/src/main/scala/kafka/tools/TestRaftServer.scala index 0acae6c5dc3a2..d357ad0bd5635 100644 --- 
a/core/src/main/scala/kafka/tools/TestRaftServer.scala +++ b/core/src/main/scala/kafka/tools/TestRaftServer.scala @@ -95,7 +95,6 @@ class TestRaftServer( metrics, Some(threadNamePrefix), CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(config.quorumVoters)), - QuorumConfig.parseBootstrapServers(config.quorumBootstrapServers), new ProcessTerminatingFaultHandler.Builder().build() ) diff --git a/core/src/main/scala/kafka/zk/AdminZkClient.scala b/core/src/main/scala/kafka/zk/AdminZkClient.scala index cd9153c07dc99..efecfe854bbf2 100644 --- a/core/src/main/scala/kafka/zk/AdminZkClient.scala +++ b/core/src/main/scala/kafka/zk/AdminZkClient.scala @@ -163,7 +163,7 @@ class AdminZkClient(zkClient: KafkaZkClient, LogConfig.validate(config, kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + kafkaConfig.exists(_.isRemoteLogStorageSystemEnabled)) } private def writeTopicPartitionAssignment(topic: String, replicaAssignment: Map[Int, ReplicaAssignment], @@ -481,7 +481,7 @@ class AdminZkClient(zkClient: KafkaZkClient, // remove the topic overrides LogConfig.validate(configs, kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + kafkaConfig.exists(_.isRemoteLogStorageSystemEnabled)) } /** diff --git a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java index 83fc5966b49dc..ba1c70eaae5c0 100644 --- a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java +++ b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java @@ -30,6 +30,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.config.AbstractConfig; import 
org.apache.kafka.common.errors.ReplicaNotAvailableException; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.network.ListenerName; @@ -60,15 +61,14 @@ import org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType; import org.apache.kafka.server.metrics.KafkaMetricsGroup; import org.apache.kafka.server.metrics.KafkaYammerMetrics; -import org.apache.kafka.server.util.MockScheduler; -import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; +import org.apache.kafka.storage.internals.checkpoint.InMemoryLeaderEpochCheckpoint; +import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpoint; import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; import org.apache.kafka.storage.internals.log.EpochEntry; import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.FetchIsolation; import org.apache.kafka.storage.internals.log.LazyIndex; import org.apache.kafka.storage.internals.log.LogConfig; -import org.apache.kafka.storage.internals.log.LogDirFailureChannel; import org.apache.kafka.storage.internals.log.LogFileUtils; import org.apache.kafka.storage.internals.log.LogSegment; import org.apache.kafka.storage.internals.log.OffsetIndex; @@ -90,19 +90,16 @@ import scala.Option; import scala.collection.JavaConverters; -import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.File; import java.io.InputStream; import java.io.FileInputStream; import java.io.IOException; -import java.io.InputStreamReader; -import java.io.UncheckedIOException; import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -202,23 +199,32 @@ public class RemoteLogManagerTest { private final EpochEntry epochEntry1 = new EpochEntry(1, 
100); private final EpochEntry epochEntry2 = new EpochEntry(2, 200); private final List totalEpochEntries = Arrays.asList(epochEntry0, epochEntry1, epochEntry2); - private LeaderEpochCheckpointFile checkpoint; + private final LeaderEpochCheckpoint checkpoint = new LeaderEpochCheckpoint() { + List epochs = Collections.emptyList(); + + @Override + public void write(Collection epochs, boolean ignored) { + this.epochs = new ArrayList<>(epochs); + } + + @Override + public List read() { + return epochs; + } + }; private final AtomicLong currentLogStartOffset = new AtomicLong(0L); private UnifiedLog mockLog = mock(UnifiedLog.class); - private final MockScheduler scheduler = new MockScheduler(time); - @BeforeEach void setUp() throws Exception { - checkpoint = new LeaderEpochCheckpointFile(TestUtils.tempFile(), new LogDirFailureChannel(1)); topicIds.put(leaderTopicIdPartition.topicPartition().topic(), leaderTopicIdPartition.topicId()); topicIds.put(followerTopicIdPartition.topicPartition().topic(), followerTopicIdPartition.topicId()); Properties props = kafka.utils.TestUtils.createDummyBrokerConfig(); props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true"); props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, "100"); remoteLogManagerConfig = createRLMConfig(props); - brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig().isRemoteStorageSystemEnabled()); + brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled()); remoteLogManager = new RemoteLogManager(remoteLogManagerConfig, brokerId, logDir, clusterId, time, tp -> Optional.of(mockLog), @@ -249,11 +255,13 @@ void tearDown() { @Test void testGetLeaderEpochCheckpoint() { checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); 
when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); - assertEquals(totalEpochEntries, remoteLogManager.getLeaderEpochEntries(mockLog, 0, 300)); + InMemoryLeaderEpochCheckpoint inMemoryCheckpoint = remoteLogManager.getLeaderEpochCheckpoint(mockLog, 0, 300); + assertEquals(totalEpochEntries, inMemoryCheckpoint.read()); - List epochEntries = remoteLogManager.getLeaderEpochEntries(mockLog, 100, 200); + InMemoryLeaderEpochCheckpoint inMemoryCheckpoint2 = remoteLogManager.getLeaderEpochCheckpoint(mockLog, 100, 200); + List epochEntries = inMemoryCheckpoint2.read(); assertEquals(1, epochEntries.size()); assertEquals(epochEntry1, epochEntries.get(0)); } @@ -265,7 +273,7 @@ void testFindHighestRemoteOffsetOnEmptyRemoteStorage() throws RemoteStorageExce new EpochEntry(1, 500) ); checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); OffsetAndEpoch offsetAndEpoch = remoteLogManager.findHighestRemoteOffset(tpId, mockLog); @@ -279,7 +287,7 @@ void testFindHighestRemoteOffset() throws RemoteStorageException { new EpochEntry(1, 500) ); checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); when(remoteLogMetadataManager.highestOffsetForEpoch(eq(tpId), anyInt())).thenAnswer(ans -> { @@ -302,7 +310,7 @@ void testFindHighestRemoteOffsetWithUncleanLeaderElection() throws RemoteStorage new EpochEntry(2, 300) ); checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + 
LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); when(remoteLogMetadataManager.highestOffsetForEpoch(eq(tpId), anyInt())).thenAnswer(ans -> { @@ -464,7 +472,7 @@ private void assertCopyExpectedLogSegmentsToRemote(long oldSegmentStartOffset, // leader epoch preparation checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(-1L)); @@ -578,7 +586,7 @@ void testCustomMetadataSizeExceedsLimit() throws Exception { // leader epoch preparation checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(-1L)); @@ -678,7 +686,7 @@ void testRemoteLogManagerTasksAvgIdlePercentAndMetadataCountMetrics() throws Exc // leader epoch preparation checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), 
anyInt())).thenReturn(Optional.of(0L)); @@ -797,7 +805,7 @@ void testRemoteLogTaskUpdateRemoteLogSegmentMetadataAfterLogDirChanged() throws // leader epoch preparation checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) .thenReturn(Optional.of(0L)) @@ -911,7 +919,7 @@ void testRemoteLogManagerRemoteMetrics() throws Exception { // leader epoch preparation checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); @@ -1061,7 +1069,7 @@ void testMetricsUpdateOnCopyLogSegmentsFailure() throws Exception { // leader epoch preparation checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); @@ -1134,7 +1142,7 @@ void testCopyLogSegmentsToRemoteShouldNotCopySegmentForFollower() throws Excepti // leader epoch preparation checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new 
LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); @@ -1170,7 +1178,7 @@ void testRLMTaskDoesNotUploadSegmentsWhenRemoteLogMetadataManagerIsNotInitialize // leader epoch preparation checkpoint.write(totalEpochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); // Throw a retryable exception so indicate that the remote log metadata manager is not initialized yet @@ -1252,7 +1260,9 @@ private void verifyLogSegmentData(LogSegmentData logSegmentData, assertEquals(tempFile.getAbsolutePath(), logSegmentData.logSegment().toAbsolutePath().toString()); assertEquals(mockProducerSnapshotIndex.getAbsolutePath(), logSegmentData.producerSnapshotIndex().toAbsolutePath().toString()); - assertEquals(RemoteLogManager.epochEntriesAsByteBuffer(expectedLeaderEpoch), logSegmentData.leaderEpochIndex()); + InMemoryLeaderEpochCheckpoint inMemoryLeaderEpochCheckpoint = new InMemoryLeaderEpochCheckpoint(); + inMemoryLeaderEpochCheckpoint.write(expectedLeaderEpoch); + assertEquals(inMemoryLeaderEpochCheckpoint.readAsByteBuffer(), logSegmentData.leaderEpochIndex()); } @Test @@ -1371,7 +1381,7 @@ void testFindOffsetByTimestamp() throws IOException, RemoteStorageException { TreeMap validSegmentEpochs = new TreeMap<>(); validSegmentEpochs.put(targetLeaderEpoch, startOffset); - LeaderEpochFileCache leaderEpochFileCache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache leaderEpochFileCache = new 
LeaderEpochFileCache(tp, checkpoint); leaderEpochFileCache.assign(4, 99L); leaderEpochFileCache.assign(5, 99L); leaderEpochFileCache.assign(targetLeaderEpoch, startOffset); @@ -1406,7 +1416,7 @@ void testFindOffsetByTimestampWithInvalidEpochSegments() throws IOException, Rem validSegmentEpochs.put(targetLeaderEpoch - 1, startOffset - 1); // invalid epochs not aligning with leader epoch cache validSegmentEpochs.put(targetLeaderEpoch, startOffset); - LeaderEpochFileCache leaderEpochFileCache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache leaderEpochFileCache = new LeaderEpochFileCache(tp, checkpoint); leaderEpochFileCache.assign(4, 99L); leaderEpochFileCache.assign(5, 99L); leaderEpochFileCache.assign(targetLeaderEpoch, startOffset); @@ -1437,7 +1447,7 @@ void testFindOffsetByTimestampWithSegmentNotReady() throws IOException, RemoteSt TreeMap validSegmentEpochs = new TreeMap<>(); validSegmentEpochs.put(targetLeaderEpoch, startOffset); - LeaderEpochFileCache leaderEpochFileCache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache leaderEpochFileCache = new LeaderEpochFileCache(tp, checkpoint); leaderEpochFileCache.assign(4, 99L); leaderEpochFileCache.assign(5, 99L); leaderEpochFileCache.assign(targetLeaderEpoch, startOffset); @@ -1897,7 +1907,7 @@ public void testFindLogStartOffset() throws RemoteStorageException, IOException epochEntries.add(new EpochEntry(2, 550L)); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); long timestamp = time.milliseconds(); @@ -1935,7 +1945,7 @@ public void testFindLogStartOffsetFallbackToLocalLogStartOffsetWhenRemoteIsEmpty epochEntries.add(new EpochEntry(2, 550L)); checkpoint.write(epochEntries); - 
LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(mockLog.localLogStartOffset()).thenReturn(250L); when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) @@ -1960,7 +1970,7 @@ public void testLogStartOffsetUpdatedOnStartup() throws RemoteStorageException, epochEntries.add(new EpochEntry(2, 550L)); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); RemoteLogSegmentMetadata metadata = mock(RemoteLogSegmentMetadata.class); @@ -2003,7 +2013,7 @@ public void testDeletionOnRetentionBreachedSegments(long retentionSize, List epochEntries = Collections.singletonList(epochEntry0); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); @@ -2056,7 +2066,7 @@ public void testRemoteDeleteLagsOnRetentionBreachedSegments(long retentionSize, List epochEntries = Collections.singletonList(epochEntry0); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); @@ -2127,7 
+2137,7 @@ public void testDeleteRetentionMsBeingCancelledBeforeSecondDelete() throws Remot .thenAnswer(ans -> metadataList.iterator()); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); Map logProps = new HashMap<>(); @@ -2189,7 +2199,7 @@ public void testFailedDeleteExpiredSegments(long retentionSize, List epochEntries = Collections.singletonList(epochEntry0); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); @@ -2242,7 +2252,7 @@ public void testDeleteLogSegmentDueToRetentionSizeBreach(int segmentCount, new EpochEntry(4, 100L) ); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); int currentLeaderEpoch = epochEntries.get(epochEntries.size() - 1).epoch; long localLogSegmentsSize = 512L; @@ -2280,7 +2290,7 @@ public void testDeleteLogSegmentDueToRetentionTimeBreach(int segmentCount, new EpochEntry(4, 100L) ); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); int currentLeaderEpoch = epochEntries.get(epochEntries.size() - 1).epoch; long localLogSegmentsSize = 512L; @@ -2367,7 +2377,7 @@ public RemoteLogMetadataManager createRemoteLogMetadataManager() { .thenReturn(remoteLogSegmentMetadatas.iterator()); checkpoint.write(epochEntries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, 
scheduler); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); Map logProps = new HashMap<>(); @@ -2442,17 +2452,11 @@ private List listRemoteLogSegmentMetadataByTime(TopicI private Map truncateAndGetLeaderEpochs(List entries, Long startOffset, Long endOffset) { - LeaderEpochCheckpointFile myCheckpoint; - try { - myCheckpoint = new LeaderEpochCheckpointFile( - TestUtils.tempFile(), new LogDirFailureChannel(1)); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + InMemoryLeaderEpochCheckpoint myCheckpoint = new InMemoryLeaderEpochCheckpoint(); myCheckpoint.write(entries); - LeaderEpochFileCache cache = new LeaderEpochFileCache(null, myCheckpoint, scheduler); - cache.truncateFromStartAsyncFlush(startOffset); - cache.truncateFromEndAsyncFlush(endOffset); + LeaderEpochFileCache cache = new LeaderEpochFileCache(null, myCheckpoint); + cache.truncateFromStart(startOffset); + cache.truncateFromEnd(endOffset); return myCheckpoint.read().stream().collect(Collectors.toMap(e -> e.epoch, e -> e.startOffset)); } @@ -2679,7 +2683,7 @@ int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, l } } - + @Test public void testCopyQuotaManagerConfig() { Properties defaultProps = new Properties(); @@ -2699,7 +2703,7 @@ public void testCopyQuotaManagerConfig() { assertEquals(31, rlmCopyQuotaManagerConfig.numQuotaSamples()); assertEquals(1, rlmCopyQuotaManagerConfig.quotaWindowSizeSeconds()); } - + @Test public void testFetchQuotaManagerConfig() { Properties defaultProps = new Properties(); @@ -2720,21 +2724,6 @@ public void testFetchQuotaManagerConfig() { assertEquals(1, rlmFetchQuotaManagerConfig.quotaWindowSizeSeconds()); } - @Test - public void testEpochEntriesAsByteBuffer() throws Exception { - int expectedEpoch = 0; - long expectedStartOffset = 1L; - int expectedVersion = 0; - List epochs = Arrays.asList(new EpochEntry(expectedEpoch, 
expectedStartOffset)); - ByteBuffer buffer = RemoteLogManager.epochEntriesAsByteBuffer(epochs); - BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(buffer.array()), StandardCharsets.UTF_8)); - - assertEquals(String.valueOf(expectedVersion), bufferedReader.readLine()); - assertEquals(String.valueOf(epochs.size()), bufferedReader.readLine()); - assertEquals(expectedEpoch + " " + expectedStartOffset, bufferedReader.readLine()); - } - - private Partition mockPartition(TopicIdPartition topicIdPartition) { TopicPartition tp = topicIdPartition.topicPartition(); Partition partition = mock(Partition.class); @@ -2758,7 +2747,8 @@ private RemoteLogManagerConfig createRLMConfig(Properties props) { props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + remoteLogMetadataConsumerTestProp, remoteLogMetadataConsumerTestVal); props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + remoteLogMetadataProducerTestProp, remoteLogMetadataProducerTestVal); - return new RemoteLogManagerConfig(props); + AbstractConfig config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props); + return new RemoteLogManagerConfig(config); } } diff --git a/core/src/test/java/kafka/server/LogManagerIntegrationTest.java b/core/src/test/java/kafka/server/LogManagerIntegrationTest.java deleted file mode 100644 index 709454beccd6b..0000000000000 --- a/core/src/test/java/kafka/server/LogManagerIntegrationTest.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.server; - -import kafka.test.ClusterInstance; -import kafka.test.annotation.ClusterTest; -import kafka.test.annotation.Type; -import kafka.test.junit.ClusterTestExtensions; -import kafka.test.junit.RaftClusterInvocationContext; -import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.NewTopic; -import org.apache.kafka.clients.consumer.Consumer; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.ConsumerRecords; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.TopicPartitionInfo; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.apache.kafka.common.serialization.StringSerializer; -import org.apache.kafka.storage.internals.checkpoint.PartitionMetadataFile; -import org.apache.kafka.test.TestUtils; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.io.IOException; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import 
java.util.concurrent.ExecutionException; - -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; - -@ExtendWith(value = ClusterTestExtensions.class) -@Tag("integration") -public class LogManagerIntegrationTest { - private final ClusterInstance cluster; - - public LogManagerIntegrationTest(ClusterInstance cluster) { - this.cluster = cluster; - } - - @ClusterTest(types = {Type.KRAFT, Type.CO_KRAFT}, brokers = 3) - public void testRestartBrokerNoErrorIfMissingPartitionMetadata() throws IOException, ExecutionException, InterruptedException { - RaftClusterInvocationContext.RaftClusterInstance raftInstance = - (RaftClusterInvocationContext.RaftClusterInstance) cluster; - - try (Admin admin = cluster.createAdminClient()) { - admin.createTopics(Collections.singletonList(new NewTopic("foo", 1, (short) 3))).all().get(); - } - cluster.waitForTopic("foo", 1); - - Optional partitionMetadataFile = Optional.ofNullable( - raftInstance.getUnderlying().brokers().get(0).logManager() - .getLog(new TopicPartition("foo", 0), false).get() - .partitionMetadataFile().getOrElse(null)); - assertTrue(partitionMetadataFile.isPresent()); - - raftInstance.getUnderlying().brokers().get(0).shutdown(); - try (Admin admin = cluster.createAdminClient()) { - TestUtils.waitForCondition(() -> { - List partitionInfos = admin.describeTopics(Collections.singletonList("foo")) - .topicNameValues().get("foo").get().partitions(); - return partitionInfos.get(0).isr().size() == 2; - }, "isr size is not shrink to 2"); - } - - // delete partition.metadata file here to simulate the scenario that partition.metadata not flush to disk yet - partitionMetadataFile.get().delete(); - assertFalse(partitionMetadataFile.get().exists()); - raftInstance.getUnderlying().brokers().get(0).startup(); - // make sure there is no error 
during load logs - assertDoesNotThrow(() -> raftInstance.getUnderlying().fatalFaultHandler().maybeRethrowFirstException()); - try (Admin admin = cluster.createAdminClient()) { - TestUtils.waitForCondition(() -> { - List partitionInfos = admin.describeTopics(Collections.singletonList("foo")) - .topicNameValues().get("foo").get().partitions(); - return partitionInfos.get(0).isr().size() == 3; - }, "isr size is not expand to 3"); - } - - // make sure topic still work fine - Map producerConfigs = new HashMap<>(); - producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); - producerConfigs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - producerConfigs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - - try (Producer producer = new KafkaProducer<>(producerConfigs)) { - producer.send(new ProducerRecord<>("foo", 0, null, "bar")).get(); - producer.flush(); - } - - Map consumerConfigs = new HashMap<>(); - consumerConfigs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); - consumerConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); - consumerConfigs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); - consumerConfigs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); - - try (Consumer consumer = new KafkaConsumer<>(consumerConfigs)) { - consumer.assign(Collections.singletonList(new TopicPartition("foo", 0))); - consumer.seekToBeginning(Collections.singletonList(new TopicPartition("foo", 0))); - List values = new ArrayList<>(); - ConsumerRecords records = consumer.poll(Duration.ofMinutes(1)); - for (ConsumerRecord record : records) { - values.add(record.value()); - } - assertEquals(1, values.size()); - assertEquals("bar", values.get(0)); - } - } -} diff --git a/core/src/test/java/kafka/server/MetadataVersionConfigValidatorTest.java 
b/core/src/test/java/kafka/server/MetadataVersionConfigValidatorTest.java deleted file mode 100644 index a484d592e232f..0000000000000 --- a/core/src/test/java/kafka/server/MetadataVersionConfigValidatorTest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server; - -import org.apache.kafka.common.metadata.FeatureLevelRecord; -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.image.MetadataProvenance; -import org.apache.kafka.image.loader.LogDeltaManifest; -import org.apache.kafka.raft.LeaderAndEpoch; -import org.apache.kafka.server.common.MetadataVersion; -import org.apache.kafka.server.fault.FaultHandler; -import org.junit.jupiter.api.Test; - -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -public class MetadataVersionConfigValidatorTest { - - private static final LogDeltaManifest TEST_MANIFEST = LogDeltaManifest.newBuilder() - .provenance(MetadataProvenance.EMPTY) - .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) - .numBatches(1) - .elapsedNs(90) - .numBytes(88) - .build(); - public static final MetadataProvenance TEST_PROVENANCE = - new MetadataProvenance(50, 3, 8000); - - void testWith(MetadataVersion metadataVersion, KafkaConfig config, FaultHandler faultHandler) throws Exception { - try (MetadataVersionConfigValidator validator = new MetadataVersionConfigValidator(config, faultHandler)) { - MetadataDelta delta = new MetadataDelta.Builder() - .setImage(MetadataImage.EMPTY) - .build(); - if (metadataVersion != null) { - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). 
- setFeatureLevel(metadataVersion.featureLevel())); - } - MetadataImage image = delta.apply(TEST_PROVENANCE); - - validator.onMetadataUpdate(delta, image, TEST_MANIFEST); - } - } - - @Test - void testValidatesConfigOnMetadataChange() throws Exception { - MetadataVersion metadataVersion = MetadataVersion.IBP_3_7_IV2; - KafkaConfig config = mock(KafkaConfig.class); - FaultHandler faultHandler = mock(FaultHandler.class); - - when(config.brokerId()).thenReturn(8); - - testWith(metadataVersion, config, faultHandler); - - verify(config, times(1)).validateWithMetadataVersion(eq(metadataVersion)); - verifyNoMoreInteractions(faultHandler); - } - - @SuppressWarnings("ThrowableNotThrown") - @Test - void testInvokesFaultHandlerOnException() throws Exception { - MetadataVersion metadataVersion = MetadataVersion.IBP_3_7_IV2; - Exception exception = new Exception(); - KafkaConfig config = mock(KafkaConfig.class); - FaultHandler faultHandler = mock(FaultHandler.class); - - when(config.brokerId()).thenReturn(8); - willAnswer(invocation -> { - throw exception; - }).given(config).validateWithMetadataVersion(eq(metadataVersion)); - - testWith(metadataVersion, config, faultHandler); - - verify(config, times(1)).validateWithMetadataVersion(eq(metadataVersion)); - verify(faultHandler, times(1)).handleFault( - eq("Broker configuration does not support the cluster MetadataVersion"), - eq(exception)); - } -} diff --git a/core/src/test/java/kafka/server/SharePartitionTest.java b/core/src/test/java/kafka/server/SharePartitionTest.java deleted file mode 100644 index c5b57bc378da2..0000000000000 --- a/core/src/test/java/kafka/server/SharePartitionTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.server; - -import kafka.server.SharePartition.RecordState; - -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; - -public class SharePartitionTest { - - @Test - public void testRecordStateValidateTransition() { - // Null check. - assertThrows(NullPointerException.class, () -> RecordState.AVAILABLE.validateTransition(null)); - // Same state transition check. - assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.AVAILABLE)); - assertThrows(IllegalStateException.class, () -> RecordState.ACQUIRED.validateTransition(RecordState.ACQUIRED)); - assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ACKNOWLEDGED)); - assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ARCHIVED)); - // Invalid state transition to any other state from Acknowledged state. 
- assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.AVAILABLE)); - assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ACQUIRED)); - assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ARCHIVED)); - // Invalid state transition to any other state from Archived state. - assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.AVAILABLE)); - assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ACKNOWLEDGED)); - assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ARCHIVED)); - // Invalid state transition to any other state from Available state other than Acquired. - assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.ACKNOWLEDGED)); - assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.ARCHIVED)); - - // Successful transition from Available to Acquired. - assertEquals(RecordState.ACQUIRED, RecordState.AVAILABLE.validateTransition(RecordState.ACQUIRED)); - // Successful transition from Acquired to any state. - assertEquals(RecordState.AVAILABLE, RecordState.ACQUIRED.validateTransition(RecordState.AVAILABLE)); - assertEquals(RecordState.ACKNOWLEDGED, RecordState.ACQUIRED.validateTransition(RecordState.ACKNOWLEDGED)); - assertEquals(RecordState.ARCHIVED, RecordState.ACQUIRED.validateTransition(RecordState.ARCHIVED)); - } - - @Test - public void testRecordStateForId() { - assertEquals(RecordState.AVAILABLE, RecordState.forId((byte) 0)); - assertEquals(RecordState.ACQUIRED, RecordState.forId((byte) 1)); - assertEquals(RecordState.ACKNOWLEDGED, RecordState.forId((byte) 2)); - assertEquals(RecordState.ARCHIVED, RecordState.forId((byte) 4)); - // Invalid check. 
- assertThrows(IllegalArgumentException.class, () -> RecordState.forId((byte) 5)); - } -} diff --git a/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java b/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java index 5c1cfc2f59f15..058ab9522b283 100644 --- a/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java +++ b/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java @@ -125,6 +125,7 @@ void testDescribeTopicPartitionsRequest() { Action expectedActions2 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), 1, true, true); Action expectedActions3 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedNonExistTopic, PatternType.LITERAL), 1, true, true); + // Here we need to use AuthHelperTest.matchSameElements instead of EasyMock.eq since the order of the request is unknown when(authorizer.authorize(any(RequestContext.class), argThat(t -> t.contains(expectedActions1) || t.contains(expectedActions2) || t.contains(expectedActions3)))) .thenAnswer(invocation -> { @@ -327,6 +328,7 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() { Action expectedActions1 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), 1, true, true); Action expectedActions2 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic2, PatternType.LITERAL), 1, true, true); + // Here we need to use AuthHelperTest.matchSameElements instead of EasyMock.eq since the order of the request is unknown when(authorizer.authorize(any(RequestContext.class), argThat(t -> t.contains(expectedActions1) || t.contains(expectedActions2)))) .thenAnswer(invocation -> { diff --git a/core/src/test/java/kafka/test/ClusterConfigTest.java b/core/src/test/java/kafka/test/ClusterConfigTest.java 
index 9d5ac4957b7f1..228c01cf02566 100644 --- a/core/src/test/java/kafka/test/ClusterConfigTest.java +++ b/core/src/test/java/kafka/test/ClusterConfigTest.java @@ -29,8 +29,6 @@ import java.lang.reflect.Field; import java.util.Arrays; import java.util.Collections; -import java.util.List; -import java.util.Set; import java.util.Map; import java.util.stream.Collectors; @@ -98,18 +96,4 @@ public void testDisksPerBrokerIsZero() { .setDisksPerBroker(0) .build()); } - - @Test - public void testDisplayTags() { - List tags = Arrays.asList("tag 1", "tag 2", "tag 3"); - ClusterConfig clusterConfig = ClusterConfig.defaultBuilder().setTags(tags).build(); - - Set expectedDisplayTags = clusterConfig.displayTags(); - - Assertions.assertTrue(expectedDisplayTags.contains("tag 1")); - Assertions.assertTrue(expectedDisplayTags.contains("tag 2")); - Assertions.assertTrue(expectedDisplayTags.contains("tag 3")); - Assertions.assertTrue(expectedDisplayTags.contains("MetadataVersion=" + MetadataVersion.latestTesting())); - Assertions.assertTrue(expectedDisplayTags.contains("Security=" + SecurityProtocol.PLAINTEXT)); - } } diff --git a/core/src/test/java/kafka/test/ClusterTestExtensionsTest.java b/core/src/test/java/kafka/test/ClusterTestExtensionsTest.java index aba96eccdd81b..c8b53f8b8a2ba 100644 --- a/core/src/test/java/kafka/test/ClusterTestExtensionsTest.java +++ b/core/src/test/java/kafka/test/ClusterTestExtensionsTest.java @@ -185,7 +185,7 @@ public void testNoAutoStart() { @ClusterTest public void testDefaults(ClusterInstance clusterInstance) { - Assertions.assertEquals(MetadataVersion.IBP_4_0_IV0, clusterInstance.config().metadataVersion()); + Assertions.assertEquals(MetadataVersion.IBP_4_0_IVO, clusterInstance.config().metadataVersion()); } @ClusterTests({ diff --git a/core/src/test/java/kafka/test/annotation/ClusterTest.java b/core/src/test/java/kafka/test/annotation/ClusterTest.java index 5557abeb335dc..bd95249b4f457 100644 --- 
a/core/src/test/java/kafka/test/annotation/ClusterTest.java +++ b/core/src/test/java/kafka/test/annotation/ClusterTest.java @@ -40,7 +40,7 @@ AutoStart autoStart() default AutoStart.DEFAULT; SecurityProtocol securityProtocol() default SecurityProtocol.PLAINTEXT; String listener() default ""; - MetadataVersion metadataVersion() default MetadataVersion.IBP_4_0_IV0; + MetadataVersion metadataVersion() default MetadataVersion.IBP_4_0_IVO; ClusterConfigProperty[] serverProperties() default {}; // users can add tags that they want to display in test String[] tags() default {}; diff --git a/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java b/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java index 94d94dc71735e..5365652a5fcb5 100644 --- a/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java +++ b/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java @@ -239,15 +239,12 @@ public KafkaClusterTestKit build() throws Exception { ThreadUtils.createThreadFactory("kafka-cluster-test-kit-executor-%d", false)); for (ControllerNode node : nodes.controllerNodes().values()) { setupNodeDirectories(baseDirectory, node.metadataDirectory(), Collections.emptyList()); - SharedServer sharedServer = new SharedServer( - createNodeConfig(node), - node.initialMetaPropertiesEnsemble(), - Time.SYSTEM, - new Metrics(), - connectFutureManager.future, - Collections.emptyList(), - faultHandlerFactory - ); + SharedServer sharedServer = new SharedServer(createNodeConfig(node), + node.initialMetaPropertiesEnsemble(), + Time.SYSTEM, + new Metrics(), + connectFutureManager.future, + faultHandlerFactory); ControllerServer controller = null; try { controller = new ControllerServer( @@ -270,18 +267,13 @@ public KafkaClusterTestKit build() throws Exception { jointServers.put(node.id(), sharedServer); } for (BrokerNode node : nodes.brokerNodes().values()) { - SharedServer sharedServer = jointServers.computeIfAbsent( - node.id(), - id -> new SharedServer( - createNodeConfig(node), + 
SharedServer sharedServer = jointServers.computeIfAbsent(node.id(), + id -> new SharedServer(createNodeConfig(node), node.initialMetaPropertiesEnsemble(), Time.SYSTEM, new Metrics(), connectFutureManager.future, - Collections.emptyList(), - faultHandlerFactory - ) - ); + faultHandlerFactory)); BrokerServer broker = null; try { broker = new BrokerServer(sharedServer); diff --git a/core/src/test/resources/log4j.properties b/core/src/test/resources/log4j.properties index b265ee9cdaaf1..f7fb7364a3c38 100644 --- a/core/src/test/resources/log4j.properties +++ b/core/src/test/resources/log4j.properties @@ -21,5 +21,6 @@ log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n log4j.logger.kafka=WARN log4j.logger.org.apache.kafka=WARN + # zkclient can be verbose, during debugging it is common to adjust it separately log4j.logger.org.apache.zookeeper=WARN diff --git a/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala b/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala index d29f05b36b3a8..d242ea105e665 100644 --- a/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala @@ -23,7 +23,6 @@ import org.apache.kafka.clients.consumer._ import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} import org.apache.kafka.common.record.TimestampType import org.apache.kafka.common.TopicPartition -import org.apache.kafka.test.{TestUtils => JTestUtils} import kafka.utils.TestUtils import kafka.server.BaseRequestTest import org.junit.jupiter.api.Assertions._ @@ -91,14 +90,12 @@ abstract class AbstractConsumerTest extends BaseRequestTest { s"The current assignment is ${consumer.assignment()}") } - def awaitNonEmptyRecords[K, V](consumer: Consumer[K, V], - partition: TopicPartition, - pollTimeoutMs: Long = 100): ConsumerRecords[K, V] = { + def awaitNonEmptyRecords[K, V](consumer: Consumer[K, V], partition: TopicPartition): ConsumerRecords[K, 
V] = { TestUtils.pollRecordsUntilTrue(consumer, (polledRecords: ConsumerRecords[K, V]) => { if (polledRecords.records(partition).asScala.nonEmpty) return polledRecords false - }, s"Consumer did not consume any messages for partition $partition before timeout.", JTestUtils.DEFAULT_MAX_WAIT_MS, pollTimeoutMs) + }, s"Consumer did not consume any messages for partition $partition before timeout.") throw new IllegalStateException("Should have timed out before reaching here") } diff --git a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala index 2a21873ed208b..ffe10c19c4392 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala @@ -204,21 +204,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertFutureExceptionTypeEquals(results.get(nonExistingTopicId), classOf[UnknownTopicIdException]) } - @ParameterizedTest - @ValueSource(strings = Array("zk", "kraft")) - def testDescribeTopicsWithNames(quorum: String): Unit = { - client = createAdminClient - - val existingTopic = "existing-topic" - client.createTopics(Seq(existingTopic).map(new NewTopic(_, 1, 1.toShort)).asJava).all.get() - waitForTopics(client, Seq(existingTopic), List()) - ensureConsistentKRaftMetadata() - - val existingTopicId = brokers.head.metadataCache.getTopicId(existingTopic) - val results = client.describeTopics(TopicCollection.ofTopicNames(Seq(existingTopic).asJava)).topicNameValues() - assertEquals(existingTopicId, results.get(existingTopic).get.topicId()) - } - @ParameterizedTest @ValueSource(strings = Array("zk", "kraft")) def testDescribeCluster(quorum: String): Unit = { diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala index 
0184a6eea67c0..daed397e43f23 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala @@ -238,19 +238,6 @@ class PlaintextConsumerPollTest extends AbstractConsumerTest { runMultiConsumerSessionTimeoutTest(true) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) - def testPollEventuallyReturnsRecordsWithZeroTimeout(quorum: String, groupProtocol: String): Unit = { - val numMessages = 100 - val producer = createProducer() - sendRecords(producer, numMessages, tp) - - val consumer = createConsumer() - consumer.subscribe(Set(topic).asJava) - val records = awaitNonEmptyRecords(consumer, tp, 0L) - assertEquals(numMessages, records.count()) - } - def runMultiConsumerSessionTimeoutTest(closeConsumer: Boolean): Unit = { // use consumers defined in this class plus one additional consumer // Use topic defined in this class + one additional topic diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala index 997ef6c8b5dcd..6c708adf82453 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala @@ -12,16 +12,14 @@ */ package kafka.api -import kafka.utils.{TestInfoUtils, TestUtils} +import kafka.utils.TestInfoUtils import org.apache.kafka.clients.consumer._ import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.errors.InvalidTopicException import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} -import java.time.Duration import java.util.regex.Pattern import 
java.util.stream.Stream import scala.jdk.CollectionConverters._ @@ -225,26 +223,6 @@ class PlaintextConsumerSubscriptionTest extends AbstractConsumerTest { assertEquals(0, consumer.assignment.size()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) - def testSubscribeInvalidTopic(quorum: String, groupProtocol: String): Unit = { - // Invalid topic name due to space - val invalidTopicName = "topic abc" - val consumer = createConsumer() - - consumer.subscribe(List(invalidTopicName).asJava) - - var exception : InvalidTopicException = null - TestUtils.waitUntilTrue(() => { - try consumer.poll(Duration.ofMillis(500)) catch { - case e : InvalidTopicException => exception = e - case e : Throwable => fail(s"An InvalidTopicException should be thrown. But ${e.getClass} is thrown") - } - exception != null - }, waitTimeMs = 5000, msg = "An InvalidTopicException should be thrown.") - - assertEquals(s"Invalid topics: [${invalidTopicName}]", exception.getMessage) - } } object PlaintextConsumerSubscriptionTest { diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala index b4617af1503a8..7d32d1bd3e04d 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala @@ -21,12 +21,12 @@ import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.admin.{NewPartitions, NewTopic} import org.apache.kafka.clients.consumer._ import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} +import org.apache.kafka.common.{KafkaException, MetricName, TopicPartition} import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.{InvalidGroupIdException, InvalidTopicException, TimeoutException, WakeupException} import 
org.apache.kafka.common.header.Headers import org.apache.kafka.common.record.{CompressionType, TimestampType} import org.apache.kafka.common.serialization._ -import org.apache.kafka.common.{KafkaException, MetricName, TopicPartition} import org.apache.kafka.test.{MockConsumerInterceptor, MockProducerInterceptor} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Timeout diff --git a/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala b/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala index ea1ffaf0b1179..c35385bcbc8ca 100644 --- a/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala +++ b/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala @@ -40,7 +40,6 @@ class DelayedRemoteFetchTest { private val fetchOffset = 500L private val logStartOffset = 0L private val currentLeaderEpoch = Optional.of[Integer](10) - private val remoteFetchMaxWaitMs = 500 private val fetchStatus = FetchPartitionStatus( startOffsetMetadata = new LogOffsetMetadata(fetchOffset), @@ -65,8 +64,8 @@ class DelayedRemoteFetchTest { val leaderLogStartOffset = 10 val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) - val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) + val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, Seq(topicIdPartition -> fetchStatus), fetchParams, + Seq(topicIdPartition -> logReadInfo), replicaManager, callback) when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) .thenReturn(mock(classOf[Partition])) @@ -101,8 +100,8 @@ class DelayedRemoteFetchTest { val leaderLogStartOffset = 10 val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) val fetchParams = buildFetchParams(replicaId = 1, maxWaitMs = 500) - 
assertThrows(classOf[IllegalStateException], () => new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback)) + assertThrows(classOf[IllegalStateException], () => new DelayedRemoteFetch(null, future, fetchInfo, Seq(topicIdPartition -> fetchStatus), fetchParams, + Seq(topicIdPartition -> logReadInfo), replicaManager, callback)) } @Test @@ -125,8 +124,8 @@ class DelayedRemoteFetchTest { val logReadInfo = buildReadResult(Errors.NONE) - val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) + val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, Seq(topicIdPartition -> fetchStatus), fetchParams, + Seq(topicIdPartition -> logReadInfo), replicaManager, callback) // delayed remote fetch should still be able to complete assertTrue(delayedRemoteFetch.tryComplete()) @@ -156,8 +155,8 @@ class DelayedRemoteFetchTest { // build a read result with error val logReadInfo = buildReadResult(Errors.FENCED_LEADER_EPOCH) - val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) + val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, Seq(topicIdPartition -> fetchStatus), fetchParams, + Seq(topicIdPartition -> logReadInfo), replicaManager, callback) assertTrue(delayedRemoteFetch.tryComplete()) assertTrue(delayedRemoteFetch.isCompleted) @@ -185,8 +184,8 @@ class DelayedRemoteFetchTest { val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null, false) val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) - val 
delayedRemoteFetch = new DelayedRemoteFetch(remoteFetchTask, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) + val delayedRemoteFetch = new DelayedRemoteFetch(remoteFetchTask, future, fetchInfo, Seq(topicIdPartition -> fetchStatus), fetchParams, + Seq(topicIdPartition -> logReadInfo), replicaManager, callback) when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) .thenReturn(mock(classOf[Partition])) diff --git a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala index 82b5b4cfd1e39..9f787a1b16881 100755 --- a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala +++ b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala @@ -124,15 +124,12 @@ class KRaftQuorumImplementation( metaPropertiesEnsemble.verify(Optional.of(clusterId), OptionalInt.of(config.nodeId), util.EnumSet.of(REQUIRE_AT_LEAST_ONE_VALID, REQUIRE_METADATA_LOG_DIR)) - val sharedServer = new SharedServer( - config, + val sharedServer = new SharedServer(config, metaPropertiesEnsemble, time, new Metrics(), controllerQuorumVotersFuture, - controllerQuorumVotersFuture.get().values(), - faultHandlerFactory - ) + faultHandlerFactory) var broker: BrokerServer = null try { broker = new BrokerServer(sharedServer) @@ -374,15 +371,12 @@ abstract class QuorumTestHarness extends Logging { metaPropertiesEnsemble.verify(Optional.of(metaProperties.clusterId().get()), OptionalInt.of(nodeId), util.EnumSet.of(REQUIRE_AT_LEAST_ONE_VALID, REQUIRE_METADATA_LOG_DIR)) - val sharedServer = new SharedServer( - config, + val sharedServer = new SharedServer(config, metaPropertiesEnsemble, Time.SYSTEM, new Metrics(), controllerQuorumVotersFuture, - Collections.emptyList(), - faultHandlerFactory - ) + faultHandlerFactory) var controllerServer: ControllerServer = null try { 
controllerServer = new ControllerServer( diff --git a/core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala b/core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala index e98a8fdeccb70..c0b6d916ec228 100644 --- a/core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala @@ -73,7 +73,7 @@ object ZkMigrationIntegrationTest { MetadataVersion.IBP_3_7_IV2, MetadataVersion.IBP_3_7_IV4, MetadataVersion.IBP_3_8_IV0, - MetadataVersion.IBP_4_0_IV0 + MetadataVersion.IBP_4_0_IVO ).map { mv => val serverProperties = new util.HashMap[String, String]() serverProperties.put("inter.broker.listener.name", "EXTERNAL") diff --git a/core/src/test/scala/kafka/metrics/LinuxIoMetricsCollectorTest.scala b/core/src/test/scala/kafka/metrics/LinuxIoMetricsCollectorTest.scala new file mode 100644 index 0000000000000..5ef2bb0c58064 --- /dev/null +++ b/core/src/test/scala/kafka/metrics/LinuxIoMetricsCollectorTest.scala @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka.metrics + +import java.nio.charset.StandardCharsets +import java.nio.file.Files +import kafka.utils.Logging +import org.apache.kafka.server.util.MockTime +import org.apache.kafka.test.TestUtils +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} +import org.junit.jupiter.api.{Test, Timeout} + +@Timeout(120) +class LinuxIoMetricsCollectorTest extends Logging { + + class TestDirectory { + val baseDir = TestUtils.tempDirectory() + val selfDir = Files.createDirectories(baseDir.toPath.resolve("self")) + + def writeProcFile(readBytes: Long, writeBytes: Long) = { + val bld = new StringBuilder() + bld.append("rchar: 0%n".format()) + bld.append("wchar: 0%n".format()) + bld.append("syscr: 0%n".format()) + bld.append("syscw: 0%n".format()) + bld.append("read_bytes: %d%n".format(readBytes)) + bld.append("write_bytes: %d%n".format(writeBytes)) + bld.append("cancelled_write_bytes: 0%n".format()) + Files.write(selfDir.resolve("io"), bld.toString().getBytes(StandardCharsets.UTF_8)) + } + } + + @Test + def testReadProcFile(): Unit = { + val testDirectory = new TestDirectory() + val time = new MockTime(100, 1000) + testDirectory.writeProcFile(123L, 456L) + val collector = new LinuxIoMetricsCollector(testDirectory.baseDir.getAbsolutePath, + time, logger.underlying) + + // Test that we can read the values we wrote. + assertTrue(collector.usable()) + assertEquals(123L, collector.readBytes()) + assertEquals(456L, collector.writeBytes()) + testDirectory.writeProcFile(124L, 457L) + + // The previous values should still be cached. + assertEquals(123L, collector.readBytes()) + assertEquals(456L, collector.writeBytes()) + + // Update the time, and the values should be re-read.
+ time.sleep(1) + assertEquals(124L, collector.readBytes()) + assertEquals(457L, collector.writeBytes()) + } + + @Test + def testUnableToReadNonexistentProcFile(): Unit = { + val testDirectory = new TestDirectory() + val time = new MockTime(100, 1000) + val collector = new LinuxIoMetricsCollector(testDirectory.baseDir.getAbsolutePath, + time, logger.underlying) + + // Test that we can't read the file, since it hasn't been written. + assertFalse(collector.usable()) + } +} diff --git a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala index 3a1fc2e4bda83..457326cd19adf 100644 --- a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala @@ -86,7 +86,7 @@ class KafkaConfigTest { @Test def testBrokerRoleNodeIdValidation(): Unit = { - // Ensure that validation is happening at startup to check that brokers do not use their node.id as a voter in controller.quorum.voters + // Ensure that validation is happening at startup to check that brokers do not use their node.id as a voter in controller.quorum.voters val propertiesFile = new Properties propertiesFile.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") propertiesFile.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") @@ -102,7 +102,7 @@ class KafkaConfigTest { @Test def testControllerRoleNodeIdValidation(): Unit = { - // Ensure that validation is happening at startup to check that controllers use their node.id as a voter in controller.quorum.voters + // Ensure that validation is happening at startup to check that controllers use their node.id as a voter in controller.quorum.voters val propertiesFile = new Properties propertiesFile.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") propertiesFile.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala index 
2e9bc068978bd..32ddfc6418d4d 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala @@ -299,8 +299,7 @@ class PartitionLockTest extends Logging { val log = super.createLog(isNew, isFutureReplica, offsetCheckpoints, None, None) val logDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(log.topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - log.dir, log.topicPartition, logDirFailureChannel, log.config.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(log.dir, log.topicPartition, logDirFailureChannel, log.config.recordVersion, "") val maxTransactionTimeout = 5 * 60 * 1000 val producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfigs.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false) val producerStateManager = new ProducerStateManager( diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala index 6dc6cc2a3c1f7..2134dcfaaa0c0 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala @@ -434,8 +434,7 @@ class PartitionTest extends AbstractPartitionTest { val log = super.createLog(isNew, isFutureReplica, offsetCheckpoints, None, None) val logDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(log.topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - log.dir, log.topicPartition, logDirFailureChannel, log.config.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(log.dir, log.topicPartition, logDirFailureChannel, log.config.recordVersion, "") val maxTransactionTimeoutMs = 5 * 60 * 1000 val producerStateManagerConfig = new 
ProducerStateManagerConfig(TransactionLogConfigs.PRODUCER_ID_EXPIRATION_MS_DEFAULT, true) val producerStateManager = new ProducerStateManager( diff --git a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala index 828f6eb111c99..7ed403f0a9557 100755 --- a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala +++ b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala @@ -17,29 +17,28 @@ package kafka.integration -import java.util.Properties -import java.util.concurrent.ExecutionException +import org.apache.kafka.common.config.{ConfigException, ConfigResource} +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} + import scala.util.Random import scala.jdk.CollectionConverters._ import scala.collection.{Map, Seq} -import kafka.server.{KafkaBroker, KafkaConfig, QuorumTestHarness} +import org.apache.log4j.{Level, Logger} +import java.util.Properties +import java.util.concurrent.ExecutionException + +import kafka.server.{KafkaConfig, KafkaServer} import kafka.utils.{CoreUtils, TestUtils} import kafka.utils.TestUtils._ +import kafka.server.QuorumTestHarness import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.config.{ConfigResource, TopicConfig} -import org.apache.kafka.common.errors.{InvalidConfigurationException, TimeoutException} +import org.apache.kafka.common.errors.TimeoutException import org.apache.kafka.common.serialization.StringDeserializer -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsResult, ConfigEntry} +import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigsResult, Config, ConfigEntry} import org.apache.kafka.server.config.ReplicationConfigs -import 
org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.apache.log4j.{Level, Logger} -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource -import com.yammer.metrics.core.Meter + +import scala.annotation.nowarn class UncleanLeaderElectionTest extends QuorumTestHarness { val brokerId1 = 0 @@ -53,14 +52,11 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { var configProps2: Properties = _ var configs: Seq[KafkaConfig] = Seq.empty[KafkaConfig] - var brokers: Seq[KafkaBroker] = Seq.empty[KafkaBroker] - - var admin: Admin = _ + var servers: Seq[KafkaServer] = Seq.empty[KafkaServer] val random = new Random() val topic = "topic" + random.nextLong() val partitionId = 0 - val topicPartition = new TopicPartition(topic, partitionId) val kafkaApisLogger = Logger.getLogger(classOf[kafka.server.KafkaApis]) val networkProcessorLogger = Logger.getLogger(classOf[kafka.network.Processor]) @@ -69,8 +65,8 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) - configProps1 = createBrokerConfig(brokerId1, zkConnectOrNull) - configProps2 = createBrokerConfig(brokerId2, zkConnectOrNull) + configProps1 = createBrokerConfig(brokerId1, zkConnect) + configProps2 = createBrokerConfig(brokerId2, zkConnect) for (configProps <- List(configProps1, configProps2)) { configProps.put("controlled.shutdown.enable", enableControlledShutdown.toString) @@ -85,57 +81,50 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { @AfterEach override def tearDown(): Unit = { - brokers.foreach(broker => shutdownBroker(broker)) - brokers.foreach(broker => CoreUtils.delete(broker.config.logDirs)) + servers.foreach(server => shutdownServer(server)) + servers.foreach(server => CoreUtils.delete(server.config.logDirs)) // restore log levels 
kafkaApisLogger.setLevel(Level.ERROR) networkProcessorLogger.setLevel(Level.ERROR) - admin.close() - super.tearDown() } private def startBrokers(cluster: Seq[Properties]): Unit = { for (props <- cluster) { val config = KafkaConfig.fromProps(props) - val broker = createBroker(config = config) + val server = createServer(config) configs ++= List(config) - brokers ++= List(broker) + servers ++= List(server) } - - val adminConfigs = new Properties - admin = TestUtils.createAdminClient(brokers, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), adminConfigs) } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testUncleanLeaderElectionEnabled(quorum: String): Unit = { + @Test + def testUncleanLeaderElectionEnabled(): Unit = { // enable unclean leader election configProps1.put("unclean.leader.election.enable", "true") configProps2.put("unclean.leader.election.enable", "true") startBrokers(Seq(configProps1, configProps2)) // create topic with 1 partition, 2 replicas, one on each broker - TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) + TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)), servers) + verifyUncleanLeaderElectionEnabled() } - @ParameterizedTest - @ValueSource(strings = Array("zk")) + @Test def testUncleanLeaderElectionDisabled(): Unit = { // unclean leader election is disabled by default startBrokers(Seq(configProps1, configProps2)) // create topic with 1 partition, 2 replicas, one on each broker - TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) + TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)), servers) verifyUncleanLeaderElectionDisabled() } - @ParameterizedTest - @ValueSource(strings = Array("zk")) + @Test def testUncleanLeaderElectionEnabledByTopicOverride(): Unit = { // disable 
unclean leader election globally, but enable for our specific test topic configProps1.put("unclean.leader.election.enable", "false") @@ -144,14 +133,13 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { // create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election enabled val topicProps = new Properties() - topicProps.put(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") - TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2)), topicConfig = topicProps) + topicProps.put("unclean.leader.election.enable", "true") + TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)), servers, topicProps) verifyUncleanLeaderElectionEnabled() } - @ParameterizedTest - @ValueSource(strings = Array("zk")) + @Test def testUncleanLeaderElectionDisabledByTopicOverride(): Unit = { // enable unclean leader election globally, but disable for our specific test topic configProps1.put("unclean.leader.election.enable", "true") @@ -160,64 +148,58 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { // create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election disabled val topicProps = new Properties() - topicProps.put(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "false") - TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2)), topicConfig = topicProps) + topicProps.put("unclean.leader.election.enable", "false") + TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)), servers, topicProps) verifyUncleanLeaderElectionDisabled() } - @ParameterizedTest - @ValueSource(strings = Array("zk")) + @Test def testUncleanLeaderElectionInvalidTopicOverride(): Unit = { startBrokers(Seq(configProps1)) // create topic with an invalid value for unclean leader election val topicProps = new 
Properties() - topicProps.put(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "invalid") - - val e = assertThrows(classOf[ExecutionException], - () => TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2)), topicConfig = topicProps)) + topicProps.put("unclean.leader.election.enable", "invalid") - assertEquals(classOf[InvalidConfigurationException], e.getCause.getClass) + assertThrows(classOf[ConfigException], + () => TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1)), servers, topicProps)) } def verifyUncleanLeaderElectionEnabled(): Unit = { // wait until leader is elected - val leaderId = awaitLeaderChange(brokers, topicPartition) - debug("Leader for " + topic + " is elected to be: %s".format(leaderId)) + val leaderId = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId) + debug("Leader for " + topic + " is elected to be: %s".format(leaderId)) assertTrue(leaderId == brokerId1 || leaderId == brokerId2, "Leader id is set to expected value for topic: " + topic) // the non-leader broker is the follower val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1 - debug("Follower for " + topic + " is: %s".format(followerId)) + debug("Follower for " + topic + " is: %s".format(followerId)) - produceMessage(brokers, topic, "first") - waitForPartitionMetadata(brokers, topic, partitionId) + produceMessage(servers, topic, "first") + waitForPartitionMetadata(servers, topic, partitionId) assertEquals(List("first"), consumeAllMessages(topic, 1)) // shutdown follower server - brokers.filter(broker => broker.config.brokerId == followerId).map(broker => shutdownBroker(broker)) + servers.filter(server => server.config.brokerId == followerId).map(server => shutdownServer(server)) - produceMessage(brokers, topic, "second") + produceMessage(servers, topic, "second") assertEquals(List("first", "second"), consumeAllMessages(topic, 2)) - //verify that unclean 
election metric count is 0 - val uncleanLeaderElectionsPerSecGauge = getGauge("UncleanLeaderElectionsPerSec") - @volatile var uncleanLeaderElectionsPerSec = uncleanLeaderElectionsPerSecGauge.count() - assert(uncleanLeaderElectionsPerSec == 0) + //remove any previous unclean election metric + servers.map(_.kafkaController.controllerContext.stats.removeMetric("UncleanLeaderElectionsPerSec")) // shutdown leader and then restart follower - brokers.filter(_.config.brokerId == leaderId).map(shutdownBroker) - val followerBroker = brokers.find(_.config.brokerId == followerId).get - followerBroker.startup() + servers.filter(_.config.brokerId == leaderId).map(shutdownServer) + val followerServer = servers.find(_.config.brokerId == followerId).get + followerServer.startup() // wait until new leader is (uncleanly) elected - awaitLeaderChange(brokers, topicPartition, expectedLeaderOpt = Some(followerId), timeout = 30000) - uncleanLeaderElectionsPerSec = uncleanLeaderElectionsPerSecGauge.count() - assert(uncleanLeaderElectionsPerSec == 1) + waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId)) + assertEquals(1, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count()) - produceMessage(brokers, topic, "third") + produceMessage(servers, topic, "third") // second message was lost due to unclean election assertEquals(List("first", "third"), consumeAllMessages(topic, 2)) @@ -225,7 +207,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { def verifyUncleanLeaderElectionDisabled(): Unit = { // wait until leader is elected - val leaderId = awaitLeaderChange(brokers, topicPartition) + val leaderId = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId) debug("Leader for " + topic + " is elected to be: %s".format(leaderId)) assertTrue(leaderId == brokerId1 || leaderId == brokerId2, "Leader id is set to expected value for topic: " + topic) @@ -234,70 +216,60 @@ class UncleanLeaderElectionTest 
extends QuorumTestHarness { val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1 debug("Follower for " + topic + " is: %s".format(followerId)) - produceMessage(brokers, topic, "first") - waitForPartitionMetadata(brokers, topic, partitionId) + produceMessage(servers, topic, "first") + waitForPartitionMetadata(servers, topic, partitionId) assertEquals(List("first"), consumeAllMessages(topic, 1)) // shutdown follower server - brokers.filter(broker => broker.config.brokerId == followerId).map(broker => shutdownBroker(broker)) + servers.filter(server => server.config.brokerId == followerId).foreach(server => shutdownServer(server)) - produceMessage(brokers, topic, "second") + produceMessage(servers, topic, "second") assertEquals(List("first", "second"), consumeAllMessages(topic, 2)) //remove any previous unclean election metric - val uncleanLeaderElectionsPerSecGauge = getGauge("UncleanLeaderElectionsPerSec") - @volatile var uncleanLeaderElectionsPerSec = uncleanLeaderElectionsPerSecGauge.count() - assert(uncleanLeaderElectionsPerSec == 0) + servers.foreach(server => server.kafkaController.controllerContext.stats.removeMetric("UncleanLeaderElectionsPerSec")) // shutdown leader and then restart follower - brokers.filter(_.config.brokerId == leaderId).map(shutdownBroker) - val followerServer = brokers.find(_.config.brokerId == followerId).get + servers.filter(server => server.config.brokerId == leaderId).foreach(server => shutdownServer(server)) + val followerServer = servers.find(_.config.brokerId == followerId).get followerServer.startup() // verify that unclean election to non-ISR follower does not occur - awaitLeaderChange(brokers, topicPartition, expectedLeaderOpt = Some(leaderId)) - uncleanLeaderElectionsPerSec = uncleanLeaderElectionsPerSecGauge.count() - assert(uncleanLeaderElectionsPerSec == 0) + waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(-1)) + assertEquals(0, 
followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count()) // message production and consumption should both fail while leader is down - val e = assertThrows(classOf[ExecutionException], () => produceMessage(brokers, topic, "third", deliveryTimeoutMs = 1000, requestTimeoutMs = 1000)) + val e = assertThrows(classOf[ExecutionException], () => produceMessage(servers, topic, "third", deliveryTimeoutMs = 1000, requestTimeoutMs = 1000)) assertEquals(classOf[TimeoutException], e.getCause.getClass) assertEquals(List.empty[String], consumeAllMessages(topic, 0)) // restart leader temporarily to send a successfully replicated message - brokers.find(_.config.brokerId == leaderId).get.startup() - awaitLeaderChange(brokers, topicPartition, expectedLeaderOpt = Some(leaderId)) + servers.filter(server => server.config.brokerId == leaderId).foreach(server => server.startup()) + waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(leaderId)) - produceMessage(brokers, topic, "third") + produceMessage(servers, topic, "third") //make sure follower server joins the ISR TestUtils.waitUntilTrue(() => { val partitionInfoOpt = followerServer.metadataCache.getPartitionInfo(topic, partitionId) partitionInfoOpt.isDefined && partitionInfoOpt.get.isr.contains(followerId) }, "Inconsistent metadata after first server startup") - brokers.filter(_.config.brokerId == leaderId).map(shutdownBroker) - + servers.filter(server => server.config.brokerId == leaderId).foreach(server => shutdownServer(server)) // verify clean leader transition to ISR follower - awaitLeaderChange(brokers, topicPartition, expectedLeaderOpt = Some(followerId)) + waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId)) + // verify messages can be consumed from ISR follower that was just promoted to leader assertEquals(List("first", "second", "third"), consumeAllMessages(topic, 3)) } - private def getGauge(metricName: String) = { - 
KafkaYammerMetrics.defaultRegistry.allMetrics.asScala - .find { case (k, _) => k.getName.endsWith(metricName) } - .getOrElse(throw new AssertionError("Unable to find metric " + metricName)) - ._2.asInstanceOf[Meter] - } - - private def shutdownBroker(broker: KafkaBroker) = { - broker.shutdown() - broker.awaitShutdown() + private def shutdownServer(server: KafkaServer): Unit = { + server.shutdown() + server.awaitShutdown() } private def consumeAllMessages(topic: String, numMessages: Int): Seq[String] = { - val brokerList = TestUtils.plaintextBootstrapServers(brokers) + val brokerList = TestUtils.plaintextBootstrapServers(servers) // Don't rely on coordinator as it may be down when this method is called val consumer = TestUtils.createConsumer(brokerList, groupId = "group" + random.nextLong(), @@ -311,48 +283,42 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { } finally consumer.close() } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testTopicUncleanLeaderElectionEnableWithAlterTopicConfigs(): Unit = { + @Test + def testTopicUncleanLeaderElectionEnable(): Unit = { // unclean leader election is disabled by default startBrokers(Seq(configProps1, configProps2)) // create topic with 1 partition, 2 replicas, one on each broker - TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) + adminZkClient.createTopicWithAssignment(topic, config = new Properties(), Map(partitionId -> Seq(brokerId1, brokerId2))) // wait until leader is elected - val leaderId = awaitLeaderChange(brokers, topicPartition) + val leaderId = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId) // the non-leader broker is the follower val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1 - produceMessage(brokers, topic, "first") - waitForPartitionMetadata(brokers, topic, partitionId) + produceMessage(servers, topic, "first") + waitForPartitionMetadata(servers, 
topic, partitionId) assertEquals(List("first"), consumeAllMessages(topic, 1)) // shutdown follower server - brokers.filter(broker => broker.config.brokerId == followerId).map(broker => shutdownBroker(broker)) + servers.filter(server => server.config.brokerId == followerId).map(server => shutdownServer(server)) - produceMessage(brokers, topic, "second") + produceMessage(servers, topic, "second") assertEquals(List("first", "second"), consumeAllMessages(topic, 2)) - //verify that unclean election metric count is 0 - val uncleanLeaderElectionsPerSecGauge = getGauge("UncleanLeaderElectionsPerSec") - @volatile var uncleanLeaderElectionsPerSec = uncleanLeaderElectionsPerSecGauge.count() - assert(uncleanLeaderElectionsPerSec == 0) + //remove any previous unclean election metric + servers.map(server => server.kafkaController.controllerContext.stats.removeMetric("UncleanLeaderElectionsPerSec")) // shutdown leader and then restart follower - brokers.filter(_.config.brokerId == leaderId).map(shutdownBroker) - val followerBroker = brokers.find(_.config.brokerId == followerId).get - followerBroker.startup() + servers.filter(server => server.config.brokerId == leaderId).map(server => shutdownServer(server)) + val followerServer = servers.find(_.config.brokerId == followerId).get + followerServer.startup() - // leader should not change - awaitLeaderChange(brokers, topicPartition, expectedLeaderOpt = Some(leaderId), timeout = 30000) - uncleanLeaderElectionsPerSec = uncleanLeaderElectionsPerSecGauge.count() - assert(uncleanLeaderElectionsPerSec == 0) + assertEquals(0, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count()) // message production and consumption should both fail while leader is down - val e = assertThrows(classOf[ExecutionException], () => produceMessage(brokers, topic, "third", deliveryTimeoutMs = 1000, requestTimeoutMs = 1000)) + val e = assertThrows(classOf[ExecutionException], () => produceMessage(servers, topic, "third", 
deliveryTimeoutMs = 1000, requestTimeoutMs = 1000)) assertEquals(classOf[TimeoutException], e.getCause.getClass) assertEquals(List.empty[String], consumeAllMessages(topic, 0)) @@ -365,26 +331,26 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { adminClient.close() // wait until new leader is (uncleanly) elected - awaitLeaderChange(brokers, topicPartition, expectedLeaderOpt = Some(followerId), timeout = 30000) - uncleanLeaderElectionsPerSec = uncleanLeaderElectionsPerSecGauge.count() - assert(uncleanLeaderElectionsPerSec == 1) + waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId)) + assertEquals(1, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count()) - produceMessage(brokers, topic, "third") + produceMessage(servers, topic, "third") // second message was lost due to unclean election assertEquals(List("first", "third"), consumeAllMessages(topic, 2)) } + @nowarn("cat=deprecation") private def alterTopicConfigs(adminClient: Admin, topic: String, topicConfigs: Properties): AlterConfigsResult = { val configEntries = topicConfigs.asScala.map { case (k, v) => new ConfigEntry(k, v) }.toList.asJava - adminClient.incrementalAlterConfigs(Map(new ConfigResource(ConfigResource.Type.TOPIC, topic) -> - configEntries.asScala.map((e: ConfigEntry) => new AlterConfigOp(e, AlterConfigOp.OpType.SET)).toSeq - .asJavaCollection).asJava) + val newConfig = new Config(configEntries) + val configs = Map(new ConfigResource(ConfigResource.Type.TOPIC, topic) -> newConfig).asJava + adminClient.alterConfigs(configs) } private def createAdminClient(): Admin = { val config = new Properties - val bootstrapServers = TestUtils.plaintextBootstrapServers(brokers) + val bootstrapServers = TestUtils.plaintextBootstrapServers(servers) config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers) config.put(AdminClientConfig.METADATA_MAX_AGE_CONFIG, "10") Admin.create(config) diff --git 
a/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala index bdbcac462b8c2..f17c724066fe0 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala @@ -108,8 +108,7 @@ class LogCleanerManagerTest extends Logging { val maxTransactionTimeoutMs = 5 * 60 * 1000 val producerIdExpirationCheckIntervalMs = TransactionLogConfigs.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT val segments = new LogSegments(tp) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - tpDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(tpDir, topicPartition, logDirFailureChannel, config.recordVersion, "") val producerStateManager = new ProducerStateManager(topicPartition, tpDir, maxTransactionTimeoutMs, producerStateManagerConfig, time) val offsets = new LogLoader( tpDir, diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala index beaa5f4c3f0dd..99b1e35e4eed9 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala @@ -93,7 +93,7 @@ class LogCleanerTest extends Logging { val mockMetricsGroup = mockMetricsGroupCtor.constructed.get(0) val numMetricsRegistered = LogCleaner.MetricNames.size verify(mockMetricsGroup, times(numMetricsRegistered)).newGauge(anyString(), any()) - + // verify that each metric in `LogCleaner` is removed LogCleaner.MetricNames.foreach(verify(mockMetricsGroup).removeMetric(_)) @@ -168,8 +168,7 @@ class LogCleanerTest extends Logging { val maxTransactionTimeoutMs = 5 * 60 * 1000 val producerIdExpirationCheckIntervalMs = TransactionLogConfigs.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT val logSegments = new LogSegments(topicPartition) - val 
leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - dir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(dir, topicPartition, logDirFailureChannel, config.recordVersion, "") val producerStateManager = new ProducerStateManager(topicPartition, dir, maxTransactionTimeoutMs, producerStateManagerConfig, time) val offsets = new LogLoader( diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala index bd1ee39a4835e..ed91c936edc10 100644 --- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala @@ -22,7 +22,6 @@ import kafka.utils.TestUtils import org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM import org.apache.kafka.common.config.ConfigDef.Type.INT import org.apache.kafka.common.config.{ConfigException, SslConfigs, TopicConfig} -import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -298,7 +297,7 @@ class LogConfigTest { props.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, localRetentionMs.toString) props.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, localRetentionBytes.toString) assertThrows(classOf[ConfigException], - () => LogConfig.validate(props, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(props, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) } @Test @@ -310,17 +309,17 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + 
LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "delete,compact") assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact,delete") assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) } @ParameterizedTest(name = "testEnableRemoteLogStorage with sysRemoteStorageEnabled: {0}") @@ -333,10 +332,10 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") if (sysRemoteStorageEnabled) { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } else { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, 
kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains("Tiered Storage functionality is disabled in the broker")) } } @@ -356,10 +355,10 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_MS_CONFIG, "500") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) } else { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } @@ -378,10 +377,10 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_BYTES_CONFIG, "128") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } @@ -396,10 +395,10 @@ class LogConfigTest { if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => 
LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } @@ -420,21 +419,4 @@ class LogConfigTest { assertEquals(oneDayInMillis, logProps.get(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG)) assertEquals(oneDayInMillis, logProps.get(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG)) } - - @Test - def testValidateWithMetadataVersionJbodSupport(): Unit = { - def validate(metadataVersion: MetadataVersion, jbodConfig: Boolean): Unit = - KafkaConfig.fromProps( - TestUtils.createBrokerConfig(nodeId = 0, zkConnect = null, logDirCount = if (jbodConfig) 2 else 1) - ).validateWithMetadataVersion(metadataVersion) - - validate(MetadataVersion.IBP_3_6_IV2, jbodConfig = false) - validate(MetadataVersion.IBP_3_7_IV0, jbodConfig = false) - validate(MetadataVersion.IBP_3_7_IV2, jbodConfig = false) - assertThrows(classOf[IllegalArgumentException], () => - validate(MetadataVersion.IBP_3_6_IV2, jbodConfig = true)) - assertThrows(classOf[IllegalArgumentException], () => - validate(MetadataVersion.IBP_3_7_IV0, jbodConfig = true)) - validate(MetadataVersion.IBP_3_7_IV2, jbodConfig = true) - } } diff --git a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala index 5287ef67e8654..1a781a93ea667 100644 --- a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala @@ -128,7 +128,7 @@ class LogLoaderTest { logDirFailureChannel = logDirFailureChannel, time = time, keepPartitionMetadataFile = config.usesTopicId, - remoteStorageSystemEnable = 
config.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), + remoteStorageSystemEnable = config.remoteLogManagerConfig.enableRemoteStorageSystem(), initialTaskDelayMs = config.logInitialTaskDelayMs) { override def loadLog(logDir: File, hadCleanShutdown: Boolean, recoveryPoints: Map[TopicPartition, Long], @@ -154,8 +154,7 @@ class LogLoaderTest { val logStartOffset = logStartOffsets.getOrElse(topicPartition, 0L) val logDirFailureChannel: LogDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, topicPartition, logDirFailureChannel, config.recordVersion, "") val producerStateManager = new ProducerStateManager(topicPartition, logDir, this.maxTransactionTimeoutMs, this.producerStateManagerConfig, time) val logLoader = new LogLoader(logDir, topicPartition, config, time.scheduler, time, @@ -368,8 +367,7 @@ class LogLoaderTest { super.add(wrapper) } } - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "") val producerStateManager = new ProducerStateManager(topicPartition, logDir, maxTransactionTimeoutMs, producerStateManagerConfig, mockTime) val logLoader = new LogLoader( @@ -433,8 +431,7 @@ class LogLoaderTest { val logDirFailureChannel: LogDirFailureChannel = new LogDirFailureChannel(1) val config = new LogConfig(new Properties()) val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, mockTime.scheduler) + val 
leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, topicPartition, logDirFailureChannel, config.recordVersion, "") val offsets = new LogLoader( logDir, topicPartition, @@ -543,8 +540,7 @@ class LogLoaderTest { val config = new LogConfig(logProps) val logDirFailureChannel = null val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, topicPartition, logDirFailureChannel, config.recordVersion, "") val offsets = new LogLoader( logDir, topicPartition, @@ -598,8 +594,7 @@ class LogLoaderTest { val config = new LogConfig(logProps) val logDirFailureChannel = null val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, topicPartition, logDirFailureChannel, config.recordVersion, "") val offsets = new LogLoader( logDir, topicPartition, @@ -652,8 +647,7 @@ class LogLoaderTest { val config = new LogConfig(logProps) val logDirFailureChannel = null val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, topicPartition, logDirFailureChannel, config.recordVersion, "") val offsets = new LogLoader( logDir, topicPartition, @@ -1393,7 +1387,7 @@ class LogLoaderTest { assertEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) // deliberately remove some of the epoch entries - leaderEpochCache.truncateFromEndAsyncFlush(2) + 
leaderEpochCache.truncateFromEnd(2) assertNotEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) log.close() @@ -1795,8 +1789,7 @@ class LogLoaderTest { log.logSegments.forEach(segment => segments.add(segment)) assertEquals(5, segments.firstSegment.get.baseOffset) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "") val offsets = new LogLoader( logDir, topicPartition, diff --git a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala index 6e92094007152..13ce4d28e9d3d 100755 --- a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala @@ -1286,13 +1286,6 @@ class LogManagerTest { onDisk.foreach(log => assertEquals(expectedStrays.contains(log.topicPartition), LogManager.isStrayKraftReplica(0, image, log))) } - @Test - def testIsStrayKraftMissingTopicId(): Unit = { - val log = Mockito.mock(classOf[UnifiedLog]) - Mockito.when(log.topicId).thenReturn(Option.empty) - assertTrue(LogManager.isStrayKraftReplica(0, topicsImage(Seq()), log)) - } - @Test def testFindStrayReplicasInEmptyLAIR(): Unit = { val onDisk = Seq(foo0, foo1, bar0, bar1, baz0, baz1, baz2, quux0) diff --git a/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala b/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala index e2272941ab3d6..b559c192790c4 100644 --- a/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogSegmentTest.scala @@ -24,8 +24,7 @@ import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.{MockTime, Time, Utils} import 
org.apache.kafka.coordinator.transaction.TransactionLogConfigs -import org.apache.kafka.server.util.MockScheduler -import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile +import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpoint import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache import org.apache.kafka.storage.internals.log._ import org.junit.jupiter.api.Assertions._ @@ -34,6 +33,7 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{CsvSource, ValueSource} import java.io.{File, RandomAccessFile} +import java.util import java.util.{Optional, OptionalLong} import scala.collection._ import scala.jdk.CollectionConverters._ @@ -431,9 +431,17 @@ class LogSegmentTest { def testRecoveryRebuildsEpochCache(): Unit = { val seg = createSegment(0) - val checkpoint: LeaderEpochCheckpointFile = new LeaderEpochCheckpointFile(TestUtils.tempFile(), new LogDirFailureChannel(1)) + val checkpoint: LeaderEpochCheckpoint = new LeaderEpochCheckpoint { + private var epochs = Seq.empty[EpochEntry] - val cache = new LeaderEpochFileCache(topicPartition, checkpoint, new MockScheduler(new MockTime())) + override def write(epochs: util.Collection[EpochEntry], ignored: Boolean): Unit = { + this.epochs = epochs.asScala.toSeq + } + + override def read(): java.util.List[EpochEntry] = this.epochs.asJava + } + + val cache = new LeaderEpochFileCache(topicPartition, checkpoint) seg.append(105L, RecordBatch.NO_TIMESTAMP, 104L, MemoryRecords.withRecords(104L, Compression.NONE, 0, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) diff --git a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala index e2355dfe119e2..7c9d0ac0b4084 100644 --- a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala +++ b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala @@ -34,7 +34,8 @@ import org.apache.kafka.common.metrics.JmxReporter 
import org.apache.kafka.common.utils.Time import org.apache.kafka.metadata.migration.ZkMigrationState import org.apache.kafka.server.config.ServerLogConfigs -import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} +import org.apache.kafka.server.metrics.KafkaMetricsGroup +import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -109,7 +110,7 @@ class MetricsTest extends KafkaServerTestHarness with Logging { def testLinuxIoMetrics(quorum: String): Unit = { // Check if linux-disk-{read,write}-bytes metrics either do or do not exist depending on whether we are or are not // able to collect those metrics on the platform where this test is running. - val usable = new LinuxIoMetricsCollector("/proc", Time.SYSTEM).usable() + val usable = new LinuxIoMetricsCollector("/proc", Time.SYSTEM, logger.underlying).usable() val expectedCount = if (usable) 1 else 0 val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics Set("linux-disk-read-bytes", "linux-disk-write-bytes").foreach(name => diff --git a/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala b/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala index da9d29304e5d3..3416ffe65b690 100644 --- a/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala +++ b/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala @@ -118,7 +118,6 @@ class RaftManagerTest { new Metrics(Time.SYSTEM), Option.empty, CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(config.quorumVoters)), - QuorumConfig.parseBootstrapServers(config.quorumBootstrapServers), mock(classOf[FaultHandler]) ) } diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala index d17872099be39..a7415b5d50a2e 100644 --- 
a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala @@ -47,7 +47,7 @@ object ApiVersionsRequestTest { List(ClusterConfig.defaultBuilder() .setTypes(java.util.Collections.singleton(Type.ZK)) .setServerProperties(serverProperties) - .setMetadataVersion(MetadataVersion.IBP_4_0_IV0) + .setMetadataVersion(MetadataVersion.IBP_4_0_IVO) .build()).asJava } @@ -83,7 +83,7 @@ object ApiVersionsRequestTest { class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersionsRequestTest(cluster) { @ClusterTemplate("testApiVersionsRequestTemplate") - @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), metadataVersion = MetadataVersion.IBP_4_0_IV0, serverProperties = Array( + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), metadataVersion = MetadataVersion.IBP_4_0_IVO, serverProperties = Array( new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "false"), new ClusterConfigProperty(key = "unstable.feature.versions.enable", value = "true") )) diff --git a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala index 9a988a35c4ba9..34f9d139a03cc 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala @@ -28,22 +28,13 @@ import org.apache.kafka.metadata.BrokerState import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, Test, Timeout} +import org.junit.jupiter.api.{Test, Timeout} import java.util.concurrent.{CompletableFuture, Future} import scala.jdk.CollectionConverters._ @Timeout(value = 12) class BrokerLifecycleManagerTest { - private var manager: BrokerLifecycleManager = null - - @AfterEach - def tearDown(): Unit = { - 
if (manager != null) { - manager.close() - } - } - def configProperties = { val properties = new Properties() properties.setProperty(ServerLogConfigs.LOG_DIRS_CONFIG, "/tmp/foo") @@ -59,14 +50,14 @@ class BrokerLifecycleManagerTest { @Test def testCreateAndClose(): Unit = { val context = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(context.config, context.time, "create-and-close-", isZkBroker = false, Set(Uuid.fromString("oFoTeS9QT0aAyCyH41v45A"))) + val manager = new BrokerLifecycleManager(context.config, context.time, "create-and-close-", isZkBroker = false, Set(Uuid.fromString("oFoTeS9QT0aAyCyH41v45A"))) manager.close() } @Test def testCreateStartAndClose(): Unit = { val context = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(context.config, context.time, "create-start-and-close-", isZkBroker = false, Set(Uuid.fromString("uiUADXZWTPixVvp6UWFWnw"))) + val manager = new BrokerLifecycleManager(context.config, context.time, "create-start-and-close-", isZkBroker = false, Set(Uuid.fromString("uiUADXZWTPixVvp6UWFWnw"))) assertEquals(BrokerState.NOT_RUNNING, manager.state) manager.start(() => context.highestMetadataOffset.get(), context.mockChannelManager, context.clusterId, context.advertisedListeners, @@ -81,7 +72,7 @@ class BrokerLifecycleManagerTest { @Test def testSuccessfulRegistration(): Unit = { val context = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(context.config, context.time, "successful-registration-", isZkBroker = false, Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) + val manager = new BrokerLifecycleManager(context.config, context.time, "successful-registration-", isZkBroker = false, Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) val controllerNode = new Node(3000, "localhost", 8021) context.controllerNodeProvider.node.set(controllerNode) manager.start(() => context.highestMetadataOffset.get(), @@ -97,13 +88,14 @@ class 
BrokerLifecycleManagerTest { context.poll() assertEquals(1000L, manager.brokerEpoch) } + manager.close() } @Test def testRegistrationTimeout(): Unit = { val context = new RegistrationTestContext(configProperties) val controllerNode = new Node(3000, "localhost", 8021) - manager = new BrokerLifecycleManager(context.config, context.time, "registration-timeout-", isZkBroker = false, Set(Uuid.fromString("9XBOAtr4T0Wbx2sbiWh6xg"))) + val manager = new BrokerLifecycleManager(context.config, context.time, "registration-timeout-", isZkBroker = false, Set(Uuid.fromString("9XBOAtr4T0Wbx2sbiWh6xg"))) context.controllerNodeProvider.node.set(controllerNode) def newDuplicateRegistrationResponse(): Unit = { context.mockClient.prepareResponseFrom(new BrokerRegistrationResponse( @@ -138,12 +130,13 @@ class BrokerLifecycleManagerTest { assertTrue(manager.initialCatchUpFuture.isCompletedExceptionally) assertEquals(-1L, manager.brokerEpoch) } + manager.close() } @Test def testControlledShutdown(): Unit = { val context = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(context.config, context.time, "controlled-shutdown-", isZkBroker = false, Set(Uuid.fromString("B4RtUz1ySGip3A7ZFYB2dg"))) + val manager = new BrokerLifecycleManager(context.config, context.time, "controlled-shutdown-", isZkBroker = false, Set(Uuid.fromString("B4RtUz1ySGip3A7ZFYB2dg"))) val controllerNode = new Node(3000, "localhost", 8021) context.controllerNodeProvider.node.set(controllerNode) context.mockClient.prepareResponseFrom(new BrokerRegistrationResponse( @@ -193,6 +186,7 @@ class BrokerLifecycleManagerTest { assertEquals(BrokerState.SHUTTING_DOWN, manager.state) } manager.controlledShutdownFuture.get() + manager.close() } def prepareResponse[T<:AbstractRequest](ctx: RegistrationTestContext, response: AbstractResponse): Future[T] = { @@ -224,7 +218,7 @@ class BrokerLifecycleManagerTest { @Test def testAlwaysSendsAccumulatedOfflineDirs(): Unit = { val ctx = new 
RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(ctx.config, ctx.time, "offline-dirs-sent-in-heartbeat-", isZkBroker = false, Set(Uuid.fromString("0IbF1sjhSGG6FNvnrPbqQg"))) + val manager = new BrokerLifecycleManager(ctx.config, ctx.time, "offline-dirs-sent-in-heartbeat-", isZkBroker = false, Set(Uuid.fromString("0IbF1sjhSGG6FNvnrPbqQg"))) val controllerNode = new Node(3000, "localhost", 8021) ctx.controllerNodeProvider.node.set(controllerNode) @@ -244,13 +238,14 @@ class BrokerLifecycleManagerTest { assertEquals(Set("h3sC4Yk-Q9-fd0ntJTocCA", "ej8Q9_d2Ri6FXNiTxKFiow"), nextHeartbeatDirs()) manager.propagateDirectoryFailure(Uuid.fromString("1iF76HVNRPqC7Y4r6647eg"), Integer.MAX_VALUE) assertEquals(Set("h3sC4Yk-Q9-fd0ntJTocCA", "ej8Q9_d2Ri6FXNiTxKFiow", "1iF76HVNRPqC7Y4r6647eg"), nextHeartbeatDirs()) + manager.close() } @Test def testRegistrationIncludesDirs(): Unit = { val logDirs = Set("ad5FLIeCTnaQdai5vOjeng", "ybdzUKmYSLK6oiIpI6CPlw").map(Uuid.fromString) val ctx = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(ctx.config, ctx.time, "registration-includes-dirs-", + val manager = new BrokerLifecycleManager(ctx.config, ctx.time, "registration-includes-dirs-", isZkBroker = false, logDirs) val controllerNode = new Node(3000, "localhost", 8021) ctx.controllerNodeProvider.node.set(controllerNode) @@ -263,13 +258,14 @@ class BrokerLifecycleManagerTest { val request = poll(ctx, manager, registration).asInstanceOf[BrokerRegistrationRequest] assertEquals(logDirs, request.data.logDirs().asScala.toSet) + + manager.close() } @Test def testKraftJBODMetadataVersionUpdateEvent(): Unit = { val ctx = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(ctx.config, ctx.time, "jbod-metadata-version-update", isZkBroker = false, Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) - + val manager = new BrokerLifecycleManager(ctx.config, ctx.time, "jbod-metadata-version-update", isZkBroker 
= false, Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) val controllerNode = new Node(3000, "localhost", 8021) ctx.controllerNodeProvider.node.set(controllerNode) @@ -296,5 +292,7 @@ class BrokerLifecycleManagerTest { nextHeartbeatRequest() assertEquals(1200L, manager.brokerEpoch) + + manager.close() } } diff --git a/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestsTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestsTest.scala index 0e745f33d5bb3..b1f8b8405e743 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestsTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestsTest.scala @@ -16,8 +16,9 @@ */ package kafka.server +import kafka.server.GroupCoordinatorBaseRequestTest import kafka.test.ClusterInstance -import kafka.test.annotation._ +import kafka.test.annotation.{ClusterConfigProperty, ClusterFeature, ClusterTest, ClusterTestDefaults, Type} import kafka.test.junit.ClusterTestExtensions import kafka.utils.TestUtils import org.apache.kafka.common.ConsumerGroupState @@ -25,15 +26,11 @@ import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData.{Assign import org.apache.kafka.common.message.{ConsumerGroupDescribeRequestData, ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatResponseData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.{ConsumerGroupDescribeRequest, ConsumerGroupDescribeResponse} -import org.apache.kafka.common.resource.ResourceType -import org.apache.kafka.common.utils.Utils -import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.server.common.Features import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.extension.ExtendWith import org.junit.jupiter.api.{Tag, Timeout} -import java.lang.{Byte => JByte} import scala.jdk.CollectionConverters._ @Timeout(120) @@ -119,9 +116,6 @@ class ConsumerGroupDescribeRequestsTest(cluster: 
ClusterInstance) extends GroupC val timeoutMs = 5 * 60 * 1000 val clientId = "client-id" val clientHost = "/127.0.0.1" - val authorizedOperationsInt = Utils.to32BitField( - AclEntry.supportedOperations(ResourceType.GROUP).asScala - .map(_.code.asInstanceOf[JByte]).asJava) // Add first group with one member. var grp1Member1Response: ConsumerGroupHeartbeatResponseData = null @@ -168,7 +162,6 @@ class ConsumerGroupDescribeRequestsTest(cluster: ClusterInstance) extends GroupC .setGroupEpoch(1) .setAssignmentEpoch(1) .setAssignorName("uniform") - .setAuthorizedOperations(authorizedOperationsInt) .setMembers(List( new ConsumerGroupDescribeResponseData.Member() .setMemberId(grp1Member1Response.memberId) @@ -184,7 +177,6 @@ class ConsumerGroupDescribeRequestsTest(cluster: ClusterInstance) extends GroupC .setGroupEpoch(grp2Member2Response.memberEpoch) .setAssignmentEpoch(grp2Member2Response.memberEpoch) .setAssignorName("range") - .setAuthorizedOperations(authorizedOperationsInt) .setMembers(List( new ConsumerGroupDescribeResponseData.Member() .setMemberId(grp2Member2Response.memberId) @@ -227,8 +219,7 @@ class ConsumerGroupDescribeRequestsTest(cluster: ClusterInstance) extends GroupC val actual = consumerGroupDescribe( groupIds = List("grp-1", "grp-2"), - includeAuthorizedOperations = true, - version = version.toShort, + version = version.toShort ) assertEquals(expected, actual) diff --git a/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala b/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala index 2e5d77cdc6fae..328d6e41b5722 100755 --- a/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala @@ -792,39 +792,6 @@ class DynamicBrokerConfigTest { verifyIncorrectLogLocalRetentionProps(2000L, 1000L, -1, 100) } - @Test - def testDynamicRemoteFetchMaxWaitMsConfig(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) 
- val config = KafkaConfig(props) - val kafkaBroker = mock(classOf[KafkaBroker]) - when(kafkaBroker.config).thenReturn(config) - assertEquals(500, config.remoteFetchMaxWaitMs) - - val dynamicRemoteLogConfig = new DynamicRemoteLogConfig(kafkaBroker) - config.dynamicConfig.initialize(None, None) - config.dynamicConfig.addBrokerReconfigurable(dynamicRemoteLogConfig) - - val newProps = new Properties() - newProps.put(RemoteLogManagerConfig.REMOTE_FETCH_MAX_WAIT_MS_PROP, "30000") - // update default config - config.dynamicConfig.validate(newProps, perBrokerConfig = false) - config.dynamicConfig.updateDefaultConfig(newProps) - assertEquals(30000, config.remoteFetchMaxWaitMs) - - // update per broker config - newProps.put(RemoteLogManagerConfig.REMOTE_FETCH_MAX_WAIT_MS_PROP, "10000") - config.dynamicConfig.validate(newProps, perBrokerConfig = true) - config.dynamicConfig.updateBrokerConfig(0, newProps) - assertEquals(10000, config.remoteFetchMaxWaitMs) - - // invalid values - for (maxWaitMs <- Seq(-1, 0)) { - newProps.put(RemoteLogManagerConfig.REMOTE_FETCH_MAX_WAIT_MS_PROP, maxWaitMs.toString) - assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(newProps, perBrokerConfig = true)) - assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(newProps, perBrokerConfig = false)) - } - } - @Test def testUpdateDynamicRemoteLogManagerConfig(): Unit = { val origProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala index 0a4f50ba12094..2411e612d20f2 100644 --- a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala @@ -223,7 +223,7 @@ class FetchRequestTest extends BaseFetchRequestTest { // Force a leader change killBroker(firstLeaderId) // Write some more data in epoch 1 - val secondLeaderId = 
TestUtils.awaitLeaderChange(brokers, topicPartition, oldLeaderOpt = Some(firstLeaderId)) + val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, firstLeaderId) val secondLeaderEpoch = TestUtils.findLeaderEpoch(secondLeaderId, topicPartition, brokers) val secondEpochResponses = produceData(Seq(topicPartition), 100) val secondEpochEndOffset = secondEpochResponses.lastOption.get.offset + 1 @@ -285,7 +285,7 @@ class FetchRequestTest extends BaseFetchRequestTest { killBroker(firstLeaderId) // Check leader error codes - val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, oldLeaderOpt = Some(firstLeaderId)) + val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, firstLeaderId) val secondLeaderEpoch = TestUtils.findLeaderEpoch(secondLeaderId, topicPartition, brokers) assertResponseErrorForEpoch(Errors.NONE, secondLeaderId, Optional.empty()) assertResponseErrorForEpoch(Errors.NONE, secondLeaderId, Optional.of(secondLeaderEpoch)) @@ -322,7 +322,7 @@ class FetchRequestTest extends BaseFetchRequestTest { // -1 is treated as having no epoch at all killBroker(firstLeaderId) - val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, oldLeaderOpt = Some(firstLeaderId)) + val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, firstLeaderId) val secondLeaderEpoch = TestUtils.findLeaderEpoch(secondLeaderId, topicPartition, brokers) verifyFetchSessionErrors(topicPartition, secondLeaderEpoch, secondLeaderId, version) diff --git a/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala index 9fad21476e73c..847bdf3225f54 100644 --- a/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala @@ -421,13 +421,10 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { protected def 
consumerGroupDescribe( groupIds: List[String], - includeAuthorizedOperations: Boolean, version: Short = ApiKeys.CONSUMER_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled) ): List[ConsumerGroupDescribeResponseData.DescribedGroup] = { val consumerGroupDescribeRequest = new ConsumerGroupDescribeRequest.Builder( - new ConsumerGroupDescribeRequestData() - .setGroupIds(groupIds.asJava) - .setIncludeAuthorizedOperations(includeAuthorizedOperations) + new ConsumerGroupDescribeRequestData().setGroupIds(groupIds.asJava) ).build(version) val consumerGroupDescribeResponse = connectAndReceive[ConsumerGroupDescribeResponse](consumerGroupDescribeRequest) diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index 208f9d59e1b6f..bd0ded29debf9 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -76,7 +76,6 @@ import org.apache.kafka.common.utils.{ProducerIdAndEpoch, SecurityUtils, Utils} import org.apache.kafka.coordinator.group.{GroupCoordinator, GroupCoordinatorConfig} import org.apache.kafka.coordinator.transaction.TransactionLogConfigs import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.server.ClientMetricsManager import org.apache.kafka.server.authorizer.{Action, AuthorizationResult, Authorizer} import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_2_IV0, IBP_2_2_IV1} @@ -93,7 +92,6 @@ import org.mockito.ArgumentMatchers._ import org.mockito.Mockito._ import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} -import java.lang.{Byte => JByte} import java.net.InetAddress import java.nio.charset.StandardCharsets import java.time.Duration @@ -2827,6 +2825,7 @@ class KafkaApisTest extends Logging { @Test def requiredAclsNotPresentWriteTxnMarkersThrowsAuthorizationException(): Unit = { + // Here we need to use 
AuthHelperTest.matchSameElements instead of EasyMock.eq since the order of the request is unknown val topicPartition = new TopicPartition("t", 0) val (_, request) = createWriteTxnMarkersRequest(asList(topicPartition)) @@ -4099,6 +4098,7 @@ class KafkaApisTest extends Logging { new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), 1, true, true) ) + // Here we need to use AuthHelperTest.matchSameElements instead of EasyMock.eq since the order of the request is unknown when(authorizer.authorize(any[RequestContext], argThat((t: java.util.List[Action]) => t.containsAll(expectedActions.asJava)))) .thenAnswer { invocation => val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]].asScala @@ -7115,9 +7115,8 @@ class KafkaApisTest extends Logging { assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.errorCode) } - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testConsumerGroupDescribe(includeAuthorizedOperations: Boolean): Unit = { + @Test + def testConsumerGroupDescribe(): Unit = { metadataCache = mock(classOf[KRaftMetadataCache]) when(metadataCache.features()).thenReturn { new FinalizedFeatures( @@ -7130,7 +7129,6 @@ class KafkaApisTest extends Logging { val groupIds = List("group-id-0", "group-id-1", "group-id-2").asJava val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() - .setIncludeAuthorizedOperations(includeAuthorizedOperations) consumerGroupDescribeRequestData.groupIds.addAll(groupIds) val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) @@ -7145,27 +7143,15 @@ class KafkaApisTest extends Logging { ) kafkaApis.handle(requestChannelRequest, RequestLocal.NoCaching) - future.complete(List( - new DescribedGroup().setGroupId(groupIds.get(0)), - new DescribedGroup().setGroupId(groupIds.get(1)), - new DescribedGroup().setGroupId(groupIds.get(2)) - 
).asJava) - - var authorizedOperationsInt = Int.MinValue; - if (includeAuthorizedOperations) { - authorizedOperationsInt = Utils.to32BitField( - AclEntry.supportedOperations(ResourceType.GROUP).asScala - .map(_.code.asInstanceOf[JByte]).asJava) - } - - // Can't reuse the above list here because we would not test the implementation in KafkaApis then val describedGroups = List( new DescribedGroup().setGroupId(groupIds.get(0)), new DescribedGroup().setGroupId(groupIds.get(1)), new DescribedGroup().setGroupId(groupIds.get(2)) - ).map(group => group.setAuthorizedOperations(authorizedOperationsInt)) + ).asJava + + future.complete(describedGroups) val expectedConsumerGroupDescribeResponseData = new ConsumerGroupDescribeResponseData() - .setGroups(describedGroups.asJava) + .setGroups(describedGroups) val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index 506925c3339cb..b917b3c3a2b64 100755 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -19,7 +19,7 @@ package kafka.server import java.net.InetSocketAddress import java.util -import java.util.{Arrays, Collections, Properties} +import java.util.{Collections, Properties} import kafka.cluster.EndPoint import kafka.security.authorizer.AclAuthorizer import kafka.utils.TestUtils.assertBadConfigContainingMessage @@ -1032,7 +1032,6 @@ class KafkaConfigTest { // Raft Quorum Configs case QuorumConfig.QUORUM_VOTERS_CONFIG => // ignore string - case QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG => // ignore string case QuorumConfig.QUORUM_ELECTION_TIMEOUT_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case QuorumConfig.QUORUM_FETCH_TIMEOUT_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case 
QuorumConfig.QUORUM_ELECTION_BACKOFF_MAX_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") @@ -1419,23 +1418,6 @@ class KafkaConfigTest { assertEquals(expectedVoters, addresses) } - @Test - def testParseQuorumBootstrapServers(): Unit = { - val expected = Arrays.asList( - InetSocketAddress.createUnresolved("kafka1", 9092), - InetSocketAddress.createUnresolved("kafka2", 9092) - ) - - val props = TestUtils.createBrokerConfig(0, null) - props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "kafka1:9092,kafka2:9092") - - val addresses = QuorumConfig.parseBootstrapServers( - KafkaConfig.fromProps(props).quorumBootstrapServers - ) - - assertEquals(expected, addresses) - } - @Test def testAcceptsLargeNodeIdForRaftBasedCase(): Unit = { // Generation of Broker IDs is not supported when using Raft-based controller quorums, diff --git a/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala index 358005cb6098e..48397a4b71d7a 100644 --- a/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala @@ -125,7 +125,7 @@ class ListOffsetsRequestTest extends BaseRequestTest { killBroker(firstLeaderId) // Check leader error codes - val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, oldLeaderOpt = Some(firstLeaderId)) + val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, firstLeaderId) val secondLeaderEpoch = TestUtils.findLeaderEpoch(secondLeaderId, topicPartition, brokers) assertResponseErrorForEpoch(Errors.NONE, secondLeaderId, Optional.empty()) assertResponseErrorForEpoch(Errors.NONE, secondLeaderId, Optional.of(secondLeaderEpoch)) @@ -198,7 +198,7 @@ class ListOffsetsRequestTest extends BaseRequestTest { // Kill the first leader so that we can verify the epoch change when fetching the latest offset killBroker(firstLeaderId) - val secondLeaderId 
= TestUtils.awaitLeaderChange(brokers, partition, oldLeaderOpt = Some(firstLeaderId)) + val secondLeaderId = TestUtils.awaitLeaderChange(brokers, partition, firstLeaderId) // make sure high watermark of new leader has caught up TestUtils.waitUntilTrue(() => sendRequest(secondLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, -1).errorCode != Errors.OFFSET_NOT_AVAILABLE.code, "the second leader does not sync to follower") diff --git a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala index 21dba44f9f718..a3bdb9a172355 100755 --- a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala @@ -138,7 +138,7 @@ class LogRecoveryTest extends QuorumTestHarness { assertEquals(hw, hwFile1.read().getOrElse(topicPartition, 0L)) // check if leader moves to the other server - leader = awaitLeaderChange(servers, topicPartition, oldLeaderOpt = Some(leader)) + leader = awaitLeaderChange(servers, topicPartition, leader) assertEquals(1, leader, "Leader must move to broker 1") // bring the preferred replica back @@ -166,7 +166,7 @@ class LogRecoveryTest extends QuorumTestHarness { server2.startup() updateProducer() - leader = awaitLeaderChange(servers, topicPartition, oldLeaderOpt = Some(leader)) + leader = awaitLeaderChange(servers, topicPartition, leader) assertTrue(leader == 0 || leader == 1, "Leader must remain on broker 0, in case of ZooKeeper session expiration it can move to broker 1") @@ -221,7 +221,7 @@ class LogRecoveryTest extends QuorumTestHarness { server2.startup() updateProducer() // check if leader moves to the other server - leader = awaitLeaderChange(servers, topicPartition, oldLeaderOpt = Some(leader)) + leader = awaitLeaderChange(servers, topicPartition, leader) assertEquals(1, leader, "Leader must move to broker 1") assertEquals(hw, hwFile1.read().getOrElse(topicPartition, 0L)) diff --git 
a/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala index afc934ac0bb68..696e6534bf047 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala @@ -96,7 +96,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator topic = "foo", partition = 0, offset = 100L, - expectedError = if (useNewProtocol && version < 9) Errors.UNSUPPORTED_VERSION else Errors.NONE, + expectedError = Errors.NONE, version = version.toShort ) diff --git a/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala index 29e1b70a05d09..78c10ec5ec4fd 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala @@ -79,7 +79,7 @@ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest { killBroker(firstLeaderId) // Check leader error codes - val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, oldLeaderOpt = Some(firstLeaderId)) + val secondLeaderId = TestUtils.awaitLeaderChange(brokers, topicPartition, firstLeaderId) val secondLeaderEpoch = TestUtils.findLeaderEpoch(secondLeaderId, topicPartition, brokers) assertResponseErrorForEpoch(Errors.NONE, secondLeaderId, Optional.empty()) assertResponseErrorForEpoch(Errors.NONE, secondLeaderId, Optional.of(secondLeaderEpoch)) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index e44a6efa5e19e..acca3bf8a9ed6 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -32,7 +32,7 @@ import kafka.zk.KafkaZkClient import 
org.apache.kafka.clients.FetchSessionHandler import org.apache.kafka.common.{DirectoryId, IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.config.{TopicConfig} +import org.apache.kafka.common.config.{AbstractConfig, TopicConfig} import org.apache.kafka.common.errors.{InvalidPidMappingException, KafkaStorageException} import org.apache.kafka.common.message.LeaderAndIsrRequestData import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState @@ -2911,8 +2911,7 @@ class ReplicaManagerTest { val maxTransactionTimeoutMs = 30000 val maxProducerIdExpirationMs = 30000 val segments = new LogSegments(tp) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, tp, mockLogDirFailureChannel, logConfig.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, tp, mockLogDirFailureChannel, logConfig.recordVersion, "") val producerStateManager = new ProducerStateManager(tp, logDir, maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, true), time) val offsets = new LogLoader( @@ -3390,7 +3389,7 @@ class ReplicaManagerTest { when(mockLog.remoteLogEnabled()).thenReturn(enableRemoteStorage) when(mockLog.remoteStorageSystemEnable).thenReturn(enableRemoteStorage) val aliveBrokers = aliveBrokerIds.map(brokerId => new Node(brokerId, s"host$brokerId", brokerId)) - brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.isRemoteStorageSystemEnabled) + brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.enableRemoteStorageSystem) val metadataCache: MetadataCache = mock(classOf[MetadataCache]) when(metadataCache.topicIdInfo()).thenReturn((topicIds.asJava, topicNames.asJava)) @@ -4093,9 +4092,10 @@ class ReplicaManagerTest { 
props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteLogMetadataManager].getName) // set log reader threads number to 2 props.put(RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP, 2.toString) - val remoteLogManagerConfig = new RemoteLogManagerConfig(props) + val config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props) + val remoteLogManagerConfig = new RemoteLogManagerConfig(config) val mockLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled) val remoteLogManager = new RemoteLogManager( remoteLogManagerConfig, 0, @@ -4199,9 +4199,10 @@ class ReplicaManagerTest { props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, true.toString) props.put(RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteStorageManager].getName) props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteLogMetadataManager].getName) - val remoteLogManagerConfig = new RemoteLogManagerConfig(props) + val config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props) + val remoteLogManagerConfig = new RemoteLogManagerConfig(config) val dummyLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled) val remoteLogManager = new RemoteLogManager( remoteLogManagerConfig, 0, @@ -5977,7 +5978,6 @@ class ReplicaManagerTest { .setPartitionId(0) .setTopicId(FOO_UUID) .setReplicas(util.Arrays.asList(localId, localId + 1, localId + 2)) - .setDirectories(util.Arrays.asList(Uuid.fromString("fKgQ2axkQiuzt4ANqKbPkQ"), DirectoryId.UNASSIGNED, 
DirectoryId.UNASSIGNED)) .setIsr(util.Arrays.asList(localId, localId + 1)) ) followerMetadataImage = imageFromTopics(followerTopicsDelta.apply()) @@ -6458,39 +6458,6 @@ class ReplicaManagerTest { assertEquals(Errors.NONE.code, response.errorCode) assertTrue(response.totalBytes > 0) assertTrue(response.usableBytes >= 0) - assertFalse(response.topics().isEmpty) - response.topics().forEach(t => assertFalse(t.partitions().isEmpty)) - } - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testDescribeLogDirsWithoutAnyPartitionTopic(): Unit = { - val noneTopic = "none-topic" - val topicPartition = 0 - val topicId = Uuid.randomUuid() - val followerBrokerId = 0 - val leaderBrokerId = 1 - val leaderEpoch = 1 - val leaderEpochIncrement = 2 - val countDownLatch = new CountDownLatch(1) - val offsetFromLeader = 5 - - // Prepare the mocked components for the test - val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time), - topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, - expectTruncation = false, localLogOffset = Some(10), offsetFromLeader = offsetFromLeader, topicId = Some(topicId)) - - try { - val responses = replicaManager.describeLogDirs(Set(new TopicPartition(noneTopic, topicPartition))) - assertEquals(mockLogMgr.liveLogDirs.size, responses.size) - responses.foreach { response => - assertEquals(Errors.NONE.code, response.errorCode) - assertTrue(response.totalBytes > 0) - assertTrue(response.usableBytes >= 0) - assertTrue(response.topics().isEmpty) } } finally { replicaManager.shutdown(checkpointHW = false) @@ -6530,7 +6497,7 @@ class ReplicaManagerTest { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val leaderAndIsr = LeaderAndIsr(0, 1, List(0, 1), LeaderRecoveryState.RECOVERED, LeaderAndIsr.InitialPartitionEpoch) - val becomeLeaderRequest = makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), 
leaderAndIsr) + val becomeLeaderRequest = makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), leaderAndIsr) replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) verifyRLMOnLeadershipChange(Collections.singleton(partition), Collections.emptySet()) @@ -6710,79 +6677,6 @@ class ReplicaManagerTest { )) } } - - @Test - def testRemoteReadQuotaExceeded(): Unit = { - when(mockRemoteLogManager.isRemoteLogFetchQuotaExceeded).thenReturn(true) - - val tp0 = new TopicPartition(topic, 0) - val tpId0 = new TopicIdPartition(topicId, tp0) - val fetch: Seq[(TopicIdPartition, LogReadResult)] = readFromLogWithOffsetOutOfRange(tp0) - - assertEquals(1, fetch.size) - assertEquals(tpId0, fetch.head._1) - val fetchInfo = fetch.head._2.info - assertEquals(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, fetchInfo.fetchOffsetMetadata) - assertFalse(fetchInfo.records.records().iterator().hasNext) - assertFalse(fetchInfo.firstEntryIncomplete) - assertFalse(fetchInfo.abortedTransactions.isPresent) - assertFalse(fetchInfo.delayedRemoteStorageFetch.isPresent) - } - - @Test - def testRemoteReadQuotaNotExceeded(): Unit = { - when(mockRemoteLogManager.isRemoteLogFetchQuotaExceeded).thenReturn(false) - - val tp0 = new TopicPartition(topic, 0) - val tpId0 = new TopicIdPartition(topicId, tp0) - val fetch: Seq[(TopicIdPartition, LogReadResult)] = readFromLogWithOffsetOutOfRange(tp0) - - assertEquals(1, fetch.size) - assertEquals(tpId0, fetch.head._1) - val fetchInfo = fetch.head._2.info - assertEquals(1L, fetchInfo.fetchOffsetMetadata.messageOffset) - assertEquals(UnifiedLog.UnknownOffset, fetchInfo.fetchOffsetMetadata.segmentBaseOffset) - assertEquals(-1, fetchInfo.fetchOffsetMetadata.relativePositionInSegment) - assertEquals(MemoryRecords.EMPTY, fetchInfo.records) - assertTrue(fetchInfo.delayedRemoteStorageFetch.isPresent) - } - - private def readFromLogWithOffsetOutOfRange(tp: TopicPartition): Seq[(TopicIdPartition, LogReadResult)] = { - val replicaManager = 
setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true) - try { - val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints) - replicaManager.createPartition(tp).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints = offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp.topic -> topicId).asJava - val leaderEpoch = 0 - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, 0, 0, brokerEpoch, - Seq( - new LeaderAndIsrPartitionState() - .setTopicName(tp.topic) - .setPartitionIndex(tp.partition) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) - - val params = new FetchParams(ApiKeys.FETCH.latestVersion, -1, 1, 1000, 0, 100, FetchIsolation.HIGH_WATERMARK, None.asJava) - replicaManager.readFromLog( - params, - Seq(new TopicIdPartition(topicId, 0, topic) -> new PartitionData(topicId, 1, 0, 100000, Optional.of[Integer](leaderEpoch), Optional.of(leaderEpoch))), - UnboundedQuota, - readFromPurgatory = false) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - // Some threads are closed, but the state didn't reflect in the JVM immediately, so add some wait time for it private def assertNoNonDaemonThreadsWithWaiting(threadNamePrefix: String, waitTimeMs: Long = 500L): Unit = { var nonDemonThreads: mutable.Set[Thread] = mutable.Set.empty[Thread] diff --git a/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala b/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala index 1f947dd8fae4b..03623bab41f54 100644 --- 
a/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala @@ -299,7 +299,7 @@ class ReplicationQuotasTest extends QuorumTestHarness { features.add(new BrokerRegistrationRequestData.Feature() .setName(MetadataVersion.FEATURE_NAME) .setMinSupportedVersion(MetadataVersion.IBP_3_0_IV1.featureLevel()) - .setMaxSupportedVersion(MetadataVersion.IBP_4_0_IV0.featureLevel())) + .setMaxSupportedVersion(MetadataVersion.IBP_4_0_IV0.featureLevel())) controllerServer.controller.registerBroker( ControllerRequestContextUtil.ANONYMOUS_CONTEXT, new BrokerRegistrationRequestData() diff --git a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala index cdceafcac7ba0..456d075f91655 100644 --- a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala @@ -704,10 +704,10 @@ class RequestQuotaTest extends BaseRequestTest { new ConsumerGroupDescribeRequest.Builder(new ConsumerGroupDescribeRequestData(), true) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => - new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData()) + new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData(), true) case ApiKeys.PUSH_TELEMETRY => - new PushTelemetryRequest.Builder(new PushTelemetryRequestData()) + new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true) case ApiKeys.ASSIGN_REPLICAS_TO_DIRS => new AssignReplicasToDirsRequest.Builder(new AssignReplicasToDirsRequestData()) @@ -718,27 +718,6 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => new DescribeTopicPartitionsRequest.Builder(new DescribeTopicPartitionsRequestData()) - case ApiKeys.SHARE_GROUP_HEARTBEAT => - new ShareGroupHeartbeatRequest.Builder(new ShareGroupHeartbeatRequestData(), true) - - case 
ApiKeys.SHARE_GROUP_DESCRIBE => - new ShareGroupDescribeRequest.Builder(new ShareGroupDescribeRequestData(), true) - - case ApiKeys.SHARE_FETCH => - new ShareFetchRequest.Builder(new ShareFetchRequestData(), true) - - case ApiKeys.SHARE_ACKNOWLEDGE => - new ShareAcknowledgeRequest.Builder(new ShareAcknowledgeRequestData(), true) - - case ApiKeys.ADD_RAFT_VOTER => - new AddRaftVoterRequest.Builder(new AddRaftVoterRequestData()) - - case ApiKeys.REMOVE_RAFT_VOTER => - new RemoveRaftVoterRequest.Builder(new RemoveRaftVoterRequestData()) - - case ApiKeys.UPDATE_RAFT_VOTER => - new UpdateRaftVoterRequest.Builder(new UpdateRaftVoterRequestData()) - case _ => throw new IllegalArgumentException("Unsupported API key " + apiKey) } diff --git a/core/src/test/scala/unit/kafka/server/checkpoints/InMemoryLeaderEpochCheckpointTest.scala b/core/src/test/scala/unit/kafka/server/checkpoints/InMemoryLeaderEpochCheckpointTest.scala new file mode 100644 index 0000000000000..3af126f5c5529 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/checkpoints/InMemoryLeaderEpochCheckpointTest.scala @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package kafka.server.checkpoints + +import org.apache.kafka.storage.internals.checkpoint.InMemoryLeaderEpochCheckpoint +import org.apache.kafka.storage.internals.log.EpochEntry +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Test + +import java.io.{BufferedReader, ByteArrayInputStream, InputStreamReader} +import java.nio.charset.StandardCharsets + +class InMemoryLeaderEpochCheckpointTest { + + @Test + def shouldAppendNewEntry(): Unit = { + val checkpoint = new InMemoryLeaderEpochCheckpoint() + val epochs = java.util.Arrays.asList(new EpochEntry(0, 1L), new EpochEntry(1, 2L), new EpochEntry(2, 3L)) + checkpoint.write(epochs) + assertEquals(epochs, checkpoint.read()) + + val epochs2 = java.util.Arrays.asList(new EpochEntry(3, 4L), new EpochEntry(4, 5L)) + checkpoint.write(epochs2) + + assertEquals(epochs2, checkpoint.read()) + } + + @Test + def testReadAsByteBuffer(): Unit = { + val checkpoint = new InMemoryLeaderEpochCheckpoint() + val expectedEpoch = 0 + val expectedStartOffset = 1L + val expectedVersion = 0 + val epochs = java.util.Arrays.asList(new EpochEntry(expectedEpoch, expectedStartOffset)) + checkpoint.write(epochs) + assertEquals(epochs, checkpoint.read()) + val buffer = checkpoint.readAsByteBuffer() + + val bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(buffer.array()), StandardCharsets.UTF_8)) + assertEquals(expectedVersion.toString, bufferedReader.readLine()) + assertEquals(epochs.size().toString, bufferedReader.readLine()) + assertEquals(s"$expectedEpoch $expectedStartOffset", bufferedReader.readLine()) + } +} diff --git a/core/src/test/scala/unit/kafka/server/checkpoints/OffsetCheckpointFileWithFailureHandlerTest.scala b/core/src/test/scala/unit/kafka/server/checkpoints/OffsetCheckpointFileWithFailureHandlerTest.scala index 7808cedb075ee..a7e370d7f4091 100644 --- a/core/src/test/scala/unit/kafka/server/checkpoints/OffsetCheckpointFileWithFailureHandlerTest.scala +++ 
b/core/src/test/scala/unit/kafka/server/checkpoints/OffsetCheckpointFileWithFailureHandlerTest.scala @@ -19,13 +19,12 @@ package kafka.server.checkpoints import kafka.utils.{Logging, TestUtils} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.KafkaStorageException -import org.apache.kafka.storage.internals.checkpoint.{CheckpointFileWithFailureHandler, LeaderEpochCheckpointFile} -import org.apache.kafka.storage.internals.log.{EpochEntry, LogDirFailureChannel} +import org.apache.kafka.storage.internals.checkpoint.CheckpointFileWithFailureHandler +import org.apache.kafka.storage.internals.log.LogDirFailureChannel import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.mockito.Mockito -import java.io.File import java.util.Collections import scala.collection.Map @@ -98,7 +97,7 @@ class OffsetCheckpointFileWithFailureHandlerTest extends Logging { val logDirFailureChannel = new LogDirFailureChannel(10) val checkpointFile = new CheckpointFileWithFailureHandler(file, OffsetCheckpointFile.CurrentVersion + 1, OffsetCheckpointFile.Formatter, logDirFailureChannel, file.getParent) - checkpointFile.write(Collections.singletonList(new TopicPartition("foo", 5) -> 10L)) + checkpointFile.write(Collections.singletonList(new TopicPartition("foo", 5) -> 10L), true) assertThrows(classOf[KafkaStorageException], () => new OffsetCheckpointFile(checkpointFile.file, logDirFailureChannel).read()) } @@ -134,15 +133,4 @@ class OffsetCheckpointFileWithFailureHandlerTest extends Logging { assertThrows(classOf[IllegalArgumentException], () => lazyCheckpoints.fetch("/invalid/kafka-logs", new TopicPartition("foo", 0))) } - @Test - def testWriteIfDirExistsShouldNotThrowWhenDirNotExists(): Unit = { - val dir = TestUtils.tempDir() - val file = dir.toPath.resolve("test-checkpoint").toFile - val logDirFailureChannel = new LogDirFailureChannel(10) - val checkpointFile = new CheckpointFileWithFailureHandler(file, 0, - 
LeaderEpochCheckpointFile.FORMATTER, logDirFailureChannel, file.getParent) - - dir.renameTo(new File(dir.getAbsolutePath + "-renamed")) - checkpointFile.writeIfDirExists(Collections.singletonList(new EpochEntry(1, 42))) - } } diff --git a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochFileCacheTest.scala b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochFileCacheTest.scala index 6f6d0bdbda58d..05041f39709f7 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochFileCacheTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochFileCacheTest.scala @@ -20,15 +20,15 @@ package kafka.server.epoch import kafka.utils.TestUtils import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} -import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile +import org.apache.kafka.storage.internals.checkpoint.{LeaderEpochCheckpoint, LeaderEpochCheckpointFile} import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache import org.apache.kafka.storage.internals.log.{EpochEntry, LogDirFailureChannel} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import java.io.File -import java.util.{Collections, Optional, OptionalInt} +import java.util.{Collections, OptionalInt, Optional} +import scala.collection.Seq import scala.jdk.CollectionConverters._ /** @@ -36,10 +36,13 @@ import scala.jdk.CollectionConverters._ */ class LeaderEpochFileCacheTest { val tp = new TopicPartition("TestTopic", 5) - val mockTime = new MockTime() - private val checkpoint: LeaderEpochCheckpointFile = new LeaderEpochCheckpointFile(TestUtils.tempFile(), new LogDirFailureChannel(1)) + private val checkpoint: LeaderEpochCheckpoint = new LeaderEpochCheckpoint { + private var epochs: Seq[EpochEntry] = Seq() + override def write(epochs: java.util.Collection[EpochEntry], ignored: 
Boolean): Unit = this.epochs = epochs.asScala.toSeq + override def read(): java.util.List[EpochEntry] = this.epochs.asJava + } - private val cache = new LeaderEpochFileCache(tp, checkpoint, mockTime.scheduler) + private val cache = new LeaderEpochFileCache(tp, checkpoint) @Test def testPreviousEpoch(): Unit = { @@ -54,7 +57,7 @@ class LeaderEpochFileCacheTest { cache.assign(10, 20) assertEquals(OptionalInt.of(4), cache.previousEpoch) - cache.truncateFromEndAsyncFlush(18) + cache.truncateFromEnd(18) assertEquals(OptionalInt.of(2), cache.previousEpoch) } @@ -242,12 +245,12 @@ class LeaderEpochFileCacheTest { val checkpoint = new LeaderEpochCheckpointFile(new File(checkpointPath), new LogDirFailureChannel(1)) //Given - val cache = new LeaderEpochFileCache(tp, checkpoint, new MockTime().scheduler) + val cache = new LeaderEpochFileCache(tp, checkpoint) cache.assign(2, 6) //When val checkpoint2 = new LeaderEpochCheckpointFile(new File(checkpointPath), new LogDirFailureChannel(1)) - val cache2 = new LeaderEpochFileCache(tp, checkpoint2, new MockTime().scheduler) + val cache2 = new LeaderEpochFileCache(tp, checkpoint2) //Then assertEquals(1, cache2.epochEntries.size) @@ -384,7 +387,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When clear latest on epoch boundary - cache.truncateFromEndAsyncFlush(8) + cache.truncateFromEnd(8) //Then should remove two latest epochs (remove is inclusive) assertEquals(java.util.Arrays.asList(new EpochEntry(2, 6)), cache.epochEntries) @@ -398,7 +401,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When reset to offset ON epoch boundary - cache.truncateFromStartAsyncFlush(8) + cache.truncateFromStart(8) //Then should preserve (3, 8) assertEquals(java.util.Arrays.asList(new EpochEntry(3, 8), new EpochEntry(4, 11)), cache.epochEntries) @@ -412,7 +415,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When reset to offset BETWEEN epoch boundaries - cache.truncateFromStartAsyncFlush(9) + cache.truncateFromStart(9) 
//Then we should retain epoch 3, but update it's offset to 9 as 8 has been removed assertEquals(java.util.Arrays.asList(new EpochEntry(3, 9), new EpochEntry(4, 11)), cache.epochEntries) @@ -426,7 +429,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When reset to offset before first epoch offset - cache.truncateFromStartAsyncFlush(1) + cache.truncateFromStart(1) //Then nothing should change assertEquals(java.util.Arrays.asList(new EpochEntry(2, 6),new EpochEntry(3, 8), new EpochEntry(4, 11)), cache.epochEntries) @@ -440,7 +443,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When reset to offset on earliest epoch boundary - cache.truncateFromStartAsyncFlush(6) + cache.truncateFromStart(6) //Then nothing should change assertEquals(java.util.Arrays.asList(new EpochEntry(2, 6),new EpochEntry(3, 8), new EpochEntry(4, 11)), cache.epochEntries) @@ -454,7 +457,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When - cache.truncateFromStartAsyncFlush(11) + cache.truncateFromStart(11) //Then retain the last assertEquals(Collections.singletonList(new EpochEntry(4, 11)), cache.epochEntries) @@ -468,7 +471,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When we clear from a position between offset 8 & offset 11 - cache.truncateFromStartAsyncFlush(9) + cache.truncateFromStart(9) //Then we should update the middle epoch entry's offset assertEquals(java.util.Arrays.asList(new EpochEntry(3, 9), new EpochEntry(4, 11)), cache.epochEntries) @@ -482,7 +485,7 @@ class LeaderEpochFileCacheTest { cache.assign(2, 10) //When we clear from a position between offset 0 & offset 7 - cache.truncateFromStartAsyncFlush(5) + cache.truncateFromStart(5) //Then we should keep epoch 0 but update the offset appropriately assertEquals(java.util.Arrays.asList(new EpochEntry(0,5), new EpochEntry(1, 7), new EpochEntry(2, 10)), @@ -497,7 +500,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When reset to offset beyond last epoch - 
cache.truncateFromStartAsyncFlush(15) + cache.truncateFromStart(15) //Then update the last assertEquals(Collections.singletonList(new EpochEntry(4, 15)), cache.epochEntries) @@ -511,7 +514,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When reset to offset BETWEEN epoch boundaries - cache.truncateFromEndAsyncFlush( 9) + cache.truncateFromEnd( 9) //Then should keep the preceding epochs assertEquals(OptionalInt.of(3), cache.latestEpoch) @@ -540,7 +543,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When reset to offset on epoch boundary - cache.truncateFromStartAsyncFlush(UNDEFINED_EPOCH_OFFSET) + cache.truncateFromStart(UNDEFINED_EPOCH_OFFSET) //Then should do nothing assertEquals(3, cache.epochEntries.size) @@ -554,7 +557,7 @@ class LeaderEpochFileCacheTest { cache.assign(4, 11) //When reset to offset on epoch boundary - cache.truncateFromEndAsyncFlush(UNDEFINED_EPOCH_OFFSET) + cache.truncateFromEnd(UNDEFINED_EPOCH_OFFSET) //Then should do nothing assertEquals(3, cache.epochEntries.size) @@ -575,13 +578,13 @@ class LeaderEpochFileCacheTest { @Test def shouldClearEarliestOnEmptyCache(): Unit = { //Then - cache.truncateFromStartAsyncFlush(7) + cache.truncateFromStart(7) } @Test def shouldClearLatestOnEmptyCache(): Unit = { //Then - cache.truncateFromEndAsyncFlush(7) + cache.truncateFromEnd(7) } @Test @@ -597,7 +600,7 @@ class LeaderEpochFileCacheTest { cache.assign(10, 20) assertEquals(OptionalInt.of(4), cache.previousEpoch(10)) - cache.truncateFromEndAsyncFlush(18) + cache.truncateFromEnd(18) assertEquals(OptionalInt.of(2), cache.previousEpoch(cache.latestEpoch.getAsInt)) } @@ -614,7 +617,7 @@ class LeaderEpochFileCacheTest { cache.assign(10, 20) assertEquals(Optional.of(new EpochEntry(4, 15)), cache.previousEntry(10)) - cache.truncateFromEndAsyncFlush(18) + cache.truncateFromEnd(18) assertEquals(Optional.of(new EpochEntry(2, 10)), cache.previousEntry(cache.latestEpoch.getAsInt)) } @@ -655,15 +658,4 @@ class LeaderEpochFileCacheTest { 
assertEquals(OptionalInt.empty(), cache.epochForOffset(5)) } - @Test - def shouldWriteCheckpointOnTruncation(): Unit = { - cache.assign(2, 6) - cache.assign(3, 8) - cache.assign(4, 11) - - cache.truncateFromEndAsyncFlush(11) - cache.truncateFromStartAsyncFlush(8) - - assertEquals(List(new EpochEntry(3, 8)).asJava, checkpoint.read()) - } } diff --git a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala index 1e91cbb60b7a9..c2926c3b67db9 100644 --- a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala +++ b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala @@ -23,7 +23,7 @@ import java.util.Collections.{singleton, singletonList, singletonMap} import java.util.Properties import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} import kafka.log.LogManager -import kafka.server.{BrokerServer, KafkaConfig, ReplicaManager} +import kafka.server.{BrokerLifecycleManager, BrokerServer, KafkaConfig, ReplicaManager} import kafka.testkit.{KafkaClusterTestKit, TestKitNodes} import kafka.utils.TestUtils import org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET @@ -201,7 +201,8 @@ class BrokerMetadataPublisherTest { mock(classOf[DelegationTokenPublisher]), mock(classOf[AclPublisher]), faultHandler, - faultHandler + faultHandler, + mock(classOf[BrokerLifecycleManager]), ) val image = MetadataImage.EMPTY diff --git a/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala b/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala index c625dc6e968d1..b8764f5fae3d0 100644 --- a/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala +++ b/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala @@ -22,8 +22,8 @@ import java.nio.ByteBuffer import java.util import java.util.Collections import java.util.Optional +import java.util.Arrays import java.util.Properties -import 
java.util.stream.IntStream import kafka.log.{LogTestUtils, UnifiedLog} import kafka.raft.{KafkaMetadataLog, MetadataLogConfig} import kafka.server.{BrokerTopicStats, KafkaRaftServer} @@ -338,7 +338,7 @@ class DumpLogSegmentsTest { .setLastContainedLogTimestamp(lastContainedLogTimestamp) .setRawSnapshotWriter(metadataLog.createNewSnapshot(new OffsetAndEpoch(0, 0)).get) .setKraftVersion(1) - .setVoterSet(Optional.of(VoterSetTest.voterSet(VoterSetTest.voterMap(IntStream.of(1, 2, 3), true)))) + .setVoterSet(Optional.of(VoterSetTest.voterSet(VoterSetTest.voterMap(Arrays.asList(1, 2, 3), true)))) .build(MetadataRecordSerde.INSTANCE) ) { snapshotWriter => snapshotWriter.append(metadataRecords.asJava) diff --git a/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala b/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala index 8365fbd10ebe1..54a436f231fc4 100644 --- a/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala +++ b/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala @@ -33,12 +33,11 @@ import org.apache.kafka.server.common.{ApiMessageAndVersion, Features, MetadataV import org.apache.kafka.common.metadata.{FeatureLevelRecord, UserScramCredentialRecord} import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertThrows, assertTrue} import org.junit.jupiter.api.{Test, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{EnumSource, ValueSource} -import org.mockito.Mockito import scala.collection.mutable import scala.collection.mutable.ArrayBuffer @@ -183,7 +182,7 @@ Found problem: val bootstrapMetadata = 
StorageTool.buildBootstrapMetadata(MetadataVersion.latestTesting(), None, "test format command") assertEquals(0, StorageTool. formatCommand(new PrintStream(stream), Seq(tempDir.toString), metaProperties, bootstrapMetadata, MetadataVersion.latestTesting(), ignoreFormatted = false)) - assertTrue(stringAfterFirstLine(stream.toString()).startsWith("Formatting %s".format(tempDir))) + assertTrue(stream.toString().startsWith("Formatting %s".format(tempDir))) try assertEquals(1, StorageTool. formatCommand(new PrintStream(new ByteArrayOutputStream()), Seq(tempDir.toString), metaProperties, bootstrapMetadata, MetadataVersion.latestTesting(), ignoreFormatted = false)) catch { @@ -195,15 +194,10 @@ Found problem: val stream2 = new ByteArrayOutputStream() assertEquals(0, StorageTool. formatCommand(new PrintStream(stream2), Seq(tempDir.toString), metaProperties, bootstrapMetadata, MetadataVersion.latestTesting(), ignoreFormatted = true)) - assertEquals("All of the log directories are already formatted.%n".format(), stringAfterFirstLine(stream2.toString())) + assertEquals("All of the log directories are already formatted.%n".format(), stream2.toString()) } finally Utils.delete(tempDir) } - def stringAfterFirstLine(input: String): String = { - val firstNewline = input.indexOf("\n") - input.substring(firstNewline + 1) - } - private def runFormatCommand(stream: ByteArrayOutputStream, directories: Seq[String], ignoreFormatted: Boolean = false): Int = { val metaProperties = new MetaProperties.Builder(). setVersion(MetaPropertiesVersion.V1). 
@@ -221,7 +215,7 @@ Found problem: assertEquals(0, runFormatCommand(stream, availableDirs)) val actual = stream.toString().split("\\r?\\n") val expect = availableDirs.map("Formatting %s".format(_)) - assertEquals(availableDirs.size + 1, actual.size) + assertEquals(availableDirs.size, actual.size) expect.foreach(dir => { assertEquals(1, actual.count(_.startsWith(dir))) }) @@ -662,37 +656,5 @@ Found problem: assertEquals(1, exitStatus) } } - - @Test - def testFormatValidatesConfigForMetadataVersion(): Unit = { - val config = Mockito.spy(new KafkaConfig(TestUtils.createBrokerConfig(10, null))) - val args = Array("format", - "-c", "dummy.properties", - "-t", "XcZZOzUqS4yHOjhMQB6JLQ", - "--release-version", MetadataVersion.LATEST_PRODUCTION.toString) - val exitCode = StorageTool.runFormatCommand(StorageTool.parseArguments(args), config) - Mockito.verify(config, Mockito.times(1)).validateWithMetadataVersion(MetadataVersion.LATEST_PRODUCTION) - assertEquals(0, exitCode) - } - - @Test - def testJbodSupportValidation(): Unit = { - def formatWith(logDirCount: Int, metadataVersion: MetadataVersion): Integer = { - val properties = TestUtils.createBrokerConfig(10, null, logDirCount = logDirCount) - properties.remove(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG) - val configFile = TestUtils.tempPropertiesFile(properties.asScala.toMap).toPath.toString - StorageTool.execute(Array("format", - "-c", configFile, - "-t", "XcZZOzUqS4yHOjhMQB6JLQ", - "--release-version", metadataVersion.toString)) - } - - assertEquals(0, formatWith(1, MetadataVersion.IBP_3_6_IV2)) - assertEquals("Invalid configuration for metadata version: " + - "requirement failed: Multiple log directories (aka JBOD) are not supported in the current MetadataVersion 3.6-IV2. 
Need 3.7-IV2 or higher", - assertThrows(classOf[TerseFailure], () => formatWith(2, MetadataVersion.IBP_3_6_IV2)).getMessage) - assertEquals(0, formatWith(1, MetadataVersion.IBP_3_7_IV2)) - assertEquals(0, formatWith(2, MetadataVersion.IBP_3_7_IV2)) - } } diff --git a/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala b/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala index 6280318af5d60..d25fdb1b4e9b1 100644 --- a/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala +++ b/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala @@ -139,8 +139,7 @@ class SchedulerTest { val topicPartition = UnifiedLog.parseTopicPartitionName(logDir) val logDirFailureChannel = new LogDirFailureChannel(10) val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "") val producerStateManager = new ProducerStateManager(topicPartition, logDir, maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, false), mockTime) val offsets = new LogLoader( diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 5eb3187ff309f..31f671e3a0f4e 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -903,10 +903,9 @@ object TestUtils extends Logging { def pollRecordsUntilTrue[K, V](consumer: Consumer[K, V], action: ConsumerRecords[K, V] => Boolean, msg: => String, - waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS, - pollTimeoutMs: Long = 100): Unit = { + waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = { waitUntilTrue(() => { - val records = consumer.poll(Duration.ofMillis(pollTimeoutMs)) + val records = 
consumer.poll(Duration.ofMillis(100)) action(records) }, msg = msg, pause = 0L, waitTimeMs = waitTimeMs) } @@ -1099,31 +1098,13 @@ object TestUtils extends Logging { def awaitLeaderChange[B <: KafkaBroker]( brokers: Seq[B], tp: TopicPartition, - oldLeaderOpt: Option[Int] = None, - expectedLeaderOpt: Option[Int] = None, + oldLeader: Int, timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = { def newLeaderExists: Option[Int] = { - if (expectedLeaderOpt.isDefined) { - debug(s"Checking leader that has changed to ${expectedLeaderOpt.get}") - brokers.find { broker => - broker.config.brokerId == expectedLeaderOpt.get && - broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) - }.map(_.config.brokerId) - - } else if (oldLeaderOpt.isDefined) { - debug(s"Checking leader that has changed from ${oldLeaderOpt}") - brokers.find { broker => - broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) - broker.config.brokerId != oldLeaderOpt.get && - broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) - }.map(_.config.brokerId) - - } else { - debug(s"Checking the elected leader") - brokers.find { broker => - broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) - }.map(_.config.brokerId) - } + brokers.find { broker => + broker.config.brokerId != oldLeader && + broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) + }.map(_.config.brokerId) } waitUntilTrue(() => newLeaderExists.isDefined, diff --git a/docker/README.md b/docker/README.md index a23c754c3b736..54d15c04feb17 100644 --- a/docker/README.md +++ b/docker/README.md @@ -4,7 +4,7 @@ Docker Images Introduction ------------ -This directory contains scripts to build, test, push and promote docker image for kafka. It also contains scripts to build, test the JVM-based Docker Official Image, and generate a PR template for the same to be raised under the Docker official images repo. 
+This directory contains scripts to build, test, push and promote docker image for kafka. All of the steps can be either performed locally or by using Github Actions. Github Actions @@ -135,42 +135,3 @@ Using the image in a docker container ------------------------------------- Please check [this](./examples/README.md) for usage guide of the docker image. - -Releasing the Docker Official Image ------------------------------------ - -- This is the recommended way to release docker official image. -- Ensure these steps are being run for a particular version, only once the AK release process for that version has been completed. - -- Provide the image type and kafka version to `Docker Prepare Docker Official Image Source` workflow. It will generate a artifact containing the static Dockerfile and assets for that specific version. Download the same from the workflow. - -``` -image_type: jvm -kafka_version: 3.7.0 -``` - -- Run the `docker/extract_docker_official_image_artifact.py` script, by providing it the path to the downloaded artifact. This will create a new directory under `docker/docker_official_images/kafka_version`. - -``` -python extract_docker_official_image_artifact.py --path_to_downloaded_artifact=path/to/downloaded/artifact -``` - -- If there any versions for which Docker Official Images should not be supported, remove the corresponding directories under `docker/docker_official_images`. -- Commit these changes to AK trunk. - -- Provide the image type and kafka version to `Docker Official Image Build Test` workflow. It will generate a test report and CVE report that can be shared with the community. - -``` -image_type: jvm -kafka_version: 3.7.0 -``` - -- Run the `docker/generate_kafka_pr_template.py` script from trunk, by providing it the image type. Update the existing entry, and raise a new PR in [Docker Hub's Docker Official Repo](https://github.com/docker-library/official-images/tree/master/library/kafka) by using this new PR template. 
- -``` -python generate_kafka_pr_template.py --image-type=jvm -``` - -- kafka-version - This is the version to create the Docker official images static Dockerfile and assets for, as well as the version to build and test the Docker official image for. -- image-type - This is the type of image that we intend to build. This will be dropdown menu type selection in the workflow. `jvm` image type is for official docker image (to be hosted on apache/kafka) as described in [KIP-975](https://cwiki.apache.org/confluence/display/KAFKA/KIP-975%3A+Docker+Image+for+Apache+Kafka). As of now [KIP-1028](https://cwiki.apache.org/confluence/display/KAFKA/KIP-1028%3A+Docker+Official+Image+for+Apache+Kafka) only aims to release JVM based Docker Official Images. - diff --git a/docs/ops.html b/docs/ops.html index 5df69b09f042f..1210b26739f50 100644 --- a/docs/ops.html +++ b/docs/ops.html @@ -1398,7 +1398,6 @@

  • commit=num_secs: This tunes the frequency with which ext4 commits to its metadata journal. Setting this to a lower value reduces the loss of unflushed data during a crash. Setting this to a higher value will improve throughput.
  • nobh: This setting controls additional ordering guarantees when using data=writeback mode. This should be safe with Kafka as we do not depend on write ordering and improves throughput and latency.
  • delalloc: Delayed allocation means that the filesystem avoid allocating any blocks until the physical write occurs. This allows ext4 to allocate a large extent instead of smaller pages and helps ensure the data is written sequentially. This feature is great for throughput. It does seem to involve some locking in the filesystem which adds a bit of latency variance. -
  • fast_commit: Added in Linux 5.10, fast_commit is a lighter-weight journaling method which can be used with data=ordered journaling mode. Enabling it seems to significantly reduce latency.

    Replace KRaft Controller Disk

    @@ -3249,29 +3248,19 @@
    num.standby.replicastopology.optimization Medium - A configuration telling Kafka Streams if it should optimize the topology and what optimizations to apply. Acceptable values are: StreamsConfig.NO_OPTIMIZATION (none), StreamsConfig.OPTIMIZE (all) or a comma separated list of specific optimizations: StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS (reuse.ktable.source.topics), StreamsConfig.MERGE_REPARTITION_TOPICS (merge.repartition.topics), - StreamsConfig.SINGLE_STORE_SELF_JOIN (single.store.self.join). + A configuration telling Kafka Streams if it should optimize the topology and what optimizations to apply. Acceptable values are: StreamsConfig.NO_OPTIMIZATION (none), StreamsConfig.OPTIMIZE (all) or a comma separated list of specific optimizations: (StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS (reuse.ktable.source.topics), StreamsConfig.MERGE_REPARTITION_TOPICS (merge.repartition.topics)). NO_OPTIMIZATION upgrade.from @@ -1022,7 +1021,7 @@

    topology.optimizationKTable-KTable Foreign-Key