From f82d15cf7112a5051a018b217f227272d3baaab4 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 24 May 2024 12:08:32 -0500 Subject: [PATCH 01/61] Migrated ConsumerTestBuilder Removed dependencies upon ConsumerTestBuilder --- .../internals/CommitRequestManager.java | 14 +- .../internals/ConsumerNetworkThread.java | 4 + .../ConsumerNetworkThreadUnitTest.java | 407 ++++++++++++++++++ 3 files changed, 417 insertions(+), 8 deletions(-) create mode 100644 clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 577cf7dee6b76..93de22975fccc 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1090,14 +1090,12 @@ private void chainFuture( } @Override - public String toString() { - return "OffsetFetchRequestState{" + - "requestedPartitions=" + requestedPartitions + + public String toStringBase() { + return super.toStringBase() + ", memberId=" + memberInfo.memberId.orElse("undefined") + - ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch.get() : "undefined") + - ", future=" + future + - ", " + toStringBase() + - '}'; + ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch.get() : "undefined") + + ", requestedPartitions=" + requestedPartitions + + ", future=" + future; } } @@ -1290,4 +1288,4 @@ static class MemberInfo { this.memberEpoch = Optional.empty(); } } -} +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index aa352cd68a22e..f3ed2b83451c1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -96,6 +96,7 @@ public void run() { } } + // The suppliers below act as thunks: each resource is constructed lazily, on the network thread, at startup void initializeResources() { applicationEventProcessor = applicationEventProcessorSupplier.get(); networkClientDelegate = networkClientDelegateSupplier.get(); @@ -130,7 +131,9 @@ void runOnce() { // errors to be propagated to the caller. applicationEventProcessor.process(); + // Capture the system time once and pass it to the request managers, so we don't repeatedly query the clock final long currentTimeMs = time.milliseconds(); + // TODO: Make sure pollWaitTimeMs is computed correctly. 
Try to examine different scenarios final long pollWaitTimeMs = requestManagers.entries().stream() .filter(Optional::isPresent) .map(Optional::get) @@ -139,6 +142,7 @@ void runOnce() { .reduce(MAX_POLL_TIMEOUT_MS, Math::min); networkClientDelegate.poll(pollWaitTimeMs, currentTimeMs); + // TODO: Check computation of cachedMaximumTimeToWait cachedMaximumTimeToWait = requestManagers.entries().stream() .filter(Optional::isPresent) .map(Optional::get) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java new file mode 100644 index 0000000000000..c3197b07a8a15 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -0,0 +1,407 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.clients.*; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.internals.events.*; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.message.FindCoordinatorRequestData; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.*; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Timer; +import org.apache.kafka.test.TestCondition; +import org.apache.kafka.test.TestUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.time.Duration; +import java.util.*; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.LinkedBlockingQueue; + +import static org.apache.kafka.clients.consumer.internals.ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS; +import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; + +public class ConsumerNetworkThreadUnitTest { + + private final 
Time time; + private final ConsumerMetadata metadata; + private final ConsumerConfig config; + private final SubscriptionState subscriptions; + private final BlockingQueue applicationEventsQueue; + private final ApplicationEventProcessor applicationEventProcessor; + private final OffsetsRequestManager offsetsRequestManager; + private final CommitRequestManager commitRequestManager; + private final HeartbeatRequestManager heartbeatRequestManager; + private final CoordinatorRequestManager coordinatorRequestManager; + private final ConsumerNetworkThread consumerNetworkThread; + private final MockClient client; + private final NetworkClientDelegate networkClientDelegate; + private final RequestManagers requestManagers; + + static final int DEFAULT_REQUEST_TIMEOUT_MS = 500; + static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000; + + ConsumerNetworkThreadUnitTest() { + LogContext logContext = new LogContext(); + this.time = new MockTime(); + this.client = new MockClient(time); + + // usually we don't mock 1. time - MockTime 2. logContext and 3. networkClient - MockClient + this.networkClientDelegate = mock(NetworkClientDelegate.class); + this.requestManagers = mock(RequestManagers.class); + this.offsetsRequestManager = mock(OffsetsRequestManager.class); + this.commitRequestManager = mock(CommitRequestManager.class); + this.heartbeatRequestManager = mock(HeartbeatRequestManager.class); + this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); + this.applicationEventsQueue = mock(LinkedBlockingQueue.class); + this.config = mock(ConsumerConfig.class); + this.subscriptions = mock(SubscriptionState.class); + this.metadata = mock(ConsumerMetadata.class); + this.applicationEventProcessor = mock(ApplicationEventProcessor.class); + + this.consumerNetworkThread = new ConsumerNetworkThread( + logContext, + time, + () -> applicationEventProcessor, + () -> networkClientDelegate, + () -> requestManagers + ); + } + + @BeforeEach + public void setup() { + consumerNetworkThread.initializeResources(); + } + + @AfterEach + public void tearDown() { + if (consumerNetworkThread != null) + consumerNetworkThread.close(); + } + + @Test + public void testStartupAndTearDown() throws InterruptedException { + consumerNetworkThread.start(); + TestCondition isStarted = () -> consumerNetworkThread.isRunning(); + TestCondition isClosed = () -> !(consumerNetworkThread.isRunning() || consumerNetworkThread.isAlive()); + + // There's a nonzero amount of time between starting the thread and having it + // begin to execute our code. Wait for a bit before checking... + TestUtils.waitForCondition(isStarted, + "The consumer network thread did not start within " + DEFAULT_MAX_WAIT_MS + " ms"); + + consumerNetworkThread.close(Duration.ofMillis(DEFAULT_MAX_WAIT_MS)); + + TestUtils.waitForCondition(isClosed, + "The consumer network thread did not stop within " + DEFAULT_MAX_WAIT_MS + " ms"); + } + + // can you rename this test? testEnsureApplicationEventProcessorProcesses.... 
+ @Test + public void testApplicationEvent() { + //ApplicationEvent e = new PollEvent(100); + //applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor, times(1)).process(); + } + + // TODO: Remove test, place elsewhere + // this test is testing ApplicationEventProcessor and ConsumerMetadata + @Test + public void testMetadataUpdateEvent() { + ApplicationEvent e = new NewTopicsMetadataUpdateRequestEvent(); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + verify(metadata).requestUpdateForNewTopics(); + } + + // TODO: Remove test, place elsewhere + // This test is testing behavior of the application event processor + @Test + public void testAsyncCommitEvent() { + ApplicationEvent e = new AsyncCommitEvent(new HashMap<>()); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + //verify(applicationEventProcessor).process(any(AsyncCommitEvent.class)); + verify(applicationEventProcessor).process(); + } + + // TODO: Remove test, place elsewhere + // This test is testing behavior of the application event processor + @Test + public void testSyncCommitEvent() { + Timer timer = time.timer(100); + ApplicationEvent e = new SyncCommitEvent(new HashMap<>(), timer); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + //verify(applicationEventProcessor).process(any(SyncCommitEvent.class)); + verify(applicationEventProcessor).process(); + } + + // TODO: Remove/move this. + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testListOffsetsEventIsProcessed(boolean requireTimestamp) { + Map timestamps = Collections.singletonMap(new TopicPartition("topic1", 1), 5L); + Timer timer = time.timer(100); + ApplicationEvent e = new ListOffsetsEvent(timestamps, timer, requireTimestamp); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + //verify(applicationEventProcessor).process(any(ListOffsetsEvent.class)); + verify(applicationEventProcessor).process(); + //assertTrue(applicationEventsQueue.isEmpty()); + } + + // TODO: Remove test, place elsewhere + // Redundant + @Test + public void testResetPositionsEventIsProcessed() { + Timer timer = time.timer(100); + ResetPositionsEvent e = new ResetPositionsEvent(timer); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + // runOnce() calls processor.process() with no arguments, so we should not expect any specific event object to be passed into it; clean up the rest of the test accordingly. + verify(applicationEventProcessor).process(); + //assertTrue(applicationEventsQueue.isEmpty()); + //when(coordinatorRequestManager.poll(time.milliseconds())).thenReturn(new NetworkClientDelegate.PollResult(100)); + //when(networkClientDelegate.addAll(new NetworkClientDelegate.PollResult(100))).thenReturn(100L); + } + + // TODO: This needs to be tested but should be removed from here. 
+ @Test + public void testResetPositionsProcessFailureIsIgnored() { + doThrow(new NullPointerException()).when(offsetsRequestManager).resetPositionsIfNeeded(); + + Timer timer = time.timer(100); + ResetPositionsEvent event = new ResetPositionsEvent(timer); + applicationEventsQueue.add(event); + assertDoesNotThrow(() -> consumerNetworkThread.runOnce()); + + //verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); + verify(applicationEventProcessor).process(); + } + + // TODO: Remove test, place elsewhere + // Redundant + @Test + public void testValidatePositionsEventIsProcessed() { + Timer timer = time.timer(100); + ValidatePositionsEvent e = new ValidatePositionsEvent(timer); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + //verify(applicationEventProcessor).process(any(ValidatePositionsEvent.class)); + verify(applicationEventProcessor).process(); + //assertTrue(applicationEventsQueue.isEmpty()); + } + + // TODO: Remove test, place elsewhere + // Move this test to applicationEventProcessor if not already there + // Seems more like integration testing, and also redundant: consumerNetworkThread + // only needs to invoke runOnce(), which in turn invokes applicationEventProcessor.process() + @Test + public void testAssignmentChangeEvent() { + HashMap offset = mockTopicPartitionOffset(); + + final long currentTimeMs = time.milliseconds(); + ApplicationEvent e = new AssignmentChangeEvent(offset, currentTimeMs); + applicationEventsQueue.add(e); + + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(); + verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); + verify(commitRequestManager, times(1)).updateAutoCommitTimer(currentTimeMs); + // Assignment change should generate an async commit (not retried). + verify(commitRequestManager, times(1)).maybeAutoCommitAsync(); + } + + // TODO: Remove/move this. + @Test + void testFetchTopicMetadata() { + Timer timer = time.timer(Long.MAX_VALUE); + applicationEventsQueue.add(new TopicMetadataEvent("topic", timer)); + consumerNetworkThread.runOnce(); + //verify(applicationEventProcessor).process(any(TopicMetadataEvent.class)); + verify(applicationEventProcessor).process(); + } + + // TODO: Remove test, place elsewhere + // seems like this belongs to the ncd test. 
check if there's a similar test there + // I think this should be moved to NCD test, we are not testing ConsumerNetworkThread + @Test + void testPollResultTimer() { + NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( + new FindCoordinatorRequest.Builder( + new FindCoordinatorRequestData() + .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) + .setKey("foobar")), + Optional.empty()); + req.setTimer(time, DEFAULT_REQUEST_TIMEOUT_MS); + + // purposely setting a non-MAX time to ensure it is returning Long.MAX_VALUE upon success + NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( + 10, + Collections.singletonList(req)); + assertEquals(10, networkClientDelegate.addAll(success)); + + NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( + 10, + new ArrayList<>()); + assertEquals(10, networkClientDelegate.addAll(failure)); + } + + // TODO: Remove test, place elsewhere + // I think this test should be moved to HeartBeatRequestManager, explained below + @Test + void testMaximumTimeToWait() { + // Initial value before runOnce has been called + // Target: 5000, correct bc maximumTimeToWait is 5000 by default + // This first part is redundant, it will always be equal + assertEquals(MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); + consumerNetworkThread.runOnce(); + // After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager + // Looking at the implementation, it seems that this is testing HeartBeatRequestManager behavior + // + // In ConsumerNetworkThread.runOnce(), cachedTimeToWait is being calculated, as implied above + // after running runOnce(), the consumerNetworkThread.maximumTimeToWait() should return default heartbeat interval. + // How this can be done is when rm.maximumTimeToWait() is called, the HBRM pollTimer is not expired, and + // pollTimer.remainingMs() returns 1000 exactly + assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait()); + } + + @Test + void testRequestManagersArePolledOnce() { + consumerNetworkThread.runOnce(); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).poll(anyLong()))); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).maximumTimeToWait(anyLong()))); + // We just need to test networkClientDelegate not networkClient + verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); + } + + /** + * // Make sure the tests include testing poll with params of (pollWaitTime, currentTimeMs) + * 1. Add a test that have RM to return different poll times and ensure pollWaitTimeMs is computed correctly. i.e. takes the min of all + * 2. Test maxTimeToWait with different request manager returns + * 3. Remove the tests and create a commit for it so that we can look back later. 
+ */ + + // TODO: Remove test, place elsewhere + // This test can probably go because it is testing metadata update in the NetworkClient module + // May move somewhere else if it is not being tested elsewhere + @Test + void testEnsureMetadataUpdateOnPoll() { + MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap()); + client.prepareMetadataUpdate(metadataResponse); + metadata.requestUpdate(false); + consumerNetworkThread.runOnce(); + verify(metadata, times(1)).updateWithCurrentRequestVersion(eq(metadataResponse), eq(false), anyLong()); + } + + // TODO: Remove test, place elsewhere + // Test should be moved, testing pretty much everything but the ConsumerNetworkThread + @Test + void testEnsureEventsAreCompleted() { + List list = new ArrayList<>(); + list.add(new Node(1, null, 1)); + when(metadata.fetch().nodes()).thenReturn(list); + Node node = metadata.fetch().nodes().get(0); + coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); + client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node)); + prepareOffsetCommitRequest(new HashMap<>(), Errors.NONE, false); + CompletableApplicationEvent event1 = spy(new AsyncCommitEvent(Collections.emptyMap())); + + ApplicationEvent event2 = new AsyncCommitEvent(Collections.emptyMap()); + CompletableFuture future = new CompletableFuture<>(); + when(event1.future()).thenReturn(future); + applicationEventsQueue.add(event1); + applicationEventsQueue.add(event2); + assertFalse(future.isDone()); + assertFalse(applicationEventsQueue.isEmpty()); + + consumerNetworkThread.cleanup(); + assertTrue(future.isCompletedExceptionally()); + assertTrue(applicationEventsQueue.isEmpty()); + } + + private void prepareOffsetCommitRequest(final Map expectedOffsets, + final Errors error, + final boolean disconnected) { + Map errors = partitionErrors(expectedOffsets.keySet(), error); + client.prepareResponse(offsetCommitRequestMatcher(expectedOffsets), offsetCommitResponse(errors), disconnected); + } + + private Map partitionErrors(final Collection partitions, + final Errors error) { + final Map errors = new HashMap<>(); + for (TopicPartition partition : partitions) { + errors.put(partition, error); + } + return errors; + } + + private OffsetCommitResponse offsetCommitResponse(final Map responseData) { + return new OffsetCommitResponse(responseData); + } + + private MockClient.RequestMatcher offsetCommitRequestMatcher(final Map expectedOffsets) { + return body -> { + OffsetCommitRequest req = (OffsetCommitRequest) body; + Map offsets = req.offsets(); + if (offsets.size() != expectedOffsets.size()) + return false; + + for (Map.Entry expectedOffset : expectedOffsets.entrySet()) { + if (!offsets.containsKey(expectedOffset.getKey())) { + return false; + } else { + Long actualOffset = offsets.get(expectedOffset.getKey()); + if (!actualOffset.equals(expectedOffset.getValue())) { + return false; + } + } + } + return true; + }; + } + + private HashMap mockTopicPartitionOffset() { + final TopicPartition t0 = new TopicPartition("t0", 2); + final TopicPartition t1 = new TopicPartition("t0", 3); + HashMap topicPartitionOffsets = new HashMap<>(); + topicPartitionOffsets.put(t0, new OffsetAndMetadata(10L)); + topicPartitionOffsets.put(t1, new OffsetAndMetadata(20L)); + return topicPartitionOffsets; + } +} From c7a479ea40191c06587ac30881ce225925732454 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 24 May 2024 12:57:55 -0500 Subject: [PATCH 02/61] Removed changes to 
OffsetFetchRequestState Removed change of toString to toStringBase, moving change to separate branch --- .../consumer/internals/CommitRequestManager.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 93de22975fccc..96b0270fd4a0b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1090,12 +1090,14 @@ private void chainFuture( } @Override - public String toStringBase() { - return super.toStringBase() + + public String toString() { + return "OffsetFetchRequestState{" + + "requestedPartitions=" + requestedPartitions + ", memberId=" + memberInfo.memberId.orElse("undefined") + - ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch.get() : "undefined") + - ", requestedPartitions=" + requestedPartitions + - ", future=" + future; + ", memberEpoch=" + (memberInfo.memberEpoch.isPresent() ? memberInfo.memberEpoch.get() : "undefined") + + ", future=" + future + + ", " + toStringBase() + + '}'; } } From 1fcf205dd5af270b90dc0f48990aedb69ff804ac Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 24 May 2024 16:54:23 -0500 Subject: [PATCH 03/61] Added test for RMs Added test for RMs, seeing if pollWaitTimeMs is computed correctly --- .../ConsumerNetworkThreadUnitTest.java | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index c3197b07a8a15..8982c5d9c7be0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -112,6 +112,25 @@ public void tearDown() { consumerNetworkThread.close(); } + /** + * // Make sure the tests include testing poll with params of (pollWaitTime, currentTimeMs) + * 1. Add a test that have RM to return different poll times and ensure pollWaitTimeMs is computed correctly. i.e. takes the min of all + * 2. Test maxTimeToWait with different request manager returns + * 3. Remove the tests and create a commit for it so that we can look back later. + */ + + @Test + public void testRequestManagersPoll() { + consumerNetworkThread.runOnce(); + when(coordinatorRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(1040)); + when(commitRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(100)); + when(heartbeatRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(1020)); + when(offsetsRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(5100)); + when(requestManagers.entries()).thenReturn(..); + //when(networkClientDelegate.addAll(new NetworkClientDelegate.PollResult(100))).thenReturn(100L); + consumerNetworkThread.runOnce(); + } + @Test public void testStartupAndTearDown() throws InterruptedException { consumerNetworkThread.start(); @@ -131,7 +150,7 @@ public void testStartupAndTearDown() throws InterruptedException { // can you rename this test? 
testEnsureApplicationEventProcessorProcesses.... @Test - public void testApplicationEvent() { + public void testEnsureApplicationEventProcessorInvokesProcess() { //ApplicationEvent e = new PollEvent(100); //applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); @@ -310,13 +329,6 @@ void testRequestManagersArePolledOnce() { verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); } - /** - * // Make sure the tests include testing poll with params of (pollWaitTime, currentTimeMs) - * 1. Add a test that have RM to return different poll times and ensure pollWaitTimeMs is computed correctly. i.e. takes the min of all - * 2. Test maxTimeToWait with different request manager returns - * 3. Remove the tests and create a commit for it so taht we can look back later. - */ - // TODO: Remove test, place elsewhere // This test can probably go because it is testing metadata update in the NetworkClient module // May move somewhere else if it is not being tested elsewhere From 5271bb59c213c0e4a72272e3cce2e8966df097b3 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 09:54:24 -0500 Subject: [PATCH 04/61] Cleaning up comments Cleaning up comments --- .../consumer/internals/ConsumerNetworkThreadUnitTest.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 8982c5d9c7be0..ff67dfc4f3dbd 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -148,7 +148,6 @@ public void testStartupAndTearDown() throws InterruptedException { "The consumer network thread did not stop within " + DEFAULT_MAX_WAIT_MS + " ms"); } - // can you rename this test? testEnsureApplicationEventProcessorProcesses.... @Test public void testEnsureApplicationEventProcessorInvokesProcess() { //ApplicationEvent e = new PollEvent(100); @@ -168,7 +167,7 @@ public void testMetadataUpdateEvent() { } // TODO: Remove test, place elsewhere - // This test is testing behavior of the application event processor + // This test is testing behavior of the application event processor, redundant @Test public void testAsyncCommitEvent() { ApplicationEvent e = new AsyncCommitEvent(new HashMap<>()); @@ -179,7 +178,7 @@ public void testAsyncCommitEvent() { } // TODO: Remove test, place elsewhere - // This test is testing behavior of the application event processor + // This test is testing behavior of the application event processor, redundant @Test public void testSyncCommitEvent() { Timer timer = time.timer(100); @@ -191,6 +190,7 @@ public void testSyncCommitEvent() { } // TODO: Remove/move this. + // This test is testing behavior of the application event processor, redundant @ParameterizedTest @ValueSource(booleans = {true, false}) public void testListOffsetsEventIsProcessed(boolean requireTimestamp) { From 3f037e5169b8dee91153fbbea3517f8e51ee98b3 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 11:19:20 -0500 Subject: [PATCH 05/61] Added test for wait time computation Added a new param test to ensure ConsumerNetworkThread wait times are being computed correctly. 
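A note for reviewers on what this commit is checking: in runOnce(), the poll times returned by each request manager are reduced with Math::min (with MAX_POLL_TIMEOUT_MS as the cap and identity) before networkClientDelegate.poll(pollWaitTimeMs, currentTimeMs) is called. A minimal sketch of asserting that min-of-all property, not part of the committed patch: rm1 and rm2 are hypothetical mocks, and the addAll(...) stub is an assumption that runOnce() pipes each PollResult through the delegate and takes the min of the echoed times, as the surrounding diffs suggest.

    RequestManager rm1 = mock(RequestManager.class);
    RequestManager rm2 = mock(RequestManager.class);
    when(rm1.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(5000L));
    when(rm2.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(100L));
    List<Optional<? extends RequestManager>> managers = new ArrayList<>();
    managers.add(Optional.of(rm1));
    managers.add(Optional.of(rm2));
    when(requestManagers.entries()).thenReturn(managers);
    // Assumption: echo each PollResult's timeUntilNextPollMs back, as the real delegate's addAll() would.
    when(networkClientDelegate.addAll(any(NetworkClientDelegate.PollResult.class)))
        .thenAnswer(invocation -> ((NetworkClientDelegate.PollResult) invocation.getArgument(0)).timeUntilNextPollMs);
    consumerNetworkThread.runOnce();
    // min(MAX_POLL_TIMEOUT_MS, 5000, 100) == 100, so the delegate should be polled with 100 ms.
    verify(networkClientDelegate).poll(eq(100L), anyLong());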
--- .../ConsumerNetworkThreadUnitTest.java | 29 +++++++++++++------ 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index ff67dfc4f3dbd..03ceffdb6aba7 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -119,16 +119,27 @@ public void tearDown() { * 3. Remove the tests and create a commit for it so that we can look back later. */ - @Test - public void testRequestManagersPoll() { - consumerNetworkThread.runOnce(); - when(coordinatorRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(1040)); - when(commitRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(100)); - when(heartbeatRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(1020)); - when(offsetsRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(5100)); - when(requestManagers.entries()).thenReturn(..); - //when(networkClientDelegate.addAll(new NetworkClientDelegate.PollResult(100))).thenReturn(100L); + @ParameterizedTest + @ValueSource(longs = {1, 100, 1000, 4999, 5001}) + public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { + List> rmsList = new ArrayList<>(); + rmsList.add(Optional.of(coordinatorRequestManager)); +// rmsList.add(Optional.of(commitRequestManager)); +// rmsList.add(Optional.of(heartbeatRequestManager)); +// rmsList.add(Optional.of(offsetsRequestManager)); + when(requestManagers.entries()).thenReturn(rmsList); + + NetworkClientDelegate.PollResult pr = new NetworkClientDelegate.PollResult(exampleTime); + + when(coordinatorRequestManager.poll(anyLong())).thenReturn(pr); + when(coordinatorRequestManager.maximumTimeToWait(anyLong())).thenReturn(exampleTime); +// when(commitRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); +// when(heartbeatRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); +// when(offsetsRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); + when(networkClientDelegate.addAll(pr)).thenReturn(pr.timeUntilNextPollMs); consumerNetworkThread.runOnce(); + + assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime); } @Test From 4a85031104cb886e73d9c664f82b17281c5f61c2 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 11:22:01 -0500 Subject: [PATCH 06/61] Test removal Removed all redundant/integration tests --- .../ConsumerNetworkThreadUnitTest.java | 202 ------------------ 1 file changed, 202 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 03ceffdb6aba7..4a8af44679a84 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -167,170 +167,6 @@ public void testEnsureApplicationEventProcessorInvokesProcess() { verify(applicationEventProcessor, times(1)).process(); } - // TODO: Remove test, place elsewhere - // 
this test is testing ApplicationEventProcessor and ConsumerMetadata - @Test - public void testMetadataUpdateEvent() { - ApplicationEvent e = new NewTopicsMetadataUpdateRequestEvent(); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(metadata).requestUpdateForNewTopics(); - } - - // TODO: Remove test, place elsewhere - // This test is testing behavior of the application event processor, redundant - @Test - public void testAsyncCommitEvent() { - ApplicationEvent e = new AsyncCommitEvent(new HashMap<>()); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - //verify(applicationEventProcessor).process(any(AsyncCommitEvent.class)); - verify(applicationEventProcessor).process(); - } - - // TODO: Remove test, place elsewhere - // This test is testing behavior of the application event processor, redundant - @Test - public void testSyncCommitEvent() { - Timer timer = time.timer(100); - ApplicationEvent e = new SyncCommitEvent(new HashMap<>(), timer); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - //verify(applicationEventProcessor).process(any(SyncCommitEvent.class)); - verify(applicationEventProcessor).process(); - } - - // TODO: Remove/move this. - // This test is testing behavior of the application event processor, redundant - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void testListOffsetsEventIsProcessed(boolean requireTimestamp) { - Map timestamps = Collections.singletonMap(new TopicPartition("topic1", 1), 5L); - Timer timer = time.timer(100); - ApplicationEvent e = new ListOffsetsEvent(timestamps, timer, requireTimestamp); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - //verify(applicationEventProcessor).process(any(ListOffsetsEvent.class)); - verify(applicationEventProcessor).process(); - //assertTrue(applicationEventsQueue.isEmpty()); - } - - // TODO: Remove test, place elsewhere - // Redundant - @Test - public void testResetPositionsEventIsProcessed() { - Timer timer = time.timer(100); - ResetPositionsEvent e = new ResetPositionsEvent(timer); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - // runOnce() calls processor.process() with no arguments, so we should not expect any specific event object to be passed into it; clean up the rest of the test accordingly. - verify(applicationEventProcessor).process(); - //assertTrue(applicationEventsQueue.isEmpty()); - //when(coordinatorRequestManager.poll(time.milliseconds())).thenReturn(new NetworkClientDelegate.PollResult(100)); - //when(networkClientDelegate.addAll(new NetworkClientDelegate.PollResult(100))).thenReturn(100L); - } - - // TODO: This needs to be tested but should be removed from here. 
- @Test - public void testResetPositionsProcessFailureIsIgnored() { - doThrow(new NullPointerException()).when(offsetsRequestManager).resetPositionsIfNeeded(); - - Timer timer = time.timer(100); - ResetPositionsEvent event = new ResetPositionsEvent(timer); - applicationEventsQueue.add(event); - assertDoesNotThrow(() -> consumerNetworkThread.runOnce()); - - //verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); - verify(applicationEventProcessor).process(); - } - - // TODO: Remove test, place elsewhere - // Redundant - @Test - public void testValidatePositionsEventIsProcessed() { - Timer timer = time.timer(100); - ValidatePositionsEvent e = new ValidatePositionsEvent(timer); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - //verify(applicationEventProcessor).process(any(ValidatePositionsEvent.class)); - verify(applicationEventProcessor).process(); - //assertTrue(applicationEventsQueue.isEmpty()); - } - - // TODO: Remove test, place elsewhere - // Move this test to applicationEventProcessor if not already there - // Seems more like integration testing, and also redundant: consumerNetworkThread - // only needs to invoke runOnce(), which in turn invokes applicationEventProcessor.process() - @Test - public void testAssignmentChangeEvent() { - HashMap offset = mockTopicPartitionOffset(); - - final long currentTimeMs = time.milliseconds(); - ApplicationEvent e = new AssignmentChangeEvent(offset, currentTimeMs); - applicationEventsQueue.add(e); - - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(); - verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); - verify(commitRequestManager, times(1)).updateAutoCommitTimer(currentTimeMs); - // Assignment change should generate an async commit (not retried). - verify(commitRequestManager, times(1)).maybeAutoCommitAsync(); - } - - // TODO: Remove/move this. - @Test - void testFetchTopicMetadata() { - Timer timer = time.timer(Long.MAX_VALUE); - applicationEventsQueue.add(new TopicMetadataEvent("topic", timer)); - consumerNetworkThread.runOnce(); - //verify(applicationEventProcessor).process(any(TopicMetadataEvent.class)); - verify(applicationEventProcessor).process(); - } - - // TODO: Remove test, place elsewhere - // seems like this belongs to the ncd test. 
check if there's a similar test there - // I think this should be moved to NCD test, we are not testing ConsumerNetworkThread - @Test - void testPollResultTimer() { - NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( - new FindCoordinatorRequest.Builder( - new FindCoordinatorRequestData() - .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) - .setKey("foobar")), - Optional.empty()); - req.setTimer(time, DEFAULT_REQUEST_TIMEOUT_MS); - - // purposely setting a non-MAX time to ensure it is returning Long.MAX_VALUE upon success - NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( - 10, - Collections.singletonList(req)); - assertEquals(10, networkClientDelegate.addAll(success)); - - NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( - 10, - new ArrayList<>()); - assertEquals(10, networkClientDelegate.addAll(failure)); - } - - // TODO: Remove test, place elsewhere - // I think this test should be moved to HeartBeatRequestManager, explained below - @Test - void testMaximumTimeToWait() { - // Initial value before runOnce has been called - // Target: 5000, correct bc maximumTimeToWait is 5000 by default - // This first part is redundant, it will always be equal - assertEquals(MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); - consumerNetworkThread.runOnce(); - // After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager - // Looking at the implementation, it seems that this is testing HeartBeatRequestManager behavior - // - // In ConsumerNetworkThread.runOnce(), cachedTimeToWait is being calculated, as implied above - // after running runOnce(), the consumerNetworkThread.maximumTimeToWait() should return default heartbeat interval. 
- // How this can be done is when rm.maximumTimeToWait() is called, the HBRM pollTimer is not expired, and - // pollTimer.remainingMs() returns 1000 exactly - assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait()); - } - @Test void testRequestManagersArePolledOnce() { consumerNetworkThread.runOnce(); @@ -340,44 +176,6 @@ void testRequestManagersArePolledOnce() { verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); } - // TODO: Remove test, place elsewhere - // This test can probably go because it is testing metadata update in the NetworkClient module - // May move somewhere else if it is not being tested elsewhere - @Test - void testEnsureMetadataUpdateOnPoll() { - MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap()); - client.prepareMetadataUpdate(metadataResponse); - metadata.requestUpdate(false); - consumerNetworkThread.runOnce(); - verify(metadata, times(1)).updateWithCurrentRequestVersion(eq(metadataResponse), eq(false), anyLong()); - } - - // TODO: Remove test, place elsewhere - // Test should be moved, testing pretty much everything but the ConsumerNetworkThread - @Test - void testEnsureEventsAreCompleted() { - List list = new ArrayList<>(); - list.add(new Node(1, null, 1)); - when(metadata.fetch().nodes()).thenReturn(list); - Node node = metadata.fetch().nodes().get(0); - coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); - client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node)); - prepareOffsetCommitRequest(new HashMap<>(), Errors.NONE, false); - CompletableApplicationEvent event1 = spy(new AsyncCommitEvent(Collections.emptyMap())); - - ApplicationEvent event2 = new AsyncCommitEvent(Collections.emptyMap()); - CompletableFuture future = new CompletableFuture<>(); - when(event1.future()).thenReturn(future); - applicationEventsQueue.add(event1); - applicationEventsQueue.add(event2); - assertFalse(future.isDone()); - assertFalse(applicationEventsQueue.isEmpty()); - - consumerNetworkThread.cleanup(); - assertTrue(future.isCompletedExceptionally()); - assertTrue(applicationEventsQueue.isEmpty()); - } - private void prepareOffsetCommitRequest(final Map expectedOffsets, final Errors error, final boolean disconnected) { From a10a961a2f5821a08e00a323bca0e4ab5e1991eb Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 11:51:51 -0500 Subject: [PATCH 07/61] Changed variables names Changed variables names in testConsumerNetworkThreadWaitTimeComputations to be more descriptive --- .../internals/ConsumerNetworkThreadUnitTest.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 4a8af44679a84..aff16ef4bfed4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -122,21 +122,21 @@ public void tearDown() { @ParameterizedTest @ValueSource(longs = {1, 100, 1000, 4999, 5001}) public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { - List> rmsList = new ArrayList<>(); - rmsList.add(Optional.of(coordinatorRequestManager)); + List> requestManagersList = new ArrayList<>(); + 
requestManagersList.add(Optional.of(coordinatorRequestManager)); // rmsList.add(Optional.of(commitRequestManager)); // rmsList.add(Optional.of(heartbeatRequestManager)); // rmsList.add(Optional.of(offsetsRequestManager)); - when(requestManagers.entries()).thenReturn(rmsList); + when(requestManagers.entries()).thenReturn(requestManagersList); - NetworkClientDelegate.PollResult pr = new NetworkClientDelegate.PollResult(exampleTime); + NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); - when(coordinatorRequestManager.poll(anyLong())).thenReturn(pr); + when(coordinatorRequestManager.poll(anyLong())).thenReturn(pollResult); when(coordinatorRequestManager.maximumTimeToWait(anyLong())).thenReturn(exampleTime); // when(commitRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); // when(heartbeatRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); // when(offsetsRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); - when(networkClientDelegate.addAll(pr)).thenReturn(pr.timeUntilNextPollMs); + when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs); consumerNetworkThread.runOnce(); assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime); From a1a4de0293deb44a7f24e5c12d77720156384833 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 11:53:37 -0500 Subject: [PATCH 08/61] Removed vars and methods with no usage Removed vars and methods with no usage --- .../ConsumerNetworkThreadUnitTest.java | 55 +------------------ 1 file changed, 1 insertion(+), 54 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index aff16ef4bfed4..fa42ecdd24525 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -71,9 +71,6 @@ public class ConsumerNetworkThreadUnitTest { private final NetworkClientDelegate networkClientDelegate; private final RequestManagers requestManagers; - static final int DEFAULT_REQUEST_TIMEOUT_MS = 500; - static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000; - ConsumerNetworkThreadUnitTest() { LogContext logContext = new LogContext(); this.time = new MockTime(); @@ -175,54 +172,4 @@ void testRequestManagersArePolledOnce() { // We just need to test networkClientDelegate not networkClient verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); } - - private void prepareOffsetCommitRequest(final Map expectedOffsets, - final Errors error, - final boolean disconnected) { - Map errors = partitionErrors(expectedOffsets.keySet(), error); - client.prepareResponse(offsetCommitRequestMatcher(expectedOffsets), offsetCommitResponse(errors), disconnected); - } - - private Map partitionErrors(final Collection partitions, - final Errors error) { - final Map errors = new HashMap<>(); - for (TopicPartition partition : partitions) { - errors.put(partition, error); - } - return errors; - } - - private OffsetCommitResponse offsetCommitResponse(final Map responseData) { - return new OffsetCommitResponse(responseData); - } - - private MockClient.RequestMatcher offsetCommitRequestMatcher(final Map expectedOffsets) { - return body -> { - OffsetCommitRequest req 
= (OffsetCommitRequest) body; - Map offsets = req.offsets(); - if (offsets.size() != expectedOffsets.size()) - return false; - - for (Map.Entry expectedOffset : expectedOffsets.entrySet()) { - if (!offsets.containsKey(expectedOffset.getKey())) { - return false; - } else { - Long actualOffset = offsets.get(expectedOffset.getKey()); - if (!actualOffset.equals(expectedOffset.getValue())) { - return false; - } - } - } - return true; - }; - } - - private HashMap mockTopicPartitionOffset() { - final TopicPartition t0 = new TopicPartition("t0", 2); - final TopicPartition t1 = new TopicPartition("t0", 3); - HashMap topicPartitionOffsets = new HashMap<>(); - topicPartitionOffsets.put(t0, new OffsetAndMetadata(10L)); - topicPartitionOffsets.put(t1, new OffsetAndMetadata(20L)); - return topicPartitionOffsets; - } -} +} \ No newline at end of file From 372eea6c46e085f2fd548c7bdfb03058594dcd2b Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 11:54:19 -0500 Subject: [PATCH 09/61] Removed unused imports Removed unused imports --- .../internals/ConsumerNetworkThreadUnitTest.java | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index fa42ecdd24525..cab9533b8bce5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -18,17 +18,10 @@ import org.apache.kafka.clients.*; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.internals.events.*; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.message.FindCoordinatorRequestData; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.*; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.common.utils.Timer; import org.apache.kafka.test.TestCondition; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; @@ -40,17 +33,11 @@ import java.time.Duration; import java.util.*; import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; -import static org.apache.kafka.clients.consumer.internals.ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.*; import static org.mockito.Mockito.mock; From 43be21e1d4a51bac50c2e05f89e25e869525b62a Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 13:06:23 -0500 Subject: [PATCH 10/61] Updating comments Updating comments --- .../consumer/internals/ConsumerNetworkThreadUnitTest.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index cab9533b8bce5..0e4ca7253db3f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -108,9 +108,9 @@ public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { List> requestManagersList = new ArrayList<>(); requestManagersList.add(Optional.of(coordinatorRequestManager)); -// requestManagersList.add(Optional.of(commitRequestManager)); -// requestManagersList.add(Optional.of(heartbeatRequestManager)); -// requestManagersList.add(Optional.of(offsetsRequestManager)); +// requestManagersList.add(Optional.of(commitRequestManager)); +// requestManagersList.add(Optional.of(heartbeatRequestManager)); +// requestManagersList.add(Optional.of(offsetsRequestManager)); when(requestManagers.entries()).thenReturn(requestManagersList); NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); From ce3d5c5cb6e6b807abd01f15befc78c0d5629066 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 28 May 2024 14:07:07 -0500 Subject: [PATCH 11/61] Comments Comments --- .../clients/consumer/internals/ConsumerNetworkThread.java | 6 ++++++ .../consumer/internals/ConsumerNetworkThreadUnitTest.java | 1 + 2 files changed, 7 insertions(+) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index f3ed2b83451c1..3eae1d8761c17 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -75,6 +75,11 @@ public ConsumerNetworkThread(LogContext logContext, this.running = true; } + /** + * TODO: We should also test close() and cleanup() in two different test cases + * However, we can create a separate Jira ticket and PR for the closing tests to ensure full coverage + */ + @Override public void run() { try { @@ -274,6 +279,7 @@ void cleanup() { try { runAtClose(requestManagers.entries(), networkClientDelegate, timer); } catch (Exception e) { + // TODO: Make sure the exception case is being tested log.error("Unexpected error during shutdown. 
Proceed with closing.", e); } finally { sendUnsentRequests(timer); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 0e4ca7253db3f..bc69683b14045 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -123,6 +123,7 @@ public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs); consumerNetworkThread.runOnce(); + // verify networkClientDelegate polls with the correct time assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime); } From 8a785e1fb589e6b179980145c84dc0f8f9baef83 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 10:50:53 -0500 Subject: [PATCH 12/61] Updated constructor Updated constructor to include correct parameters --- .../ConsumerNetworkThreadUnitTest.java | 28 ++++++------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index bc69683b14045..93ef30c14b15a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -57,13 +57,12 @@ public class ConsumerNetworkThreadUnitTest { private final MockClient client; private final NetworkClientDelegate networkClientDelegate; private final RequestManagers requestManagers; + private final CompletableEventReaper applicationEventReaper; ConsumerNetworkThreadUnitTest() { LogContext logContext = new LogContext(); this.time = new MockTime(); this.client = new MockClient(time); - - // usually we don't mock 1. time - MockTime 2. logContext and 3. networkClient - MockClient this.networkClientDelegate = mock(NetworkClientDelegate.class); this.requestManagers = mock(RequestManagers.class); this.offsetsRequestManager = mock(OffsetsRequestManager.class); @@ -75,10 +74,13 @@ public class ConsumerNetworkThreadUnitTest { this.subscriptions = mock(SubscriptionState.class); this.metadata = mock(ConsumerMetadata.class); this.applicationEventProcessor = mock(ApplicationEventProcessor.class); + this.applicationEventReaper = mock(CompletableEventReaper.class); this.consumerNetworkThread = new ConsumerNetworkThread( logContext, time, + applicationEventsQueue, + applicationEventReaper, () -> applicationEventProcessor, () -> networkClientDelegate, () -> requestManagers @@ -96,30 +98,17 @@ public void tearDown() { consumerNetworkThread.close(); } - /** - * // Make sure the tests include testing poll with params of (pollWaitTime, currentTimeMs) - * 1. Add a test that have RM to return different poll times and ensure pollWaitTimeMs is computed correctly. i.e. takes the min of all - * 2. Test maxTimeToWait with different request manager returns - * 3. Remove the tests and create a commit for it so that we can look back later. 
- */ - @ParameterizedTest @ValueSource(longs = {1, 100, 1000, 4999, 5001}) public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { List> requestManagersList = new ArrayList<>(); requestManagersList.add(Optional.of(coordinatorRequestManager)); -// requestManagersList.add(Optional.of(commitRequestManager)); -// requestManagersList.add(Optional.of(heartbeatRequestManager)); -// requestManagersList.add(Optional.of(offsetsRequestManager)); when(requestManagers.entries()).thenReturn(requestManagersList); NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); when(coordinatorRequestManager.poll(anyLong())).thenReturn(pollResult); when(coordinatorRequestManager.maximumTimeToWait(anyLong())).thenReturn(exampleTime); -// when(commitRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); -// when(heartbeatRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); -// when(offsetsRequestManager.poll(anyLong())).thenReturn(new NetworkClientDelegate.PollResult(10000L)); when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs); consumerNetworkThread.runOnce(); @@ -146,10 +135,10 @@ public void testStartupAndTearDown() throws InterruptedException { @Test public void testEnsureApplicationEventProcessorInvokesProcess() { - //ApplicationEvent e = new PollEvent(100); - //applicationEventsQueue.add(e); + ApplicationEvent e = new PollEvent(100); + applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); - verify(applicationEventProcessor, times(1)).process(); + verify(applicationEventProcessor).process(e); } @Test @@ -157,7 +146,6 @@ void testRequestManagersArePolledOnce() { consumerNetworkThread.runOnce(); requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).poll(anyLong()))); requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).maximumTimeToWait(anyLong()))); - // We just need to test networkClientDelegate not networkClient - verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); + verify(networkClientDelegate).poll(anyLong(), anyLong()); } } \ No newline at end of file From 70ad4576baa9b5cebbbdce62f69d60b6807c3258 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 11:09:37 -0500 Subject: [PATCH 13/61] Added tests back Added previously removed tests and changed some imports and edited the constructor --- .../ConsumerNetworkThreadUnitTest.java | 258 +++++++++++++++++- 1 file changed, 250 insertions(+), 8 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 93ef30c14b15a..a98ee254086e8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -18,7 +18,14 @@ import org.apache.kafka.clients.*; import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.internals.events.*; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.message.FindCoordinatorRequestData; +import 
org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.*; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -33,10 +40,14 @@ import java.time.Duration; import java.util.*; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; +import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_HEARTBEAT_INTERVAL_MS; +import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_REQUEST_TIMEOUT_MS; +import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.*; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.*; import static org.mockito.Mockito.mock; @@ -69,7 +80,7 @@ public class ConsumerNetworkThreadUnitTest { this.commitRequestManager = mock(CommitRequestManager.class); this.heartbeatRequestManager = mock(HeartbeatRequestManager.class); this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); - this.applicationEventsQueue = mock(LinkedBlockingQueue.class); + this.applicationEventsQueue = new LinkedBlockingQueue<>(); this.config = mock(ConsumerConfig.class); this.subscriptions = mock(SubscriptionState.class); this.metadata = mock(ConsumerMetadata.class); @@ -134,18 +145,249 @@ public void testStartupAndTearDown() throws InterruptedException { } @Test - public void testEnsureApplicationEventProcessorInvokesProcess() { + void testRequestManagersArePolledOnce() { + consumerNetworkThread.runOnce(); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).poll(anyLong()))); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).maximumTimeToWait(anyLong()))); + verify(networkClientDelegate).poll(anyLong(), anyLong()); + } + + @Test + public void testApplicationEvent() { ApplicationEvent e = new PollEvent(100); applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(e); + verify(applicationEventProcessor, times(1)).process(e); } + // Probably remove or change @Test - void testRequestManagersArePolledOnce() { + public void testMetadataUpdateEvent() { + ApplicationEvent e = new NewTopicsMetadataUpdateRequestEvent(); + applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); - requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).poll(anyLong()))); - requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).maximumTimeToWait(anyLong()))); - verify(networkClientDelegate).poll(anyLong(), anyLong()); + verify(metadata).requestUpdateForNewTopics(); + } + + @Test + public void testAsyncCommitEvent() { + ApplicationEvent e = new AsyncCommitEvent(new HashMap<>()); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(any(AsyncCommitEvent.class)); + } + + @Test + public void testSyncCommitEvent() { + ApplicationEvent e = new SyncCommitEvent(new HashMap<>(), calculateDeadlineMs(time, 100)); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(any(SyncCommitEvent.class)); + } + + @ParameterizedTest + 
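// (assumption: the true/false runs mirror the two callers of ListOffsetsEvent, offsetsForTimes, which requires timestamps, and beginning/end offsets, which does not)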
@ValueSource(booleans = {true, false}) + public void testListOffsetsEventIsProcessed(boolean requireTimestamp) { + Map timestamps = Collections.singletonMap(new TopicPartition("topic1", 1), 5L); + ApplicationEvent e = new ListOffsetsEvent(timestamps, calculateDeadlineMs(time, 100), requireTimestamp); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(any(ListOffsetsEvent.class)); + assertTrue(applicationEventsQueue.isEmpty()); + } + + @Test + public void testResetPositionsEventIsProcessed() { + ResetPositionsEvent e = new ResetPositionsEvent(calculateDeadlineMs(time, 100)); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); + assertTrue(applicationEventsQueue.isEmpty()); + } + + @Test + public void testResetPositionsProcessFailureIsIgnored() { + doThrow(new NullPointerException()).when(offsetsRequestManager).resetPositionsIfNeeded(); + + ResetPositionsEvent event = new ResetPositionsEvent(calculateDeadlineMs(time, 100)); + applicationEventsQueue.add(event); + assertDoesNotThrow(() -> consumerNetworkThread.runOnce()); + + verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); + } + + @Test + public void testValidatePositionsEventIsProcessed() { + ValidatePositionsEvent e = new ValidatePositionsEvent(calculateDeadlineMs(time, 100)); + applicationEventsQueue.add(e); + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(any(ValidatePositionsEvent.class)); + assertTrue(applicationEventsQueue.isEmpty()); + } + + // Maybe remove, try out making commitRequestManager not a mock + @Test + public void testAssignmentChangeEvent() { + HashMap offset = mockTopicPartitionOffset(); + + final long currentTimeMs = time.milliseconds(); + ApplicationEvent e = new AssignmentChangeEvent(offset, currentTimeMs); + applicationEventsQueue.add(e); + + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); + verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); + verify(commitRequestManager, times(1)).updateAutoCommitTimer(currentTimeMs); + // Assignment change should generate an async commit (not retried). 
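// maybeAutoCommitAsync() is only expected to actually send a commit when auto-commit is enabled and its interval has elapsed; this verify asserts the manager was invoked, not that a request went out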
+ verify(commitRequestManager, times(1)).maybeAutoCommitAsync(); + } + + @Test + void testFetchTopicMetadata() { + applicationEventsQueue.add(new TopicMetadataEvent("topic", Long.MAX_VALUE)); + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(any(TopicMetadataEvent.class)); + } + + // Look into this one, may need to use networkClient instead of the delegate + @Test + void testPollResultTimer() { + NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( + new FindCoordinatorRequest.Builder( + new FindCoordinatorRequestData() + .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) + .setKey("foobar")), + Optional.empty()); + req.setTimer(time, DEFAULT_REQUEST_TIMEOUT_MS); + + // purposely setting a non-MAX time to ensure it is returning Long.MAX_VALUE upon success + NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( + 10, + Collections.singletonList(req)); + assertEquals(10, networkClientDelegate.addAll(success)); + + NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( + 10, + new ArrayList<>()); + assertEquals(10, networkClientDelegate.addAll(failure)); + } + + // Should work, look into this + @Test + void testMaximumTimeToWait() { + // Initial value before runOnce has been called + assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); + consumerNetworkThread.runOnce(); + // After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager + assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait()); + } + + // Check on this one + @Test + void testEnsureMetadataUpdateOnPoll() { + MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap()); + client.prepareMetadataUpdate(metadataResponse); + metadata.requestUpdate(false); + consumerNetworkThread.runOnce(); + verify(metadata, times(1)).updateWithCurrentRequestVersion(eq(metadataResponse), eq(false), anyLong()); + } + + // Look into this one + @Test + void testEnsureEventsAreCompleted() { + // Mimic the logic of CompletableEventReaper.reap(Collection): + doAnswer(__ -> { + Iterator i = applicationEventsQueue.iterator(); + + while (i.hasNext()) { + ApplicationEvent event = i.next(); + + if (event instanceof CompletableEvent) + ((CompletableEvent) event).future().completeExceptionally(new TimeoutException()); + + i.remove(); + } + + return null; + }).when(applicationEventReaper).reap(any(Collection.class)); + + Node node = metadata.fetch().nodes().get(0); + coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); + client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node)); + prepareOffsetCommitRequest(new HashMap<>(), Errors.NONE, false); + CompletableApplicationEvent event1 = spy(new AsyncCommitEvent(Collections.emptyMap())); + ApplicationEvent event2 = new AsyncCommitEvent(Collections.emptyMap()); + CompletableFuture future = new CompletableFuture<>(); + when(event1.future()).thenReturn(future); + applicationEventsQueue.add(event1); + applicationEventsQueue.add(event2); + assertFalse(future.isDone()); + assertFalse(applicationEventsQueue.isEmpty()); + consumerNetworkThread.cleanup(); + assertTrue(future.isCompletedExceptionally()); + assertTrue(applicationEventsQueue.isEmpty()); + } + + // Look into this one + @Test + void testCleanupInvokesReaper() { + consumerNetworkThread.cleanup(); + 
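// cleanup() is expected to flush unsent requests first and then hand every event left in the queue to the reaper, which is what the next line verifies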
verify(applicationEventReaper).reap(applicationEventsQueue); + } + + @Test + void testRunOnceInvokesReaper() { + consumerNetworkThread.runOnce(); + verify(applicationEventReaper).reap(any(Long.class)); + } + + private HashMap mockTopicPartitionOffset() { + final TopicPartition t0 = new TopicPartition("t0", 2); + final TopicPartition t1 = new TopicPartition("t0", 3); + HashMap topicPartitionOffsets = new HashMap<>(); + topicPartitionOffsets.put(t0, new OffsetAndMetadata(10L)); + topicPartitionOffsets.put(t1, new OffsetAndMetadata(20L)); + return topicPartitionOffsets; + } + + private void prepareOffsetCommitRequest(final Map expectedOffsets, + final Errors error, + final boolean disconnected) { + Map errors = partitionErrors(expectedOffsets.keySet(), error); + client.prepareResponse(offsetCommitRequestMatcher(expectedOffsets), offsetCommitResponse(errors), disconnected); + } + + private Map partitionErrors(final Collection partitions, + final Errors error) { + final Map errors = new HashMap<>(); + for (TopicPartition partition : partitions) { + errors.put(partition, error); + } + return errors; + } + + private OffsetCommitResponse offsetCommitResponse(final Map responseData) { + return new OffsetCommitResponse(responseData); + } + + private MockClient.RequestMatcher offsetCommitRequestMatcher(final Map expectedOffsets) { + return body -> { + OffsetCommitRequest req = (OffsetCommitRequest) body; + Map offsets = req.offsets(); + if (offsets.size() != expectedOffsets.size()) + return false; + + for (Map.Entry expectedOffset : expectedOffsets.entrySet()) { + if (!offsets.containsKey(expectedOffset.getKey())) { + return false; + } else { + Long actualOffset = offsets.get(expectedOffset.getKey()); + if (!actualOffset.equals(expectedOffset.getValue())) { + return false; + } + } + } + return true; + }; } } \ No newline at end of file From 94f9bcc02f476a7381fcc94bcd1ee0ef36039c12 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 11:17:21 -0500 Subject: [PATCH 14/61] Minor updates to tests Did some minor updates, changed testMetadataUpdateEvent since it was integration testing --- .../clients/consumer/internals/ConsumerNetworkThread.java | 2 -- .../consumer/internals/ConsumerNetworkThreadUnitTest.java | 7 +++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index 3c6475e2962dd..acc40af59dfd7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -145,7 +145,6 @@ void runOnce() { // here we get the system time and pass it to the request manager because we don't want to invoke system time all the time final long currentTimeMs = time.milliseconds(); - // TODO: Make sure pollWaitTimeMs is computed correctly. 
Try to examine different scenarios final long pollWaitTimeMs = requestManagers.entries().stream() .filter(Optional::isPresent) .map(Optional::get) @@ -154,7 +153,6 @@ void runOnce() { .reduce(MAX_POLL_TIMEOUT_MS, Math::min); networkClientDelegate.poll(pollWaitTimeMs, currentTimeMs); - // TODO: Check computation of cachedMaximumTimeToWait cachedMaximumTimeToWait = requestManagers.entries().stream() .filter(Optional::isPresent) .map(Optional::get) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index a98ee254086e8..4c4e18f1de481 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -157,16 +157,15 @@ public void testApplicationEvent() { ApplicationEvent e = new PollEvent(100); applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); - verify(applicationEventProcessor, times(1)).process(e); + verify(applicationEventProcessor).process(e); } - // Probably remove or change @Test public void testMetadataUpdateEvent() { ApplicationEvent e = new NewTopicsMetadataUpdateRequestEvent(); applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); - verify(metadata).requestUpdateForNewTopics(); + verify(applicationEventProcessor).process(e); } @Test @@ -225,7 +224,7 @@ public void testValidatePositionsEventIsProcessed() { assertTrue(applicationEventsQueue.isEmpty()); } - // Maybe remove, try out making commitRequestManager not a mock + // Look into this one @Test public void testAssignmentChangeEvent() { HashMap offset = mockTopicPartitionOffset(); From 99c4e151f7463c78aa8a4f8fcfdcc37adb3d790c Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 13:16:51 -0500 Subject: [PATCH 15/61] Constructor and test updates Updated constructor, removed unnecessary variables. 
Also updated a few tests to make them work or to make them unit tests rather than integration tests --- .../ConsumerNetworkThreadUnitTest.java | 53 +++++++++++++------ 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 4c4e18f1de481..ab93c81939613 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.internals.events.*; +import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.TimeoutException; @@ -56,37 +57,40 @@ public class ConsumerNetworkThreadUnitTest { private final Time time; private final ConsumerMetadata metadata; - private final ConsumerConfig config; - private final SubscriptionState subscriptions; private final BlockingQueue applicationEventsQueue; private final ApplicationEventProcessor applicationEventProcessor; private final OffsetsRequestManager offsetsRequestManager; - private final CommitRequestManager commitRequestManager; private final HeartbeatRequestManager heartbeatRequestManager; private final CoordinatorRequestManager coordinatorRequestManager; private final ConsumerNetworkThread consumerNetworkThread; private final MockClient client; private final NetworkClientDelegate networkClientDelegate; + private final NetworkClientDelegate networkClient; private final RequestManagers requestManagers; private final CompletableEventReaper applicationEventReaper; ConsumerNetworkThreadUnitTest() { LogContext logContext = new LogContext(); + ConsumerConfig config = mock(ConsumerConfig.class); this.time = new MockTime(); this.client = new MockClient(time); this.networkClientDelegate = mock(NetworkClientDelegate.class); this.requestManagers = mock(RequestManagers.class); this.offsetsRequestManager = mock(OffsetsRequestManager.class); - this.commitRequestManager = mock(CommitRequestManager.class); this.heartbeatRequestManager = mock(HeartbeatRequestManager.class); this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); this.applicationEventsQueue = new LinkedBlockingQueue<>(); - this.config = mock(ConsumerConfig.class); - this.subscriptions = mock(SubscriptionState.class); this.metadata = mock(ConsumerMetadata.class); this.applicationEventProcessor = mock(ApplicationEventProcessor.class); this.applicationEventReaper = mock(CompletableEventReaper.class); + this.networkClient = new NetworkClientDelegate( + time, + config, + logContext, + client + ); + this.consumerNetworkThread = new ConsumerNetworkThread( logContext, time, @@ -224,7 +228,7 @@ public void testValidatePositionsEventIsProcessed() { assertTrue(applicationEventsQueue.isEmpty()); } - // Look into this one + // Seems to be more like integration testing @Test public void testAssignmentChangeEvent() { HashMap offset = mockTopicPartitionOffset(); @@ -235,10 +239,10 @@ public void testAssignmentChangeEvent() { consumerNetworkThread.runOnce(); verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); - verify(networkClientDelegate, 
times(1)).poll(anyLong(), anyLong()); - verify(commitRequestManager, times(1)).updateAutoCommitTimer(currentTimeMs); - // Assignment change should generate an async commit (not retried). - verify(commitRequestManager, times(1)).maybeAutoCommitAsync(); +// verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); +// verify(commitRequestManager, times(1)).updateAutoCommitTimer(currentTimeMs); +// // Assignment change should generate an async commit (not retried). +// verify(commitRequestManager, times(1)).maybeAutoCommitAsync(); } @Test @@ -248,7 +252,6 @@ void testFetchTopicMetadata() { verify(applicationEventProcessor).process(any(TopicMetadataEvent.class)); } - // Look into this one, may need to use networkClient instead of the delegate @Test void testPollResultTimer() { NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( @@ -263,25 +266,30 @@ void testPollResultTimer() { NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( 10, Collections.singletonList(req)); - assertEquals(10, networkClientDelegate.addAll(success)); + assertEquals(10, networkClient.addAll(success)); NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( 10, new ArrayList<>()); - assertEquals(10, networkClientDelegate.addAll(failure)); + assertEquals(10, networkClient.addAll(failure)); } - // Should work, look into this @Test void testMaximumTimeToWait() { + List> list = new ArrayList<>(); + list.add(Optional.of(heartbeatRequestManager)); // Initial value before runOnce has been called assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); + + when(requestManagers.entries()).thenReturn(list); + when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) DEFAULT_HEARTBEAT_INTERVAL_MS); + consumerNetworkThread.runOnce(); // After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait()); } - // Check on this one + // Looks like integration testing, I think this should be removed @Test void testEnsureMetadataUpdateOnPoll() { MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap()); @@ -291,9 +299,18 @@ void testEnsureMetadataUpdateOnPoll() { verify(metadata, times(1)).updateWithCurrentRequestVersion(eq(metadataResponse), eq(false), anyLong()); } - // Look into this one @Test void testEnsureEventsAreCompleted() { + Cluster cluster = mock(Cluster.class); + when(metadata.fetch()).thenReturn(cluster); + + List list = new ArrayList<>(); + list.add(new Node(0, "host", 0)); + when(cluster.nodes()).thenReturn(list); + + Queue queue = new LinkedList<>(); + when(networkClientDelegate.unsentRequests()).thenReturn(queue); + // Mimic the logic of CompletableEventReaper.reap(Collection): doAnswer(__ -> { Iterator i = applicationEventsQueue.iterator(); @@ -330,6 +347,8 @@ void testEnsureEventsAreCompleted() { // Look into this one @Test void testCleanupInvokesReaper() { + Queue queue = new LinkedList<>(); + when(networkClientDelegate.unsentRequests()).thenReturn(queue); consumerNetworkThread.cleanup(); verify(applicationEventReaper).reap(applicationEventsQueue); } From 9aa6e536da37589f11a5df03d4799b16b4eab6a2 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 13:19:09 -0500 Subject: [PATCH 16/61] Updated comment Updated comment on testEnsureMetadataUpdateOnPoll --- 
 .../consumer/internals/ConsumerNetworkThreadUnitTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java
index ab93c81939613..2cad8d9c51635 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java
@@ -289,7 +289,7 @@ void testMaximumTimeToWait() {
        assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait());
    }

-    // Looks like integration testing, I think this should be removed
+    // Looks like integration testing, I think this should be removed/moved elsewhere
    @Test
    void testEnsureMetadataUpdateOnPoll() {

From a587124fdfbfc9a04859555152f7585a45d395bf Mon Sep 17 00:00:00 2001
From: brenden20
Date: Wed, 29 May 2024 13:36:20 -0500
Subject: [PATCH 17/61] Remove comment

Remove comment

---
 .../kafka/clients/consumer/internals/ConsumerNetworkThread.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
index acc40af59dfd7..23dbf33da4a96 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
@@ -111,7 +111,6 @@ public void run() {
        }
    }

-    // Thunk - lazy sequence () -> 1
    void initializeResources() {
        applicationEventProcessor = applicationEventProcessorSupplier.get();
        networkClientDelegate = networkClientDelegateSupplier.get();

From a95e0c7e230686e53e455339a0ca5d6a21345696 Mon Sep 17 00:00:00 2001
From: brenden20
Date: Wed, 29 May 2024 14:19:31 -0500
Subject: [PATCH 18/61] Added testEnsureCloseStopsRunningThread(), updated others

Added testEnsureCloseStopsRunningThread. Updated
testConsumerNetworkThreadWaitTimeComputations to also verify that
networkClientDelegate polls with the correct wait time. Removed one
instance of a spy object and replaced it with a mock.
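For reference, the updated verification encodes the wait-time clamping that
runOnce() performs when it reduces the request managers' poll times with
Math::min against MAX_POLL_TIMEOUT_MS. A minimal sketch of that arithmetic,
assuming the constant is 5000 ms (implied by the expected poll(5000, ...)
call, not confirmed here):

    // clamp the requested wait to the poll-timeout ceiling, as runOnce() does
    long maxPollTimeoutMs = 5000L;  // assumed value of MAX_POLL_TIMEOUT_MS
    long exampleTime = 5001L;       // one of the parameterized values
    long pollWaitTimeMs = Math.min(exampleTime, maxPollTimeoutMs);
    // pollWaitTimeMs == 5000, so the test expects poll(5000, currentTimeMs)
    // rather than poll(5001, ...)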
--- .../internals/ConsumerNetworkThread.java | 5 ----- .../ConsumerNetworkThreadUnitTest.java | 22 ++++++++++++++++--- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index 23dbf33da4a96..c9cae79199fb5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -85,11 +85,6 @@ public ConsumerNetworkThread(LogContext logContext, this.running = true; } - /** - * TODO: We should also test close() and cleanup() in two different test cases - * However, we can create a separated Jira ticket and PR for the closing tests to ensure full coverage - */ - @Override public void run() { try { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 2cad8d9c51635..534345295407f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -50,7 +50,13 @@ import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; import static org.junit.jupiter.api.Assertions.*; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; public class ConsumerNetworkThreadUnitTest { @@ -113,6 +119,16 @@ public void tearDown() { consumerNetworkThread.close(); } + @Test + public void testEnsureCloseStopsRunningThread() { + // consumerNetworkThread.running is set to true in the constructor + assertTrue(consumerNetworkThread.isRunning()); + + // close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout) + consumerNetworkThread.close(); + assertFalse(consumerNetworkThread.isRunning()); + } + @ParameterizedTest @ValueSource(longs = {1, 100, 1000, 4999, 5001}) public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { @@ -127,7 +143,7 @@ public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs); consumerNetworkThread.runOnce(); - // verify networkClientDelegate polls with the correct time + verify(networkClientDelegate).poll((exampleTime < 5001 ? 
exampleTime : 5000), time.milliseconds()); assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime); } @@ -331,7 +347,7 @@ void testEnsureEventsAreCompleted() { coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node)); prepareOffsetCommitRequest(new HashMap<>(), Errors.NONE, false); - CompletableApplicationEvent event1 = spy(new AsyncCommitEvent(Collections.emptyMap())); + CompletableApplicationEvent event1 = mock(AsyncCommitEvent.class); ApplicationEvent event2 = new AsyncCommitEvent(Collections.emptyMap()); CompletableFuture future = new CompletableFuture<>(); when(event1.future()).thenReturn(future); From da90e1b58f97ce339f327ab63a8d1537884cff2a Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 14:21:24 -0500 Subject: [PATCH 19/61] Cleaning up warnings Cleaning up warnings, changed lambda expressions to explicit method references --- .../consumer/internals/ConsumerNetworkThreadUnitTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 534345295407f..e3207c54a0c60 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -150,7 +150,7 @@ public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { @Test public void testStartupAndTearDown() throws InterruptedException { consumerNetworkThread.start(); - TestCondition isStarted = () -> consumerNetworkThread.isRunning(); + TestCondition isStarted = consumerNetworkThread::isRunning; TestCondition isClosed = () -> !(consumerNetworkThread.isRunning() || consumerNetworkThread.isAlive()); // There's a nonzero amount of time between starting the thread and having it @@ -230,7 +230,7 @@ public void testResetPositionsProcessFailureIsIgnored() { ResetPositionsEvent event = new ResetPositionsEvent(calculateDeadlineMs(time, 100)); applicationEventsQueue.add(event); - assertDoesNotThrow(() -> consumerNetworkThread.runOnce()); + assertDoesNotThrow(consumerNetworkThread::runOnce); verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); } From 8c9ad01821c57fc7d60f7a57bdc46d62132fb43d Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 16:25:55 -0500 Subject: [PATCH 20/61] Updated testAssignmentChangeEvent() Removed tests from this test that seem to be more like integration testing --- .../internals/ConsumerNetworkThreadUnitTest.java | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index e3207c54a0c60..d830dda5f2457 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -74,10 +74,12 @@ public class ConsumerNetworkThreadUnitTest { private final NetworkClientDelegate networkClient; private final RequestManagers requestManagers; private final CompletableEventReaper 
applicationEventReaper; + private final LogContext logContext; + private final ConsumerConfig config; ConsumerNetworkThreadUnitTest() { - LogContext logContext = new LogContext(); - ConsumerConfig config = mock(ConsumerConfig.class); + logContext = new LogContext(); + config = mock(ConsumerConfig.class); this.time = new MockTime(); this.client = new MockClient(time); this.networkClientDelegate = mock(NetworkClientDelegate.class); @@ -244,7 +246,6 @@ public void testValidatePositionsEventIsProcessed() { assertTrue(applicationEventsQueue.isEmpty()); } - // Seems to be more like integration testing @Test public void testAssignmentChangeEvent() { HashMap offset = mockTopicPartitionOffset(); @@ -255,10 +256,7 @@ public void testAssignmentChangeEvent() { consumerNetworkThread.runOnce(); verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); -// verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); -// verify(commitRequestManager, times(1)).updateAutoCommitTimer(currentTimeMs); -// // Assignment change should generate an async commit (not retried). -// verify(commitRequestManager, times(1)).maybeAutoCommitAsync(); + verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); } @Test From 0d2b957e4bfc28e3a5379ba2c25acb1c8bdf96e5 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 29 May 2024 18:32:43 -0500 Subject: [PATCH 21/61] Removed testEnsureMetadataUpdateOnPoll() Removed testEnsureMetadataUpdateOnPoll(). This is an integration test, tried many different ways to still get it to pass but could not. The issue is that for the metadata. updateWithCurrentRequestVersion() method to be invoked, many objects must not be mocks. This causes issues when trying to use verify(), as it can only take a mock, not an actual instantiated object. 
I also cleaned up the code a little bit --- .../ConsumerNetworkThreadUnitTest.java | 31 ++++--------------- 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index d830dda5f2457..14c153ea38306 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -44,22 +44,15 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; -import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_HEARTBEAT_INTERVAL_MS; -import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_REQUEST_TIMEOUT_MS; import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; import static org.junit.jupiter.api.Assertions.*; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.*; public class ConsumerNetworkThreadUnitTest { + static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000; + static final int DEFAULT_REQUEST_TIMEOUT_MS = 500; private final Time time; private final ConsumerMetadata metadata; @@ -74,14 +67,11 @@ public class ConsumerNetworkThreadUnitTest { private final NetworkClientDelegate networkClient; private final RequestManagers requestManagers; private final CompletableEventReaper applicationEventReaper; - private final LogContext logContext; - private final ConsumerConfig config; ConsumerNetworkThreadUnitTest() { - logContext = new LogContext(); - config = mock(ConsumerConfig.class); + LogContext logContext = new LogContext(); + ConsumerConfig config = mock(ConsumerConfig.class); this.time = new MockTime(); - this.client = new MockClient(time); this.networkClientDelegate = mock(NetworkClientDelegate.class); this.requestManagers = mock(RequestManagers.class); this.offsetsRequestManager = mock(OffsetsRequestManager.class); @@ -91,6 +81,7 @@ public class ConsumerNetworkThreadUnitTest { this.metadata = mock(ConsumerMetadata.class); this.applicationEventProcessor = mock(ApplicationEventProcessor.class); this.applicationEventReaper = mock(CompletableEventReaper.class); + this.client = new MockClient(time, metadata); this.networkClient = new NetworkClientDelegate( time, @@ -303,16 +294,6 @@ void testMaximumTimeToWait() { assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait()); } - // Looks like integration testing, I think this should be removed/moved elsewhere - @Test - void testEnsureMetadataUpdateOnPoll() { - MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap()); - client.prepareMetadataUpdate(metadataResponse); - metadata.requestUpdate(false); - consumerNetworkThread.runOnce(); - verify(metadata, times(1)).updateWithCurrentRequestVersion(eq(metadataResponse), eq(false), anyLong()); - } - @Test void 
testEnsureEventsAreCompleted() { Cluster cluster = mock(Cluster.class); From 5897bed42fd9003e7982395e855fc5523ea25827 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 30 May 2024 09:41:51 -0500 Subject: [PATCH 22/61] Changed MockClient Changed instantiation of MockClient to not include metadata in constructor --- .../consumer/internals/ConsumerNetworkThreadUnitTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java index 14c153ea38306..df69084015a1c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java @@ -81,7 +81,7 @@ public class ConsumerNetworkThreadUnitTest { this.metadata = mock(ConsumerMetadata.class); this.applicationEventProcessor = mock(ApplicationEventProcessor.class); this.applicationEventReaper = mock(CompletableEventReaper.class); - this.client = new MockClient(time, metadata); + this.client = new MockClient(time); this.networkClient = new NetworkClientDelegate( time, From 2402e6f2f43b8f5d4a0e02dc0fb1cdb883e3470d Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 30 May 2024 09:45:58 -0500 Subject: [PATCH 23/61] Moved all changes into original file Moved all code from ConsumerNetworkThreadUnitTest to ConsumerNetworkThreadTest and removed ConsumerNetworkThreadUnitTest --- .../internals/ConsumerNetworkThreadTest.java | 245 ++++++----- .../ConsumerNetworkThreadUnitTest.java | 406 ------------------ 2 files changed, 130 insertions(+), 521 deletions(-) delete mode 100644 clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 8c3f97dd64379..58deb71371671 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -16,33 +16,19 @@ */ package org.apache.kafka.clients.consumer.internals; -import org.apache.kafka.clients.MockClient; +import org.apache.kafka.clients.*; +import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; -import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; -import org.apache.kafka.clients.consumer.internals.events.AssignmentChangeEvent; -import org.apache.kafka.clients.consumer.internals.events.AsyncCommitEvent; -import org.apache.kafka.clients.consumer.internals.events.CompletableApplicationEvent; -import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; -import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; -import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent; -import org.apache.kafka.clients.consumer.internals.events.NewTopicsMetadataUpdateRequestEvent; -import org.apache.kafka.clients.consumer.internals.events.PollEvent; -import org.apache.kafka.clients.consumer.internals.events.ResetPositionsEvent; -import 
org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent; -import org.apache.kafka.clients.consumer.internals.events.TopicMetadataEvent; -import org.apache.kafka.clients.consumer.internals.events.ValidatePositionsEvent; +import org.apache.kafka.clients.consumer.internals.events.*; +import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.message.FindCoordinatorRequestData; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.FindCoordinatorRequest; -import org.apache.kafka.common.requests.FindCoordinatorResponse; -import org.apache.kafka.common.requests.MetadataResponse; -import org.apache.kafka.common.requests.OffsetCommitRequest; -import org.apache.kafka.common.requests.OffsetCommitResponse; -import org.apache.kafka.common.requests.RequestTestUtils; +import org.apache.kafka.common.requests.*; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.test.TestCondition; import org.apache.kafka.test.TestUtils; @@ -53,89 +39,111 @@ import org.junit.jupiter.params.provider.ValueSource; import java.time.Duration; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.Optional; +import java.util.*; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.LinkedBlockingQueue; -import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_HEARTBEAT_INTERVAL_MS; -import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.DEFAULT_REQUEST_TIMEOUT_MS; -import static org.apache.kafka.clients.consumer.internals.ConsumerTestBuilder.createDefaultGroupInformation; import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.junit.jupiter.api.Assertions.*; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; public class ConsumerNetworkThreadTest { + static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000; + static final int DEFAULT_REQUEST_TIMEOUT_MS = 500; + + private final Time time; + private final ConsumerMetadata metadata; + private final BlockingQueue applicationEventsQueue; + private final ApplicationEventProcessor applicationEventProcessor; + private final OffsetsRequestManager offsetsRequestManager; + private final HeartbeatRequestManager heartbeatRequestManager; + private final CoordinatorRequestManager coordinatorRequestManager; + private final ConsumerNetworkThread 
consumerNetworkThread; + private final MockClient client; + private final NetworkClientDelegate networkClientDelegate; + private final NetworkClientDelegate networkClient; + private final RequestManagers requestManagers; + private final CompletableEventReaper applicationEventReaper; + + ConsumerNetworkThreadTest() { + LogContext logContext = new LogContext(); + ConsumerConfig config = mock(ConsumerConfig.class); + this.time = new MockTime(); + this.networkClientDelegate = mock(NetworkClientDelegate.class); + this.requestManagers = mock(RequestManagers.class); + this.offsetsRequestManager = mock(OffsetsRequestManager.class); + this.heartbeatRequestManager = mock(HeartbeatRequestManager.class); + this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); + this.applicationEventsQueue = new LinkedBlockingQueue<>(); + this.metadata = mock(ConsumerMetadata.class); + this.applicationEventProcessor = mock(ApplicationEventProcessor.class); + this.applicationEventReaper = mock(CompletableEventReaper.class); + this.client = new MockClient(time); + + this.networkClient = new NetworkClientDelegate( + time, + config, + logContext, + client + ); - private ConsumerTestBuilder testBuilder; - private Time time; - private ConsumerMetadata metadata; - private NetworkClientDelegate networkClient; - private BlockingQueue applicationEventsQueue; - private ApplicationEventProcessor applicationEventProcessor; - private OffsetsRequestManager offsetsRequestManager; - private CommitRequestManager commitRequestManager; - private CoordinatorRequestManager coordinatorRequestManager; - private ConsumerNetworkThread consumerNetworkThread; - private final CompletableEventReaper applicationEventReaper = mock(CompletableEventReaper.class); - private MockClient client; - - @BeforeEach - public void setup() { - testBuilder = new ConsumerTestBuilder(createDefaultGroupInformation()); - time = testBuilder.time; - metadata = testBuilder.metadata; - networkClient = testBuilder.networkClientDelegate; - client = testBuilder.client; - applicationEventsQueue = testBuilder.applicationEventQueue; - applicationEventProcessor = testBuilder.applicationEventProcessor; - commitRequestManager = testBuilder.commitRequestManager.orElseThrow(IllegalStateException::new); - offsetsRequestManager = testBuilder.offsetsRequestManager; - coordinatorRequestManager = testBuilder.coordinatorRequestManager.orElseThrow(IllegalStateException::new); - consumerNetworkThread = new ConsumerNetworkThread( - testBuilder.logContext, + this.consumerNetworkThread = new ConsumerNetworkThread( + logContext, time, - testBuilder.applicationEventQueue, + applicationEventsQueue, applicationEventReaper, () -> applicationEventProcessor, - () -> testBuilder.networkClientDelegate, - () -> testBuilder.requestManagers + () -> networkClientDelegate, + () -> requestManagers ); + } + + @BeforeEach + public void setup() { consumerNetworkThread.initializeResources(); } @AfterEach public void tearDown() { - if (testBuilder != null) { - testBuilder.close(); - consumerNetworkThread.close(Duration.ZERO); - } + if (consumerNetworkThread != null) + consumerNetworkThread.close(); + } + + @Test + public void testEnsureCloseStopsRunningThread() { + // consumerNetworkThread.running is set to true in the constructor + assertTrue(consumerNetworkThread.isRunning()); + + // close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout) + consumerNetworkThread.close(); + assertFalse(consumerNetworkThread.isRunning()); + } + + @ParameterizedTest + 
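// (the sampled values straddle the 5000 ms poll ceiling, so both the pass-through and the clamped branch are exercised)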
@ValueSource(longs = {1, 100, 1000, 4999, 5001}) + public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { + List> requestManagersList = new ArrayList<>(); + requestManagersList.add(Optional.of(coordinatorRequestManager)); + when(requestManagers.entries()).thenReturn(requestManagersList); + + NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); + + when(coordinatorRequestManager.poll(anyLong())).thenReturn(pollResult); + when(coordinatorRequestManager.maximumTimeToWait(anyLong())).thenReturn(exampleTime); + when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs); + consumerNetworkThread.runOnce(); + + verify(networkClientDelegate).poll((exampleTime < 5001 ? exampleTime : 5000), time.milliseconds()); + assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime); } @Test public void testStartupAndTearDown() throws InterruptedException { - // The consumer is closed in ConsumerTestBuilder.ConsumerNetworkThreadTestBuilder.close() - // which is called from tearDown(). consumerNetworkThread.start(); - TestCondition isStarted = () -> consumerNetworkThread.isRunning(); + TestCondition isStarted = consumerNetworkThread::isRunning; TestCondition isClosed = () -> !(consumerNetworkThread.isRunning() || consumerNetworkThread.isAlive()); // There's a nonzero amount of time between starting the thread and having it @@ -149,12 +157,20 @@ public void testStartupAndTearDown() throws InterruptedException { "The consumer network thread did not stop within " + DEFAULT_MAX_WAIT_MS + " ms"); } + @Test + void testRequestManagersArePolledOnce() { + consumerNetworkThread.runOnce(); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).poll(anyLong()))); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).maximumTimeToWait(anyLong()))); + verify(networkClientDelegate).poll(anyLong(), anyLong()); + } + @Test public void testApplicationEvent() { ApplicationEvent e = new PollEvent(100); applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); - verify(applicationEventProcessor, times(1)).process(e); + verify(applicationEventProcessor).process(e); } @Test @@ -162,7 +178,7 @@ public void testMetadataUpdateEvent() { ApplicationEvent e = new NewTopicsMetadataUpdateRequestEvent(); applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); - verify(metadata).requestUpdateForNewTopics(); + verify(applicationEventProcessor).process(e); } @Test @@ -207,7 +223,7 @@ public void testResetPositionsProcessFailureIsIgnored() { ResetPositionsEvent event = new ResetPositionsEvent(calculateDeadlineMs(time, 100)); applicationEventsQueue.add(event); - assertDoesNotThrow(() -> consumerNetworkThread.runOnce()); + assertDoesNotThrow(consumerNetworkThread::runOnce); verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); } @@ -231,10 +247,7 @@ public void testAssignmentChangeEvent() { consumerNetworkThread.runOnce(); verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); - verify(networkClient, times(1)).poll(anyLong(), anyLong()); - verify(commitRequestManager, times(1)).updateAutoCommitTimer(currentTimeMs); - // Assignment change should generate an async commit (not retried). 
- verify(commitRequestManager, times(1)).maybeAutoCommitAsync(); + verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); } @Test @@ -268,32 +281,31 @@ void testPollResultTimer() { @Test void testMaximumTimeToWait() { + List> list = new ArrayList<>(); + list.add(Optional.of(heartbeatRequestManager)); // Initial value before runOnce has been called assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); + + when(requestManagers.entries()).thenReturn(list); + when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) DEFAULT_HEARTBEAT_INTERVAL_MS); + consumerNetworkThread.runOnce(); // After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait()); } @Test - void testRequestManagersArePolledOnce() { - consumerNetworkThread.runOnce(); - testBuilder.requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).poll(anyLong()))); - testBuilder.requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).maximumTimeToWait(anyLong()))); - verify(networkClient, times(1)).poll(anyLong(), anyLong()); - } + void testEnsureEventsAreCompleted() { + Cluster cluster = mock(Cluster.class); + when(metadata.fetch()).thenReturn(cluster); - @Test - void testEnsureMetadataUpdateOnPoll() { - MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap()); - client.prepareMetadataUpdate(metadataResponse); - metadata.requestUpdate(false); - consumerNetworkThread.runOnce(); - verify(metadata, times(1)).updateWithCurrentRequestVersion(eq(metadataResponse), eq(false), anyLong()); - } + List list = new ArrayList<>(); + list.add(new Node(0, "host", 0)); + when(cluster.nodes()).thenReturn(list); + + Queue queue = new LinkedList<>(); + when(networkClientDelegate.unsentRequests()).thenReturn(queue); - @Test - void testEnsureEventsAreCompleted() { // Mimic the logic of CompletableEventReaper.reap(Collection): doAnswer(__ -> { Iterator i = applicationEventsQueue.iterator(); @@ -314,7 +326,7 @@ void testEnsureEventsAreCompleted() { coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node)); prepareOffsetCommitRequest(new HashMap<>(), Errors.NONE, false); - CompletableApplicationEvent event1 = spy(new AsyncCommitEvent(Collections.emptyMap())); + CompletableApplicationEvent event1 = mock(AsyncCommitEvent.class); ApplicationEvent event2 = new AsyncCommitEvent(Collections.emptyMap()); CompletableFuture future = new CompletableFuture<>(); when(event1.future()).thenReturn(future); @@ -327,8 +339,11 @@ void testEnsureEventsAreCompleted() { assertTrue(applicationEventsQueue.isEmpty()); } + // Look into this one @Test void testCleanupInvokesReaper() { + Queue queue = new LinkedList<>(); + when(networkClientDelegate.unsentRequests()).thenReturn(queue); consumerNetworkThread.cleanup(); verify(applicationEventReaper).reap(applicationEventsQueue); } @@ -339,6 +354,15 @@ void testRunOnceInvokesReaper() { verify(applicationEventReaper).reap(any(Long.class)); } + private HashMap mockTopicPartitionOffset() { + final TopicPartition t0 = new TopicPartition("t0", 2); + final TopicPartition t1 = new TopicPartition("t0", 3); + HashMap topicPartitionOffsets = new HashMap<>(); + topicPartitionOffsets.put(t0, new OffsetAndMetadata(10L)); + 
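// second partition of the same topic ("t0") with a distinct committed offset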
topicPartitionOffsets.put(t1, new OffsetAndMetadata(20L)); + return topicPartitionOffsets; + } + private void prepareOffsetCommitRequest(final Map expectedOffsets, final Errors error, final boolean disconnected) { @@ -379,13 +403,4 @@ private MockClient.RequestMatcher offsetCommitRequestMatcher(final Map mockTopicPartitionOffset() { - final TopicPartition t0 = new TopicPartition("t0", 2); - final TopicPartition t1 = new TopicPartition("t0", 3); - HashMap topicPartitionOffsets = new HashMap<>(); - topicPartitionOffsets.put(t0, new OffsetAndMetadata(10L)); - topicPartitionOffsets.put(t1, new OffsetAndMetadata(20L)); - return topicPartitionOffsets; - } -} +} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java deleted file mode 100644 index df69084015a1c..0000000000000 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadUnitTest.java +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.clients.consumer.internals; - -import org.apache.kafka.clients.*; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.internals.events.*; -import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.message.FindCoordinatorRequestData; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.*; -import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.common.utils.MockTime; -import org.apache.kafka.common.utils.Time; -import org.apache.kafka.test.TestCondition; -import org.apache.kafka.test.TestUtils; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.time.Duration; -import java.util.*; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.LinkedBlockingQueue; - -import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; -import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.*; - -public class ConsumerNetworkThreadUnitTest { - static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000; - static final int DEFAULT_REQUEST_TIMEOUT_MS = 500; - - private final Time time; - private final ConsumerMetadata metadata; - private final BlockingQueue applicationEventsQueue; - private final ApplicationEventProcessor applicationEventProcessor; - private final OffsetsRequestManager offsetsRequestManager; - private final HeartbeatRequestManager heartbeatRequestManager; - private final CoordinatorRequestManager coordinatorRequestManager; - private final ConsumerNetworkThread consumerNetworkThread; - private final MockClient client; - private final NetworkClientDelegate networkClientDelegate; - private final NetworkClientDelegate networkClient; - private final RequestManagers requestManagers; - private final CompletableEventReaper applicationEventReaper; - - ConsumerNetworkThreadUnitTest() { - LogContext logContext = new LogContext(); - ConsumerConfig config = mock(ConsumerConfig.class); - this.time = new MockTime(); - this.networkClientDelegate = mock(NetworkClientDelegate.class); - this.requestManagers = mock(RequestManagers.class); - this.offsetsRequestManager = mock(OffsetsRequestManager.class); - this.heartbeatRequestManager = mock(HeartbeatRequestManager.class); - this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); - this.applicationEventsQueue = new LinkedBlockingQueue<>(); - this.metadata = mock(ConsumerMetadata.class); - this.applicationEventProcessor = mock(ApplicationEventProcessor.class); - this.applicationEventReaper = mock(CompletableEventReaper.class); - this.client = new MockClient(time); - - this.networkClient = new NetworkClientDelegate( - time, - config, - logContext, - client - ); - - this.consumerNetworkThread = new ConsumerNetworkThread( - logContext, - time, - applicationEventsQueue, - applicationEventReaper, - () -> applicationEventProcessor, - () -> networkClientDelegate, - () -> 
requestManagers - ); - } - - @BeforeEach - public void setup() { - consumerNetworkThread.initializeResources(); - } - - @AfterEach - public void tearDown() { - if (consumerNetworkThread != null) - consumerNetworkThread.close(); - } - - @Test - public void testEnsureCloseStopsRunningThread() { - // consumerNetworkThread.running is set to true in the constructor - assertTrue(consumerNetworkThread.isRunning()); - - // close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout) - consumerNetworkThread.close(); - assertFalse(consumerNetworkThread.isRunning()); - } - - @ParameterizedTest - @ValueSource(longs = {1, 100, 1000, 4999, 5001}) - public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { - List> requestManagersList = new ArrayList<>(); - requestManagersList.add(Optional.of(coordinatorRequestManager)); - when(requestManagers.entries()).thenReturn(requestManagersList); - - NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); - - when(coordinatorRequestManager.poll(anyLong())).thenReturn(pollResult); - when(coordinatorRequestManager.maximumTimeToWait(anyLong())).thenReturn(exampleTime); - when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs); - consumerNetworkThread.runOnce(); - - verify(networkClientDelegate).poll((exampleTime < 5001 ? exampleTime : 5000), time.milliseconds()); - assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime); - } - - @Test - public void testStartupAndTearDown() throws InterruptedException { - consumerNetworkThread.start(); - TestCondition isStarted = consumerNetworkThread::isRunning; - TestCondition isClosed = () -> !(consumerNetworkThread.isRunning() || consumerNetworkThread.isAlive()); - - // There's a nonzero amount of time between starting the thread and having it - // begin to execute our code. Wait for a bit before checking... 
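The parameterized cases above pin down how runOnce() clamps the poll wait: each request manager suggests a delay, and the reduction is seeded with MAX_POLL_TIMEOUT_MS (5000 ms), which is why the 5001 ms case is verified as poll(5000, ...). A minimal sketch of that reduction, assuming only Math::min semantics:

    import java.util.Arrays;
    import java.util.List;

    public class PollWaitClampSketch {
        static final long MAX_POLL_TIMEOUT_MS = 5000L; // assumed cap

        // Seeding the reduction with the cap clamps any larger suggestion.
        static long pollWaitTimeMs(List<Long> suggestions) {
            return suggestions.stream().reduce(MAX_POLL_TIMEOUT_MS, Math::min);
        }

        public static void main(String[] args) {
            System.out.println(pollWaitTimeMs(Arrays.asList(4999L))); // 4999
            System.out.println(pollWaitTimeMs(Arrays.asList(5001L))); // 5000 (clamped)
        }
    }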
- TestUtils.waitForCondition(isStarted, - "The consumer network thread did not start within " + DEFAULT_MAX_WAIT_MS + " ms"); - - consumerNetworkThread.close(Duration.ofMillis(DEFAULT_MAX_WAIT_MS)); - - TestUtils.waitForCondition(isClosed, - "The consumer network thread did not stop within " + DEFAULT_MAX_WAIT_MS + " ms"); - } - - @Test - void testRequestManagersArePolledOnce() { - consumerNetworkThread.runOnce(); - requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).poll(anyLong()))); - requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).maximumTimeToWait(anyLong()))); - verify(networkClientDelegate).poll(anyLong(), anyLong()); - } - - @Test - public void testApplicationEvent() { - ApplicationEvent e = new PollEvent(100); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(e); - } - - @Test - public void testMetadataUpdateEvent() { - ApplicationEvent e = new NewTopicsMetadataUpdateRequestEvent(); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(e); - } - - @Test - public void testAsyncCommitEvent() { - ApplicationEvent e = new AsyncCommitEvent(new HashMap<>()); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(AsyncCommitEvent.class)); - } - - @Test - public void testSyncCommitEvent() { - ApplicationEvent e = new SyncCommitEvent(new HashMap<>(), calculateDeadlineMs(time, 100)); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(SyncCommitEvent.class)); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void testListOffsetsEventIsProcessed(boolean requireTimestamp) { - Map timestamps = Collections.singletonMap(new TopicPartition("topic1", 1), 5L); - ApplicationEvent e = new ListOffsetsEvent(timestamps, calculateDeadlineMs(time, 100), requireTimestamp); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(ListOffsetsEvent.class)); - assertTrue(applicationEventsQueue.isEmpty()); - } - - @Test - public void testResetPositionsEventIsProcessed() { - ResetPositionsEvent e = new ResetPositionsEvent(calculateDeadlineMs(time, 100)); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); - assertTrue(applicationEventsQueue.isEmpty()); - } - - @Test - public void testResetPositionsProcessFailureIsIgnored() { - doThrow(new NullPointerException()).when(offsetsRequestManager).resetPositionsIfNeeded(); - - ResetPositionsEvent event = new ResetPositionsEvent(calculateDeadlineMs(time, 100)); - applicationEventsQueue.add(event); - assertDoesNotThrow(consumerNetworkThread::runOnce); - - verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); - } - - @Test - public void testValidatePositionsEventIsProcessed() { - ValidatePositionsEvent e = new ValidatePositionsEvent(calculateDeadlineMs(time, 100)); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(ValidatePositionsEvent.class)); - assertTrue(applicationEventsQueue.isEmpty()); - } - - @Test - public void testAssignmentChangeEvent() { - HashMap offset = mockTopicPartitionOffset(); - - final long currentTimeMs = time.milliseconds(); - ApplicationEvent e = new AssignmentChangeEvent(offset, 
currentTimeMs); - applicationEventsQueue.add(e); - - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); - verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); - } - - @Test - void testFetchTopicMetadata() { - applicationEventsQueue.add(new TopicMetadataEvent("topic", Long.MAX_VALUE)); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(TopicMetadataEvent.class)); - } - - @Test - void testPollResultTimer() { - NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( - new FindCoordinatorRequest.Builder( - new FindCoordinatorRequestData() - .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) - .setKey("foobar")), - Optional.empty()); - req.setTimer(time, DEFAULT_REQUEST_TIMEOUT_MS); - - // purposely setting a non-MAX time to ensure it is returning Long.MAX_VALUE upon success - NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( - 10, - Collections.singletonList(req)); - assertEquals(10, networkClient.addAll(success)); - - NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( - 10, - new ArrayList<>()); - assertEquals(10, networkClient.addAll(failure)); - } - - @Test - void testMaximumTimeToWait() { - List> list = new ArrayList<>(); - list.add(Optional.of(heartbeatRequestManager)); - // Initial value before runOnce has been called - assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); - - when(requestManagers.entries()).thenReturn(list); - when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) DEFAULT_HEARTBEAT_INTERVAL_MS); - - consumerNetworkThread.runOnce(); - // After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager - assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait()); - } - - @Test - void testEnsureEventsAreCompleted() { - Cluster cluster = mock(Cluster.class); - when(metadata.fetch()).thenReturn(cluster); - - List list = new ArrayList<>(); - list.add(new Node(0, "host", 0)); - when(cluster.nodes()).thenReturn(list); - - Queue queue = new LinkedList<>(); - when(networkClientDelegate.unsentRequests()).thenReturn(queue); - - // Mimic the logic of CompletableEventReaper.reap(Collection): - doAnswer(__ -> { - Iterator i = applicationEventsQueue.iterator(); - - while (i.hasNext()) { - ApplicationEvent event = i.next(); - - if (event instanceof CompletableEvent) - ((CompletableEvent) event).future().completeExceptionally(new TimeoutException()); - - i.remove(); - } - - return null; - }).when(applicationEventReaper).reap(any(Collection.class)); - - Node node = metadata.fetch().nodes().get(0); - coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); - client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node)); - prepareOffsetCommitRequest(new HashMap<>(), Errors.NONE, false); - CompletableApplicationEvent event1 = mock(AsyncCommitEvent.class); - ApplicationEvent event2 = new AsyncCommitEvent(Collections.emptyMap()); - CompletableFuture future = new CompletableFuture<>(); - when(event1.future()).thenReturn(future); - applicationEventsQueue.add(event1); - applicationEventsQueue.add(event2); - assertFalse(future.isDone()); - assertFalse(applicationEventsQueue.isEmpty()); - consumerNetworkThread.cleanup(); - assertTrue(future.isCompletedExceptionally()); - 
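The doAnswer above mimics the reaper's shutdown contract: drain the event queue and fail any still-pending completable event. A minimal sketch of that contract, using the JDK's TimeoutException in place of Kafka's (illustrative types only):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.Queue;
    import java.util.concurrent.TimeoutException;

    public class ReapSketch {
        // Drain the queue, failing any future that never completed.
        static void reap(Queue<CompletableFuture<Void>> events) {
            CompletableFuture<Void> e;
            while ((e = events.poll()) != null) {
                if (!e.isDone())
                    e.completeExceptionally(new TimeoutException("reaped on shutdown"));
            }
        }

        public static void main(String[] args) {
            Queue<CompletableFuture<Void>> q = new ConcurrentLinkedQueue<>();
            CompletableFuture<Void> pending = new CompletableFuture<>();
            q.add(pending);
            reap(q);
            System.out.println(pending.isCompletedExceptionally()); // true
            System.out.println(q.isEmpty());                        // true
        }
    }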
assertTrue(applicationEventsQueue.isEmpty()); - } - - // Look into this one - @Test - void testCleanupInvokesReaper() { - Queue<NetworkClientDelegate.UnsentRequest> queue = new LinkedList<>(); - when(networkClientDelegate.unsentRequests()).thenReturn(queue); - consumerNetworkThread.cleanup(); - verify(applicationEventReaper).reap(applicationEventsQueue); - } - - @Test - void testRunOnceInvokesReaper() { - consumerNetworkThread.runOnce(); - verify(applicationEventReaper).reap(any(Long.class)); - } - - private HashMap<TopicPartition, OffsetAndMetadata> mockTopicPartitionOffset() { - final TopicPartition t0 = new TopicPartition("t0", 2); - final TopicPartition t1 = new TopicPartition("t0", 3); - HashMap<TopicPartition, OffsetAndMetadata> topicPartitionOffsets = new HashMap<>(); - topicPartitionOffsets.put(t0, new OffsetAndMetadata(10L)); - topicPartitionOffsets.put(t1, new OffsetAndMetadata(20L)); - return topicPartitionOffsets; - } - - private void prepareOffsetCommitRequest(final Map<TopicPartition, Long> expectedOffsets, - final Errors error, - final boolean disconnected) { - Map<TopicPartition, Errors> errors = partitionErrors(expectedOffsets.keySet(), error); - client.prepareResponse(offsetCommitRequestMatcher(expectedOffsets), offsetCommitResponse(errors), disconnected); - } - - private Map<TopicPartition, Errors> partitionErrors(final Collection<TopicPartition> partitions, - final Errors error) { - final Map<TopicPartition, Errors> errors = new HashMap<>(); - for (TopicPartition partition : partitions) { - errors.put(partition, error); - } - return errors; - } - - private OffsetCommitResponse offsetCommitResponse(final Map<TopicPartition, Errors> responseData) { - return new OffsetCommitResponse(responseData); - } - - private MockClient.RequestMatcher offsetCommitRequestMatcher(final Map<TopicPartition, Long> expectedOffsets) { - return body -> { - OffsetCommitRequest req = (OffsetCommitRequest) body; - Map<TopicPartition, Long> offsets = req.offsets(); - if (offsets.size() != expectedOffsets.size()) - return false; - - for (Map.Entry<TopicPartition, Long> expectedOffset : expectedOffsets.entrySet()) { - if (!offsets.containsKey(expectedOffset.getKey())) { - return false; - } else { - Long actualOffset = offsets.get(expectedOffset.getKey()); - if (!actualOffset.equals(expectedOffset.getValue())) { - return false; - } - } - } - return true; - }; - } -} \ No newline at end of file From 91d5266266c6fe0ea9730ce94b347b72757977fa Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 30 May 2024 10:06:40 -0500 Subject: [PATCH 24/61] Comment removal Comment removal --- .../kafka/clients/consumer/internals/ConsumerNetworkThread.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index c9cae79199fb5..adee6594603bb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -137,7 +137,6 @@ void initializeResources() { void runOnce() { processApplicationEvents(); - // here we get the system time and pass it to the request manager because we don't want to invoke system time all the time final long currentTimeMs = time.milliseconds(); final long pollWaitTimeMs = requestManagers.entries().stream() .filter(Optional::isPresent) .map(Optional::get) @@ -308,7 +307,6 @@ void cleanup() { try { runAtClose(requestManagers.entries(), networkClientDelegate, timer); } catch (Exception e) { - // TODO: Makesure exception case is being tested log.error("Unexpected error during shutdown.
Proceed with closing.", e); } finally { sendUnsentRequests(timer); From 88950a36e959b4b262ae1fdbcbab997d19e7b715 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 30 May 2024 15:22:38 -0500 Subject: [PATCH 25/61] Cleaned up testConsumerNetworkThreadWaitTimeComputations Made the first when() statement cleaner --- .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 58deb71371671..2466c5bd4a6be 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -125,9 +125,7 @@ public void testEnsureCloseStopsRunningThread() { @ParameterizedTest @ValueSource(longs = {1, 100, 1000, 4999, 5001}) public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { - List<Optional<? extends RequestManager>> requestManagersList = new ArrayList<>(); - requestManagersList.add(Optional.of(coordinatorRequestManager)); - when(requestManagers.entries()).thenReturn(requestManagersList); + when(requestManagers.entries()).thenReturn(Collections.singletonList(Optional.of(coordinatorRequestManager))); NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); From 7a6593d18f373dafda027e6ccee9325e596c249a Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 30 May 2024 15:24:14 -0500 Subject: [PATCH 26/61] Cleaned up testMaximumTimeToWait() Condensed a few lines of code into one cleaner line --- .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 2466c5bd4a6be..4be9bfa8151e7 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -279,12 +279,10 @@ void testPollResultTimer() { @Test void testMaximumTimeToWait() { - List<Optional<? extends RequestManager>> list = new ArrayList<>(); - list.add(Optional.of(heartbeatRequestManager)); // Initial value before runOnce has been called assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); - when(requestManagers.entries()).thenReturn(list); + when(requestManagers.entries()).thenReturn(Collections.singletonList(Optional.of(heartbeatRequestManager))); when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) DEFAULT_HEARTBEAT_INTERVAL_MS); consumerNetworkThread.runOnce(); From 15d35fc896d1e4e1adaaf58d0fdc258cd6a32a0b Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 30 May 2024 15:28:45 -0500 Subject: [PATCH 27/61] Cleaned imports and comment removal Cleaned up wildcard imports and removed extraneous comment --- .../internals/ConsumerNetworkThreadTest.java | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 4be9bfa8151e7..12265d3e84777
100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -46,9 +46,18 @@ import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; -import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class ConsumerNetworkThreadTest { static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000; @@ -335,7 +344,6 @@ void testEnsureEventsAreCompleted() { assertTrue(applicationEventsQueue.isEmpty()); } - // Look into this one @Test void testCleanupInvokesReaper() { Queue<NetworkClientDelegate.UnsentRequest> queue = new LinkedList<>(); From 10df9a456312a57eb84586556eda80a26b891eb4 Mon Sep 17 00:00:00 2001 From: brenden20 <118419078+brenden20@users.noreply.github.com> Date: Thu, 30 May 2024 15:38:46 -0500 Subject: [PATCH 28/61] Fix whitespace Co-authored-by: Kirk True --- .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 12265d3e84777..e8f64a02672a0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -407,4 +407,4 @@ private MockClient.RequestMatcher offsetCommitRequestMatcher(final Map<TopicPartition, Long> return true; }; } -} \ No newline at end of file +} Date: Mon, 3 Jun 2024 09:49:41 -0500 Subject: [PATCH 29/61] Implementing suggestions from PR Changed some variable declarations, undid unnecessary changes, removed redundant code, fixed imports, and fixed a checkstyle violation --- .../internals/CommitRequestManager.java | 2 +- .../internals/ConsumerNetworkThreadTest.java | 66 +++++++++++-------- 2 files changed, 41 insertions(+), 27 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 96b0270fd4a0b..577cf7dee6b76 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -1290,4 +1290,4 @@ static class MemberInfo { this.memberEpoch = Optional.empty(); } } -} \ No newline at end of file +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index e8f64a02672a0..340b358ac76ea 100644 ---
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -16,17 +16,32 @@ */ package org.apache.kafka.clients.consumer.internals; -import org.apache.kafka.clients.*; +import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.internals.events.*; +import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; +import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; +import org.apache.kafka.clients.consumer.internals.events.AssignmentChangeEvent; +import org.apache.kafka.clients.consumer.internals.events.AsyncCommitEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableApplicationEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; +import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent; +import org.apache.kafka.clients.consumer.internals.events.PollEvent; +import org.apache.kafka.clients.consumer.internals.events.ResetPositionsEvent; +import org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent; +import org.apache.kafka.clients.consumer.internals.events.TopicMetadataEvent; +import org.apache.kafka.clients.consumer.internals.events.ValidatePositionsEvent; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.message.FindCoordinatorRequestData; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.*; +import org.apache.kafka.common.requests.FindCoordinatorRequest; +import org.apache.kafka.common.requests.FindCoordinatorResponse; +import org.apache.kafka.common.requests.OffsetCommitRequest; +import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -39,7 +54,15 @@ import org.junit.jupiter.params.provider.ValueSource; import java.time.Duration; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Optional; +import java.util.List; +import java.util.LinkedList; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; @@ -55,7 +78,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -78,7 +100,6 @@ public class ConsumerNetworkThreadTest { private final CompletableEventReaper applicationEventReaper; ConsumerNetworkThreadTest() { - LogContext logContext = new LogContext(); ConsumerConfig config = mock(ConsumerConfig.class); this.time = new MockTime(); this.networkClientDelegate = mock(NetworkClientDelegate.class); @@ -86,11 +107,12 @@ public class ConsumerNetworkThreadTest { this.offsetsRequestManager = mock(OffsetsRequestManager.class); 
this.heartbeatRequestManager = mock(HeartbeatRequestManager.class); this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); - this.applicationEventsQueue = new LinkedBlockingQueue<>(); this.metadata = mock(ConsumerMetadata.class); this.applicationEventProcessor = mock(ApplicationEventProcessor.class); this.applicationEventReaper = mock(CompletableEventReaper.class); - this.client = new MockClient(time); + this.client = mock(MockClient.class); + this.applicationEventsQueue = new LinkedBlockingQueue<>(); + LogContext logContext = new LogContext(); this.networkClient = new NetworkClientDelegate( time, @@ -143,7 +165,7 @@ public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs); consumerNetworkThread.runOnce(); - verify(networkClientDelegate).poll((exampleTime < 5001 ? exampleTime : 5000), time.milliseconds()); + verify(networkClientDelegate).poll(exampleTime < 5001 ? exampleTime : 5000, time.milliseconds()); assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime); } @@ -167,8 +189,8 @@ public void testStartupAndTearDown() throws InterruptedException { @Test void testRequestManagersArePolledOnce() { consumerNetworkThread.runOnce(); - requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).poll(anyLong()))); - requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm, times(1)).maximumTimeToWait(anyLong()))); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).poll(anyLong()))); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).maximumTimeToWait(anyLong()))); verify(networkClientDelegate).poll(anyLong(), anyLong()); } @@ -180,14 +202,6 @@ public void testApplicationEvent() { verify(applicationEventProcessor).process(e); } - @Test - public void testMetadataUpdateEvent() { - ApplicationEvent e = new NewTopicsMetadataUpdateRequestEvent(); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(e); - } - @Test public void testAsyncCommitEvent() { ApplicationEvent e = new AsyncCommitEvent(new HashMap<>()); @@ -230,7 +244,7 @@ public void testResetPositionsProcessFailureIsIgnored() { ResetPositionsEvent event = new ResetPositionsEvent(calculateDeadlineMs(time, 100)); applicationEventsQueue.add(event); - assertDoesNotThrow(consumerNetworkThread::runOnce); + assertDoesNotThrow(() -> consumerNetworkThread.runOnce()); verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); } @@ -246,7 +260,7 @@ public void testValidatePositionsEventIsProcessed() { @Test public void testAssignmentChangeEvent() { - HashMap offset = mockTopicPartitionOffset(); + Map offset = mockTopicPartitionOffset(); final long currentTimeMs = time.milliseconds(); ApplicationEvent e = new AssignmentChangeEvent(offset, currentTimeMs); @@ -254,7 +268,7 @@ public void testAssignmentChangeEvent() { consumerNetworkThread.runOnce(); verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); - verify(networkClientDelegate, times(1)).poll(anyLong(), anyLong()); + verify(networkClientDelegate).poll(anyLong(), anyLong()); } @Test @@ -308,7 +322,7 @@ void testEnsureEventsAreCompleted() { list.add(new Node(0, "host", 0)); when(cluster.nodes()).thenReturn(list); - Queue queue = new LinkedList<>(); + LinkedList queue = new LinkedList<>(); when(networkClientDelegate.unsentRequests()).thenReturn(queue); // Mimic the logic of 
CompletableEventReaper.reap(Collection): @@ -346,7 +360,7 @@ void testEnsureEventsAreCompleted() { @Test void testCleanupInvokesReaper() { - Queue queue = new LinkedList<>(); + LinkedList queue = new LinkedList<>(); when(networkClientDelegate.unsentRequests()).thenReturn(queue); consumerNetworkThread.cleanup(); verify(applicationEventReaper).reap(applicationEventsQueue); @@ -358,10 +372,10 @@ void testRunOnceInvokesReaper() { verify(applicationEventReaper).reap(any(Long.class)); } - private HashMap mockTopicPartitionOffset() { + private Map mockTopicPartitionOffset() { final TopicPartition t0 = new TopicPartition("t0", 2); final TopicPartition t1 = new TopicPartition("t0", 3); - HashMap topicPartitionOffsets = new HashMap<>(); + final Map topicPartitionOffsets = new HashMap<>(); topicPartitionOffsets.put(t0, new OffsetAndMetadata(10L)); topicPartitionOffsets.put(t1, new OffsetAndMetadata(20L)); return topicPartitionOffsets; From 255f4dd64277804bc66a9a9363401bb4b6d03c38 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 10:46:12 -0500 Subject: [PATCH 30/61] Add comment --- .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 1 + 1 file changed, 1 insertion(+) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 340b358ac76ea..f7497cd2d0de1 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -153,6 +153,7 @@ public void testEnsureCloseStopsRunningThread() { assertFalse(consumerNetworkThread.isRunning()); } + // Add a second RM to test to ensure Math.min() is computing correctly @ParameterizedTest @ValueSource(longs = {1, 100, 1000, 4999, 5001}) public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { From 848ab9b525ca16c45878755cab5a91a3d2657155 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 11:33:18 -0500 Subject: [PATCH 31/61] Todo comment --- .../kafka/clients/consumer/internals/ConsumerNetworkThread.java | 1 + 1 file changed, 1 insertion(+) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index adee6594603bb..2617900adffa7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -289,6 +289,7 @@ private void closeInternal(final Duration timeout) { } } + // Add test to see if poll() is run once with timer of 0 /** * Check the unsent queue one last time and poll until all requests are sent or the timer runs out. 
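A minimal sketch of the behavior the comment above asks to test, assuming the Timer semantics in org.apache.kafka.common.utils (a timer built via time.timer(0) is already expired, so a do/while body runs exactly once before the expiry check):

    import org.apache.kafka.common.utils.MockTime;
    import org.apache.kafka.common.utils.Time;
    import org.apache.kafka.common.utils.Timer;

    public class ZeroTimerSketch {
        public static void main(String[] args) {
            Time time = new MockTime();
            Timer timer = time.timer(0);
            int polls = 0;
            do {
                polls++;        // stands in for networkClientDelegate.poll(...)
                timer.update();
            } while (timer.notExpired());
            System.out.println(polls); // 1: the body ran once, then the loop exited
        }
    }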
*/ From 524967ec6bc3d7e50ceba0e981902d0b6936a7bb Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 12:51:11 -0500 Subject: [PATCH 32/61] Updated testConsumerNetworkThreadWaitTimeComputations() Updated testConsumerNetworkThreadWaitTimeComputations() to include a second request manager to ensure correct computation --- .../internals/ConsumerNetworkThreadTest.java | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index f7497cd2d0de1..921846a7c02f9 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -145,7 +145,7 @@ public void tearDown() { @Test public void testEnsureCloseStopsRunningThread() { - // consumerNetworkThread.running is set to true in the constructor + // consumerNetworkThread.running is set to true in its constructor assertTrue(consumerNetworkThread.isRunning()); // close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout) @@ -153,17 +153,25 @@ public void testEnsureCloseStopsRunningThread() { assertFalse(consumerNetworkThread.isRunning()); } - // Add a second RM to test to ensure Math.min() is computing correctly @ParameterizedTest @ValueSource(longs = {1, 100, 1000, 4999, 5001}) public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { - when(requestManagers.entries()).thenReturn(Collections.singletonList(Optional.of(coordinatorRequestManager))); + List> list = new ArrayList<>(); + list.add(Optional.of(coordinatorRequestManager)); + list.add(Optional.of(heartbeatRequestManager)); + + when(requestManagers.entries()).thenReturn(list); NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); + NetworkClientDelegate.PollResult pollResult1 = new NetworkClientDelegate.PollResult(exampleTime + 100); - when(coordinatorRequestManager.poll(anyLong())).thenReturn(pollResult); - when(coordinatorRequestManager.maximumTimeToWait(anyLong())).thenReturn(exampleTime); + long t = time.milliseconds(); + when(coordinatorRequestManager.poll(t)).thenReturn(pollResult); + when(coordinatorRequestManager.maximumTimeToWait(t)).thenReturn(exampleTime); + when(heartbeatRequestManager.poll(t)).thenReturn(pollResult1); + when(heartbeatRequestManager.maximumTimeToWait(t)).thenReturn(exampleTime + 100); when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs); + when(networkClientDelegate.addAll(pollResult1)).thenReturn(pollResult1.timeUntilNextPollMs); consumerNetworkThread.runOnce(); verify(networkClientDelegate).poll(exampleTime < 5001 ? 
exampleTime : 5000, time.milliseconds()); From ac2bb3ce1e67c955a57594f06bf6498c4cce3282 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 13:08:36 -0500 Subject: [PATCH 33/61] Added new test Added testEnsureSendUnsentRequestPollWithZeroRunsOnce(), which checks that ConsumerNetworkThread.sendUnsentRequests(), when given a timer with timeoutMs = 0, polls the networkClientDelegate exactly once --- .../consumer/internals/ConsumerNetworkThread.java | 2 +- .../internals/ConsumerNetworkThreadTest.java | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index 2617900adffa7..05a7c209ae634 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -293,7 +293,7 @@ private void closeInternal(final Duration timeout) { /** * Check the unsent queue one last time and poll until all requests are sent or the timer runs out. */ - private void sendUnsentRequests(final Timer timer) { + protected void sendUnsentRequests(final Timer timer) { if (networkClientDelegate.unsentRequests().isEmpty()) return; do { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 921846a7c02f9..4e21cf42a1016 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -45,6 +45,7 @@ import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Timer; import org.apache.kafka.test.TestCondition; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; @@ -63,6 +64,7 @@ import java.util.Optional; import java.util.List; import java.util.LinkedList; +import java.util.Queue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; @@ -75,6 +77,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -153,6 +156,16 @@ public void testEnsureCloseStopsRunningThread() { assertFalse(consumerNetworkThread.isRunning()); } + @Test + public void testEnsureSendUnsentRequestPollWithZeroRunsOnce() { + Timer timer = time.timer(0); + Queue<NetworkClientDelegate.UnsentRequest> queue = new LinkedList<>(); + queue.add(mock(NetworkClientDelegate.UnsentRequest.class)); + when(networkClientDelegate.unsentRequests()).thenReturn(queue); + consumerNetworkThread.sendUnsentRequests(timer); + verify(networkClientDelegate).poll(eq(0L), anyLong()); + } + @ParameterizedTest @ValueSource(longs = {1, 100, 1000, 4999, 5001}) public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { From 4191c2d2cccdadd647ee950c81e201029ab08715 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 13:57:17 -0500 Subject:
[PATCH 34/61] Updated testConsumerNetworkThreadWaitTimeComputations() Updated testConsumerNetworkThreadWaitTimeComputations(), removed some unnecessary test cases --- .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 4e21cf42a1016..3b7d6000a6cad 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -167,7 +167,7 @@ public void testEnsureSendUnsentRequestPollWIthZeroRunsOnce() { } @ParameterizedTest - @ValueSource(longs = {1, 100, 1000, 4999, 5001}) + @ValueSource(longs = {100, 4999, 5001}) public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { List> list = new ArrayList<>(); list.add(Optional.of(coordinatorRequestManager)); From 5fb7304892ecf5662a23b5c65111c79b012007ab Mon Sep 17 00:00:00 2001 From: brenden20 Date: Mon, 3 Jun 2024 14:55:04 -0500 Subject: [PATCH 35/61] Revert "Merge branch 'apache:trunk' into 16001" This reverts commit cf1b5e78096899281b4b523b8dc95eb042f267d8, reversing changes made to 4191c2d2cccdadd647ee950c81e201029ab08715. --- README.md | 7 +- build.gradle | 17 +- checkstyle/checkstyle.xml | 2 - .../apache/kafka/common/ShareGroupState.java | 56 ---- .../errors/FencedStateEpochException.java | 28 -- .../errors/InvalidRecordStateException.java | 30 -- .../InvalidShareSessionEpochException.java | 28 -- .../errors/ShareSessionNotFoundException.java | 28 -- .../DefaultChannelMetadataRegistry.java | 4 +- .../apache/kafka/common/protocol/ApiKeys.java | 6 +- .../apache/kafka/common/protocol/Errors.java | 10 +- .../common/requests/AbstractRequest.java | 8 - .../common/requests/AbstractResponse.java | 8 - .../requests/ShareAcknowledgeRequest.java | 127 --------- .../requests/ShareAcknowledgeResponse.java | 148 ---------- .../common/requests/ShareFetchMetadata.java | 121 -------- .../common/requests/ShareFetchRequest.java | 267 ------------------ .../common/requests/ShareFetchResponse.java | 212 -------------- .../requests/ShareGroupDescribeRequest.java | 100 ------- .../requests/ShareGroupDescribeResponse.java | 77 ----- .../requests/ShareGroupHeartbeatRequest.java | 86 ------ .../requests/ShareGroupHeartbeatResponse.java | 71 ----- .../message/FindCoordinatorRequest.json | 4 +- .../message/FindCoordinatorResponse.json | 4 +- .../common/message/ListGroupsRequest.json | 4 +- .../common/message/ListGroupsResponse.json | 4 +- .../message/ShareAcknowledgeRequest.json | 53 ---- .../message/ShareAcknowledgeResponse.json | 72 ----- .../common/message/ShareFetchRequest.json | 67 ----- .../common/message/ShareFetchResponse.json | 83 ------ .../message/ShareGroupDescribeRequest.json | 33 --- .../message/ShareGroupDescribeResponse.json | 87 ------ .../message/ShareGroupHeartbeatRequest.json | 39 --- .../message/ShareGroupHeartbeatResponse.json | 57 ---- .../common/requests/RequestResponseTest.java | 129 --------- .../connect/runtime/rest/RestClient.java | 6 +- .../rest/entities/CreateConnectorRequest.java | 2 +- .../KafkaConfigBackingStoreMockitoTest.java | 157 +--------- .../storage/KafkaConfigBackingStoreTest.java | 170 +++++++++++ .../server/builders/KafkaApisBuilder.java | 2 +- .../builders/ReplicaManagerBuilder.java | 2 +- 
.../src/main/scala/kafka/log/LogCleaner.scala | 17 +- .../src/main/scala/kafka/log/LogManager.scala | 2 +- .../kafka/network/RequestConvertToJson.scala | 8 - .../kafka/server/BrokerLifecycleManager.scala | 6 +- .../scala/kafka/server/BrokerServer.scala | 10 +- .../scala/kafka/server/ConfigHandler.scala | 2 +- .../ControllerConfigurationValidator.scala | 3 +- .../main/scala/kafka/server/KafkaConfig.scala | 4 +- .../main/scala/kafka/server/KafkaServer.scala | 2 +- .../scala/kafka/server/ReplicaManager.scala | 25 +- .../metadata/BrokerMetadataPublisher.scala | 16 ++ .../main/scala/kafka/zk/AdminZkClient.scala | 4 +- .../log/remote/RemoteLogManagerTest.java | 2 +- .../junit/ClusterTestExtensionsUnitTest.java | 49 +--- .../scala/unit/kafka/log/LogCleanerTest.scala | 24 +- .../scala/unit/kafka/log/LogConfigTest.scala | 26 +- .../kafka/server/ApiVersionsRequestTest.scala | 2 +- .../server/BrokerLifecycleManagerTest.scala | 2 +- .../kafka/server/ReplicaManagerTest.scala | 37 +-- .../unit/kafka/server/RequestQuotaTest.scala | 16 +- .../BrokerMetadataPublisherTest.scala | 101 ++++++- docs/security.html | 36 --- gradle/dependencies.gradle | 2 +- .../group/GroupMetadataManager.java | 28 +- .../consumer/TargetAssignmentBuilder.java | 30 +- .../group/GroupMetadataManagerTest.java | 43 --- .../consumer/TargetAssignmentBuilderTest.java | 50 ++-- .../controller/ClusterControlManager.java | 7 - .../publisher/BrokerRegistrationTracker.java | 136 --------- .../BrokerRegistrationTrackerTest.java | 151 ---------- .../storage/RemoteLogManagerConfig.java | 38 --- ...cBasedRemoteLogMetadataManagerHarness.java | 7 +- ...edRemoteLogMetadataManagerRestartTest.java | 163 ++++++----- .../storage/RemoteLogManagerConfigTest.java | 6 +- .../group/ConsumerGroupCommandTestUtils.java | 29 +- 76 files changed, 528 insertions(+), 2972 deletions(-) delete mode 100644 clients/src/main/java/org/apache/kafka/common/ShareGroupState.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java rename clients/src/{test => main}/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java (93%) delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java delete mode 100644 clients/src/main/resources/common/message/ShareAcknowledgeRequest.json delete mode 100644 
clients/src/main/resources/common/message/ShareAcknowledgeResponse.json delete mode 100644 clients/src/main/resources/common/message/ShareFetchRequest.json delete mode 100644 clients/src/main/resources/common/message/ShareFetchResponse.json delete mode 100644 clients/src/main/resources/common/message/ShareGroupDescribeRequest.json delete mode 100644 clients/src/main/resources/common/message/ShareGroupDescribeResponse.json delete mode 100644 clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json delete mode 100644 clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json delete mode 100644 metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java delete mode 100644 metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java diff --git a/README.md b/README.md index ab7dcd7685bde..27ce0dc0bce64 100644 --- a/README.md +++ b/README.md @@ -227,16 +227,11 @@ There are two code quality analysis tools that we regularly run, spotbugs and ch Checkstyle enforces a consistent coding style in Kafka. You can run checkstyle using: - ./gradlew checkstyleMain checkstyleTest spotlessCheck + ./gradlew checkstyleMain checkstyleTest The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the subproject build directories. They are also printed to the console. The build will fail if Checkstyle fails. -#### Spotless #### -The import order is a part of static check. please call `spotlessApply` to optimize the imports of Java codes before filing pull request : - - ./gradlew spotlessApply - #### Spotbugs #### Spotbugs uses static analysis to look for bugs in the code. You can run spotbugs using: diff --git a/build.gradle b/build.gradle index a2a6531d29a62..ea168ecb26fb4 100644 --- a/build.gradle +++ b/build.gradle @@ -47,9 +47,7 @@ plugins { // Updating the shadow plugin version to 8.1.1 causes issue with signing and publishing the shadowed // artifacts - see https://github.com/johnrengelman/shadow/issues/901 id 'com.github.johnrengelman.shadow' version '8.1.0' apply false - // the minimum required JRE of 6.14.0+ is 11 - // refer:https://github.com/diffplug/spotless/tree/main/plugin-gradle#requirements - id 'com.diffplug.spotless' version "6.13.0" apply false + id 'com.diffplug.spotless' version '6.14.0' apply false // 6.14.1 and newer require Java 11 at compile time, so we can't upgrade until AK 4.0 } ext { @@ -200,9 +198,6 @@ def determineCommitId() { } } -def spotlessApplyModules = [''] - - apply from: file('wrapper.gradle') if (repo != null) { @@ -798,16 +793,6 @@ subprojects { skipProjects = [ ":jmh-benchmarks", ":trogdor" ] skipConfigurations = [ "zinc" ] } - - if (project.name in spotlessApplyModules) { - apply plugin: 'com.diffplug.spotless' - spotless { - java { - importOrder('kafka', 'org.apache.kafka', 'com', 'net', 'org', 'java', 'javax', '', '\\#') - removeUnusedImports() - } - } - } } gradle.taskGraph.whenReady { taskGraph -> diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml index 61eb7e4b245fd..aff659638928b 100644 --- a/checkstyle/checkstyle.xml +++ b/checkstyle/checkstyle.xml @@ -82,8 +82,6 @@ - - diff --git a/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java b/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java deleted file mode 100644 index 716421f3dea2a..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * 
Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common; - -import java.util.Arrays; -import java.util.Locale; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * The share group state. - */ -public enum ShareGroupState { - UNKNOWN("Unknown"), - STABLE("Stable"), - DEAD("Dead"), - EMPTY("Empty"); - - private final static Map NAME_TO_ENUM = Arrays.stream(values()) - .collect(Collectors.toMap(state -> state.name.toUpperCase(Locale.ROOT), Function.identity())); - - private final String name; - - ShareGroupState(String name) { - this.name = name; - } - - /** - * Case-insensitive share group state lookup by string name. - */ - public static ShareGroupState parse(String name) { - ShareGroupState state = NAME_TO_ENUM.get(name.toUpperCase(Locale.ROOT)); - return state == null ? UNKNOWN : state; - } - - @Override - public String toString() { - return name; - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java b/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java deleted file mode 100644 index 1e74bba199402..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.errors; - -/** - * Thrown when the share coordinator rejected the request because the share-group state epoch did not match. 
- */ -public class FencedStateEpochException extends ApiException { - private static final long serialVersionUID = 1L; - - public FencedStateEpochException(String message) { - super(message); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java deleted file mode 100644 index ae0fef5edeaef..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.errors; - -/** - * Thrown when the acknowledgement of delivery of a record could not be completed because the record - * state is invalid. - */ -public class InvalidRecordStateException extends ApiException { - - private static final long serialVersionUID = 1L; - - public InvalidRecordStateException(String message) { - super(message); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java deleted file mode 100644 index e261d8b7a8e88..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.errors; - -/** - * Thrown when the share session epoch is invalid. 
- */ -public class InvalidShareSessionEpochException extends RetriableException { - private static final long serialVersionUID = 1L; - - public InvalidShareSessionEpochException(String message) { - super(message); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java b/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java deleted file mode 100644 index 2b2249f8a5831..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.errors; - -/** - * Thrown when the share session was not found. - */ -public class ShareSessionNotFoundException extends RetriableException { - private static final long serialVersionUID = 1L; - - public ShareSessionNotFoundException(String message) { - super(message); - } -} diff --git a/clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java b/clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java similarity index 93% rename from clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java rename to clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java index 8985d00410c37..ae9e9a83a0c2c 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java +++ b/clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java @@ -22,7 +22,9 @@ public class DefaultChannelMetadataRegistry implements ChannelMetadataRegistry { @Override public void registerCipherInformation(final CipherInformation cipherInformation) { - this.cipherInformation = cipherInformation; + if (this.cipherInformation != null) { + this.cipherInformation = cipherInformation; + } } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index ffd5737ca3162..16bec4fb72dc6 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -118,11 +118,7 @@ public enum ApiKeys { PUSH_TELEMETRY(ApiMessageType.PUSH_TELEMETRY), ASSIGN_REPLICAS_TO_DIRS(ApiMessageType.ASSIGN_REPLICAS_TO_DIRS), LIST_CLIENT_METRICS_RESOURCES(ApiMessageType.LIST_CLIENT_METRICS_RESOURCES), - DESCRIBE_TOPIC_PARTITIONS(ApiMessageType.DESCRIBE_TOPIC_PARTITIONS), - SHARE_GROUP_HEARTBEAT(ApiMessageType.SHARE_GROUP_HEARTBEAT), - SHARE_GROUP_DESCRIBE(ApiMessageType.SHARE_GROUP_DESCRIBE), - SHARE_FETCH(ApiMessageType.SHARE_FETCH), - 
SHARE_ACKNOWLEDGE(ApiMessageType.SHARE_ACKNOWLEDGE); + DESCRIBE_TOPIC_PARTITIONS(ApiMessageType.DESCRIBE_TOPIC_PARTITIONS); private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index 10ae05aa850c9..900d191c8f9d4 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -41,7 +41,6 @@ import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.FencedLeaderEpochException; import org.apache.kafka.common.errors.FencedMemberEpochException; -import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.FetchSessionIdNotFoundException; import org.apache.kafka.common.errors.FetchSessionTopicIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; @@ -65,14 +64,12 @@ import org.apache.kafka.common.errors.InvalidPidMappingException; import org.apache.kafka.common.errors.InvalidPrincipalTypeException; import org.apache.kafka.common.errors.InvalidProducerEpochException; -import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidRegistrationException; import org.apache.kafka.common.errors.InvalidReplicaAssignmentException; import org.apache.kafka.common.errors.InvalidReplicationFactorException; import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.errors.InvalidRequiredAcksException; import org.apache.kafka.common.errors.InvalidSessionTimeoutException; -import org.apache.kafka.common.errors.InvalidShareSessionEpochException; import org.apache.kafka.common.errors.InvalidTimestampException; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.InvalidTxnStateException; @@ -112,7 +109,6 @@ import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.SaslAuthenticationException; import org.apache.kafka.common.errors.SecurityDisabledException; -import org.apache.kafka.common.errors.ShareSessionNotFoundException; import org.apache.kafka.common.errors.SnapshotNotFoundException; import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.StaleMemberEpochException; @@ -398,11 +394,7 @@ public enum Errors { UNKNOWN_SUBSCRIPTION_ID(117, "Client sent a push telemetry request with an invalid or outdated subscription ID.", UnknownSubscriptionIdException::new), TELEMETRY_TOO_LARGE(118, "Client sent a push telemetry request larger than the maximum size the broker will accept.", TelemetryTooLargeException::new), INVALID_REGISTRATION(119, "The controller has considered the broker registration to be invalid.", InvalidRegistrationException::new), - TRANSACTION_ABORTABLE(120, "The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID.", TransactionAbortableException::new), - INVALID_RECORD_STATE(121, "The record state is invalid. 
The acknowledgement of delivery could not be completed.", InvalidRecordStateException::new), - SHARE_SESSION_NOT_FOUND(122, "The share session was not found.", ShareSessionNotFoundException::new), - INVALID_SHARE_SESSION_EPOCH(123, "The share session epoch is invalid.", InvalidShareSessionEpochException::new), - FENCED_STATE_EPOCH(124, "The share coordinator rejected the request because the share-group state epoch did not match.", FencedStateEpochException::new); + TRANSACTION_ABORTABLE(120, "The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID.", TransactionAbortableException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java index 589e163992b22..b51221f5af642 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java @@ -326,14 +326,6 @@ private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, return ListClientMetricsResourcesRequest.parse(buffer, apiVersion); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsRequest.parse(buffer, apiVersion); - case SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatRequest.parse(buffer, apiVersion); - case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeRequest.parse(buffer, apiVersion); - case SHARE_FETCH: - return ShareFetchRequest.parse(buffer, apiVersion); - case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeRequest.parse(buffer, apiVersion); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseRequest`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index 5534168098e9d..dbafdbf3bcb07 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -263,14 +263,6 @@ public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer response return ListClientMetricsResourcesResponse.parse(responseBuffer, version); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsResponse.parse(responseBuffer, version); - case SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatResponse.parse(responseBuffer, version); - case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeResponse.parse(responseBuffer, version); - case SHARE_FETCH: - return ShareFetchResponse.parse(responseBuffer, version); - case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeResponse.parse(responseBuffer, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java deleted file mode 100644 index 1b77b43be33c1..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.kafka.common.requests;
-
-import org.apache.kafka.common.TopicIdPartition;
-import org.apache.kafka.common.Uuid;
-import org.apache.kafka.common.message.ShareAcknowledgeRequestData;
-import org.apache.kafka.common.message.ShareAcknowledgeResponseData;
-import org.apache.kafka.common.protocol.ApiKeys;
-import org.apache.kafka.common.protocol.ByteBufferAccessor;
-import org.apache.kafka.common.protocol.Errors;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-public class ShareAcknowledgeRequest extends AbstractRequest {
-
-    public static class Builder extends AbstractRequest.Builder<ShareAcknowledgeRequest> {
-
-        private final ShareAcknowledgeRequestData data;
-
-        public Builder(ShareAcknowledgeRequestData data) {
-            this(data, false);
-        }
-
-        public Builder(ShareAcknowledgeRequestData data, boolean enableUnstableLastVersion) {
-            super(ApiKeys.SHARE_ACKNOWLEDGE, enableUnstableLastVersion);
-            this.data = data;
-        }
-
-        public static ShareAcknowledgeRequest.Builder forConsumer(String groupId, ShareFetchMetadata metadata,
-                                                                  Map<TopicIdPartition, List<ShareAcknowledgeRequestData.AcknowledgementBatch>> acknowledgementsMap) {
-            ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData();
-            data.setGroupId(groupId);
-            if (metadata != null) {
-                data.setMemberId(metadata.memberId().toString());
-                data.setShareSessionEpoch(metadata.epoch());
-            }
-
-            // Build a map of topics to acknowledge keyed by topic ID, and within each a map of partitions keyed by index
-            Map<Uuid, Map<Integer, ShareAcknowledgeRequestData.AcknowledgePartition>> ackMap = new HashMap<>();
-
-            for (Map.Entry<TopicIdPartition, List<ShareAcknowledgeRequestData.AcknowledgementBatch>> acknowledgeEntry : acknowledgementsMap.entrySet()) {
-                TopicIdPartition tip = acknowledgeEntry.getKey();
-                Map<Integer, ShareAcknowledgeRequestData.AcknowledgePartition> partMap = ackMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>());
-                ShareAcknowledgeRequestData.AcknowledgePartition ackPartition = partMap.get(tip.partition());
-                if (ackPartition == null) {
-                    ackPartition = new ShareAcknowledgeRequestData.AcknowledgePartition()
-                            .setPartitionIndex(tip.partition());
-                    partMap.put(tip.partition(), ackPartition);
-                }
-                ackPartition.setAcknowledgementBatches(acknowledgeEntry.getValue());
-            }
-
-            // Finally, build up the data to fetch
-            data.setTopics(new ArrayList<>());
-            ackMap.forEach((topicId, partMap) -> {
-                ShareAcknowledgeRequestData.AcknowledgeTopic ackTopic = new ShareAcknowledgeRequestData.AcknowledgeTopic()
-                        .setTopicId(topicId)
-                        .setPartitions(new ArrayList<>());
-                data.topics().add(ackTopic);
-
-                partMap.forEach((index, ackPartition) -> ackTopic.partitions().add(ackPartition));
-            });
-
-            return new ShareAcknowledgeRequest.Builder(data, true);
-        }
-
-        public ShareAcknowledgeRequestData data() {
-            return data;
-        }
-
-        @Override
-        public ShareAcknowledgeRequest build(short version) {
-            return new ShareAcknowledgeRequest(data, version);
-        }
-
-        @Override
-        public String toString() {
-            return data.toString();
-        }
-    }
-
-    private final 
ShareAcknowledgeRequestData data; - - public ShareAcknowledgeRequest(ShareAcknowledgeRequestData data, short version) { - super(ApiKeys.SHARE_ACKNOWLEDGE, version); - this.data = data; - } - - @Override - public ShareAcknowledgeRequestData data() { - return data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - Errors error = Errors.forException(e); - return new ShareAcknowledgeResponse(new ShareAcknowledgeResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code())); - } - - public static ShareAcknowledgeRequest parse(ByteBuffer buffer, short version) { - return new ShareAcknowledgeRequest( - new ShareAcknowledgeRequestData(new ByteBufferAccessor(buffer), version), - version - ); - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java deleted file mode 100644 index 5cab233dccac8..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ShareAcknowledgeResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -/** - * Possible error codes. 
- * - {@link Errors#GROUP_AUTHORIZATION_FAILED} - * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} - * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} - * - {@link Errors#NOT_LEADER_OR_FOLLOWER} - * - {@link Errors#UNKNOWN_TOPIC_ID} - * - {@link Errors#INVALID_RECORD_STATE} - * - {@link Errors#KAFKA_STORAGE_ERROR} - * - {@link Errors#INVALID_REQUEST} - * - {@link Errors#UNKNOWN_SERVER_ERROR} - */ -public class ShareAcknowledgeResponse extends AbstractResponse { - - private final ShareAcknowledgeResponseData data; - - public ShareAcknowledgeResponse(ShareAcknowledgeResponseData data) { - super(ApiKeys.SHARE_ACKNOWLEDGE); - this.data = data; - } - - public Errors error() { - return Errors.forCode(data.errorCode()); - } - - @Override - public ShareAcknowledgeResponseData data() { - return data; - } - - @Override - public Map errorCounts() { - HashMap counts = new HashMap<>(); - updateErrorCounts(counts, Errors.forCode(data.errorCode())); - data.responses().forEach( - topic -> topic.partitions().forEach( - partition -> updateErrorCounts(counts, Errors.forCode(partition.errorCode())) - ) - ); - return counts; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - data.setThrottleTimeMs(throttleTimeMs); - } - - public static ShareAcknowledgeResponse parse(ByteBuffer buffer, short version) { - return new ShareAcknowledgeResponse( - new ShareAcknowledgeResponseData(new ByteBufferAccessor(buffer), version) - ); - } - - private static boolean matchingTopic(ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse previousTopic, TopicIdPartition currentTopic) { - if (previousTopic == null) - return false; - return previousTopic.topicId().equals(currentTopic.topicId()); - } - - public static ShareAcknowledgeResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { - return partitionResponse(topicIdPartition.topicPartition().partition(), error); - } - - public static ShareAcknowledgeResponseData.PartitionData partitionResponse(int partition, Errors error) { - return new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partition) - .setErrorCode(error.code()); - } - - public static ShareAcknowledgeResponse of(Errors error, - int throttleTimeMs, - LinkedHashMap responseData, - List nodeEndpoints) { - return new ShareAcknowledgeResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints)); - } - - public static ShareAcknowledgeResponseData toMessage(Errors error, int throttleTimeMs, - Iterator> partIterator, - List nodeEndpoints) { - Map topicResponseList = new LinkedHashMap<>(); - while (partIterator.hasNext()) { - Map.Entry entry = partIterator.next(); - ShareAcknowledgeResponseData.PartitionData partitionData = entry.getValue(); - // Since PartitionData alone doesn't know the partition ID, we set it here - partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); - // Checking if the topic is already present in the map - if (topicResponseList.containsKey(entry.getKey().topicId())) { - topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); - } else { - List partitionResponses = new ArrayList<>(); - partitionResponses.add(partitionData); - topicResponseList.put(entry.getKey().topicId(), new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() - .setTopicId(entry.getKey().topicId()) - .setPartitions(partitionResponses)); - } - } - ShareAcknowledgeResponseData data = 
new ShareAcknowledgeResponseData(); - // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list - nodeEndpoints.forEach(endpoint -> data.nodeEndpoints().add( - new ShareAcknowledgeResponseData.NodeEndpoint() - .setNodeId(endpoint.id()) - .setHost(endpoint.host()) - .setPort(endpoint.port()) - .setRack(endpoint.rack()))); - return data.setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code()) - .setResponses(new ArrayList<>(topicResponseList.values())); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java deleted file mode 100644 index 4e5bcc2237e43..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.Uuid; - -public class ShareFetchMetadata { - /** - * The first epoch. When used in a ShareFetch request, indicates that the client - * wants to create a session. - */ - public static final int INITIAL_EPOCH = 0; - - /** - * An invalid epoch. When used in a ShareFetch request, indicates that the client - * wants to close an existing session. - */ - public static final int FINAL_EPOCH = -1; - - /** - * - */ - public boolean isNewSession() { - return epoch == INITIAL_EPOCH; - } - - /** - * Returns true if this is a full share fetch request. - */ - public boolean isFull() { - return (this.epoch == INITIAL_EPOCH) || (this.epoch == FINAL_EPOCH); - } - - /** - * Returns the next epoch. - * - * @param prevEpoch The previous epoch. - * @return The next epoch. - */ - public static int nextEpoch(int prevEpoch) { - if (prevEpoch < 0) { - // The next epoch after FINAL_EPOCH is always FINAL_EPOCH itself. - return FINAL_EPOCH; - } else if (prevEpoch == Integer.MAX_VALUE) { - return 1; - } else { - return prevEpoch + 1; - } - } - - /** - * The member ID. - */ - private final Uuid memberId; - - /** - * The share session epoch. 
- */ - private final int epoch; - - public ShareFetchMetadata(Uuid memberId, int epoch) { - this.memberId = memberId; - this.epoch = epoch; - } - - public static ShareFetchMetadata initialEpoch(Uuid memberId) { - return new ShareFetchMetadata(memberId, INITIAL_EPOCH); - } - - public ShareFetchMetadata nextEpoch() { - return new ShareFetchMetadata(memberId, nextEpoch(epoch)); - } - - public ShareFetchMetadata nextCloseExistingAttemptNew() { - return new ShareFetchMetadata(memberId, INITIAL_EPOCH); - } - - public ShareFetchMetadata finalEpoch() { - return new ShareFetchMetadata(memberId, FINAL_EPOCH); - } - - public Uuid memberId() { - return memberId; - } - - public int epoch() { - return epoch; - } - - public boolean isFinalEpoch() { - return epoch == FINAL_EPOCH; - } - - public String toString() { - StringBuilder bld = new StringBuilder(); - bld.append("(memberId=").append(memberId).append(", "); - if (epoch == INITIAL_EPOCH) { - bld.append("epoch=INITIAL)"); - } else if (epoch == FINAL_EPOCH) { - bld.append("epoch=FINAL)"); - } else { - bld.append("epoch=").append(epoch).append(")"); - } - return bld.toString(); - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java deleted file mode 100644 index 385e802a691a9..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ShareFetchRequestData; -import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -public class ShareFetchRequest extends AbstractRequest { - - public static class Builder extends AbstractRequest.Builder { - - private final ShareFetchRequestData data; - - public Builder(ShareFetchRequestData data) { - this(data, false); - } - - public Builder(ShareFetchRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.SHARE_FETCH, enableUnstableLastVersion); - this.data = data; - } - - public static Builder forConsumer(String groupId, ShareFetchMetadata metadata, - int maxWait, int minBytes, int maxBytes, int fetchSize, - List send, List forget, - Map> acknowledgementsMap) { - ShareFetchRequestData data = new ShareFetchRequestData(); - data.setGroupId(groupId); - int ackOnlyPartitionMaxBytes = fetchSize; - boolean isClosingShareSession = false; - if (metadata != null) { - data.setMemberId(metadata.memberId().toString()); - data.setShareSessionEpoch(metadata.epoch()); - if (metadata.isFinalEpoch()) { - isClosingShareSession = true; - ackOnlyPartitionMaxBytes = 0; - } - } - data.setMaxWaitMs(maxWait); - data.setMinBytes(minBytes); - data.setMaxBytes(maxBytes); - - // Build a map of topics to fetch keyed by topic ID, and within each a map of partitions keyed by index - Map> fetchMap = new HashMap<>(); - - // First, start by adding the list of topic-partitions we are fetching - if (!isClosingShareSession) { - for (TopicIdPartition tip : send) { - Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); - ShareFetchRequestData.FetchPartition fetchPartition = new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tip.partition()) - .setPartitionMaxBytes(fetchSize); - partMap.put(tip.partition(), fetchPartition); - } - } - - // Next, add acknowledgements that we are piggybacking onto the fetch. 
Generally, the list of - // topic-partitions will be a subset, but if the assignment changes, there might be new entries to add - for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { - TopicIdPartition tip = acknowledgeEntry.getKey(); - Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); - ShareFetchRequestData.FetchPartition fetchPartition = partMap.get(tip.partition()); - if (fetchPartition == null) { - fetchPartition = new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tip.partition()) - .setPartitionMaxBytes(ackOnlyPartitionMaxBytes); - partMap.put(tip.partition(), fetchPartition); - } - fetchPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); - } - - // Build up the data to fetch - if (!fetchMap.isEmpty()) { - data.setTopics(new ArrayList<>()); - fetchMap.forEach((topicId, partMap) -> { - ShareFetchRequestData.FetchTopic fetchTopic = new ShareFetchRequestData.FetchTopic() - .setTopicId(topicId) - .setPartitions(new ArrayList<>()); - partMap.forEach((index, fetchPartition) -> fetchTopic.partitions().add(fetchPartition)); - data.topics().add(fetchTopic); - }); - } - - // And finally, forget the topic-partitions that are no longer in the session - if (!forget.isEmpty()) { - Map> forgetMap = new HashMap<>(); - for (TopicIdPartition tip : forget) { - List partList = forgetMap.computeIfAbsent(tip.topicId(), k -> new ArrayList<>()); - partList.add(tip.partition()); - } - data.setForgottenTopicsData(new ArrayList<>()); - forgetMap.forEach((topicId, partList) -> { - ShareFetchRequestData.ForgottenTopic forgetTopic = new ShareFetchRequestData.ForgottenTopic() - .setTopicId(topicId) - .setPartitions(new ArrayList<>()); - partList.forEach(index -> forgetTopic.partitions().add(index)); - data.forgottenTopicsData().add(forgetTopic); - }); - } - - return new Builder(data, true); - } - - public ShareFetchRequestData data() { - return data; - } - - @Override - public ShareFetchRequest build(short version) { - return new ShareFetchRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final ShareFetchRequestData data; - private volatile LinkedHashMap shareFetchData = null; - private volatile List toForget = null; - - public ShareFetchRequest(ShareFetchRequestData data, short version) { - super(ApiKeys.SHARE_FETCH, version); - this.data = data; - } - - @Override - public ShareFetchRequestData data() { - return data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - Errors error = Errors.forException(e); - return new ShareFetchResponse(new ShareFetchResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code())); - } - - public static ShareFetchRequest parse(ByteBuffer buffer, short version) { - return new ShareFetchRequest( - new ShareFetchRequestData(new ByteBufferAccessor(buffer), version), - version - ); - } - - public static final class SharePartitionData { - public final Uuid topicId; - public final int maxBytes; - - public SharePartitionData( - Uuid topicId, - int maxBytes - ) { - this.topicId = topicId; - this.maxBytes = maxBytes; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ShareFetchRequest.SharePartitionData that = (ShareFetchRequest.SharePartitionData) o; - return Objects.equals(topicId, that.topicId) && - maxBytes == that.maxBytes; - } - - @Override - public int hashCode() { - return 
Objects.hash(topicId, maxBytes); - } - - @Override - public String toString() { - return "SharePartitionData(" + - "topicId=" + topicId + - ", maxBytes=" + maxBytes + - ')'; - } - } - - public int minBytes() { - return data.minBytes(); - } - - public int maxBytes() { - return data.maxBytes(); - } - - public int maxWait() { - return data.maxWaitMs(); - } - - public Map shareFetchData(Map topicNames) { - if (shareFetchData == null) { - synchronized (this) { - if (shareFetchData == null) { - // Assigning the lazy-initialized `shareFetchData` in the last step - // to avoid other threads accessing a half-initialized object. - final LinkedHashMap shareFetchDataTmp = new LinkedHashMap<>(); - data.topics().forEach(shareFetchTopic -> { - String name = topicNames.get(shareFetchTopic.topicId()); - shareFetchTopic.partitions().forEach(shareFetchPartition -> { - // Topic name may be null here if the topic name was unable to be resolved using the topicNames map. - shareFetchDataTmp.put(new TopicIdPartition(shareFetchTopic.topicId(), new TopicPartition(name, shareFetchPartition.partitionIndex())), - new ShareFetchRequest.SharePartitionData( - shareFetchTopic.topicId(), - shareFetchPartition.partitionMaxBytes() - ) - ); - }); - }); - shareFetchData = shareFetchDataTmp; - } - } - } - return shareFetchData; - } - - public List forgottenTopics(Map topicNames) { - if (toForget == null) { - synchronized (this) { - if (toForget == null) { - // Assigning the lazy-initialized `toForget` in the last step - // to avoid other threads accessing a half-initialized object. - final List toForgetTmp = new ArrayList<>(); - data.forgottenTopicsData().forEach(forgottenTopic -> { - String name = topicNames.get(forgottenTopic.topicId()); - // Topic name may be null here if the topic name was unable to be resolved using the topicNames map. - forgottenTopic.partitions().forEach(partitionId -> toForgetTmp.add(new TopicIdPartition(forgottenTopic.topicId(), new TopicPartition(name, partitionId)))); - }); - toForget = toForgetTmp; - } - } - } - return toForget; - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java deleted file mode 100644 index b33969e0efa41..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.ObjectSerializationCache; -import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.Records; - -import java.nio.ByteBuffer; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Iterator; -import java.util.Collections; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; - - -/** - * Possible error codes. - * - {@link Errors#GROUP_AUTHORIZATION_FAILED} - * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} - * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} - * - {@link Errors#NOT_LEADER_OR_FOLLOWER} - * - {@link Errors#UNKNOWN_TOPIC_ID} - * - {@link Errors#INVALID_RECORD_STATE} - * - {@link Errors#KAFKA_STORAGE_ERROR} - * - {@link Errors#CORRUPT_MESSAGE} - * - {@link Errors#INVALID_REQUEST} - * - {@link Errors#UNKNOWN_SERVER_ERROR} - */ -public class ShareFetchResponse extends AbstractResponse { - - private final ShareFetchResponseData data; - - private volatile LinkedHashMap responseData = null; - - public ShareFetchResponse(ShareFetchResponseData data) { - super(ApiKeys.SHARE_FETCH); - this.data = data; - } - - public Errors error() { - return Errors.forCode(data.errorCode()); - } - - @Override - public ShareFetchResponseData data() { - return data; - } - - @Override - public Map errorCounts() { - HashMap counts = new HashMap<>(); - updateErrorCounts(counts, Errors.forCode(data.errorCode())); - data.responses().forEach( - topic -> topic.partitions().forEach( - partition -> updateErrorCounts(counts, Errors.forCode(partition.errorCode())) - ) - ); - return counts; - } - - public LinkedHashMap responseData(Map topicNames) { - if (responseData == null) { - synchronized (this) { - // Assigning the lazy-initialized `responseData` in the last step - // to avoid other threads accessing a half-initialized object. - if (responseData == null) { - final LinkedHashMap responseDataTmp = new LinkedHashMap<>(); - data.responses().forEach(topicResponse -> { - String name = topicNames.get(topicResponse.topicId()); - if (name != null) { - topicResponse.partitions().forEach(partitionData -> responseDataTmp.put(new TopicIdPartition(topicResponse.topicId(), - new TopicPartition(name, partitionData.partitionIndex())), partitionData)); - } - }); - responseData = responseDataTmp; - } - } - } - return responseData; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - data.setThrottleTimeMs(throttleTimeMs); - } - - public static ShareFetchResponse parse(ByteBuffer buffer, short version) { - return new ShareFetchResponse( - new ShareFetchResponseData(new ByteBufferAccessor(buffer), version) - ); - } - - /** - * Returns `partition.records` as `Records` (instead of `BaseRecords`). If `records` is `null`, returns `MemoryRecords.EMPTY`. - * - *
-     * <p>If this response was deserialized after a share fetch, this method should never fail. An example where this would
-     * fail is a down-converted response (e.g. LazyDownConversionRecords) on the broker (before it's serialized and
-     * sent on the wire).
-     *
-     * @param partition partition data
-     * @return Records or empty record if the records in PartitionData is null.
-     */
-    public static Records recordsOrFail(ShareFetchResponseData.PartitionData partition) {
-        if (partition.records() == null) return MemoryRecords.EMPTY;
-        if (partition.records() instanceof Records) return (Records) partition.records();
-        throw new ClassCastException("The record type is " + partition.records().getClass().getSimpleName() + ", which is not a subtype of " +
-                Records.class.getSimpleName() + ". This method is only safe to call if the `ShareFetchResponse` was deserialized from bytes.");
-    }
-
-    /**
-     * Convenience method to find the size of a response.
-     *
-     * @param version The version of the request
-     * @param partIterator The partition iterator.
-     * @return The response size in bytes.
-     */
-    public static int sizeOf(short version,
-                             Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partIterator) {
-        // Since the throttleTimeMs and metadata field sizes are constant and fixed, we can
-        // use arbitrary values here without affecting the result.
-        ShareFetchResponseData data = toMessage(Errors.NONE, 0, partIterator, Collections.emptyList());
-        ObjectSerializationCache cache = new ObjectSerializationCache();
-        return 4 + data.size(cache, version);
-    }
-
-    /**
-     * @return The size in bytes of the records. 0 is returned if records of input partition is null.
-     */
-    public static int recordsSize(ShareFetchResponseData.PartitionData partition) {
-        return partition.records() == null ? 0 : partition.records().sizeInBytes();
-    }
-
-    public static ShareFetchResponse of(Errors error,
-                                        int throttleTimeMs,
-                                        LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> responseData,
-                                        List<Node> nodeEndpoints) {
-        return new ShareFetchResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints));
-    }
-
-    public static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs,
-                                                   Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partIterator,
-                                                   List<Node> nodeEndpoints) {
-        Map<Uuid, ShareFetchResponseData.ShareFetchableTopicResponse> topicResponseList = new LinkedHashMap<>();
-        while (partIterator.hasNext()) {
-            Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> entry = partIterator.next();
-            ShareFetchResponseData.PartitionData partitionData = entry.getValue();
-            // Since PartitionData alone doesn't know the partition ID, we set it here
-            partitionData.setPartitionIndex(entry.getKey().topicPartition().partition());
-            // Checking if the topic is already present in the map
-            if (topicResponseList.containsKey(entry.getKey().topicId())) {
-                topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData);
-            } else {
-                List<ShareFetchResponseData.PartitionData> partitionResponses = new ArrayList<>();
-                partitionResponses.add(partitionData);
-                topicResponseList.put(entry.getKey().topicId(), new ShareFetchResponseData.ShareFetchableTopicResponse()
-                        .setTopicId(entry.getKey().topicId())
-                        .setPartitions(partitionResponses));
-            }
-        }
-        ShareFetchResponseData data = new ShareFetchResponseData();
-        // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list
-        nodeEndpoints.forEach(endpoint -> data.nodeEndpoints().add(
-                new ShareFetchResponseData.NodeEndpoint()
-                        .setNodeId(endpoint.id())
-                        .setHost(endpoint.host())
-                        .setPort(endpoint.port())
-                        .setRack(endpoint.rack())));
-        return data.setThrottleTimeMs(throttleTimeMs)
-                .setErrorCode(error.code())
-                .setResponses(new ArrayList<>(topicResponseList.values()));
-    }
-
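Reviewer note: to make the removal above easier to check, a minimal usage sketch of how a broker-side caller such as KafkaApis could have combined of() with the partitionResponse() helpers that follow. It assumes this file's imports plus java.util.Collections; the topic name, ID, and partition are illustrative placeholders, not values taken from this patch.

    // Hypothetical caller of the removed helpers (identifiers are illustrative only):
    Uuid topicId = Uuid.randomUuid();
    TopicIdPartition tip = new TopicIdPartition(topicId, new TopicPartition("orders", 0));

    // partitionResponse() fills in only the partition index and the error code.
    LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> parts = new LinkedHashMap<>();
    parts.put(tip, ShareFetchResponse.partitionResponse(tip, Errors.NOT_LEADER_OR_FOLLOWER));

    // Per the comment in toMessage(), KafkaApis would pass the new leader's endpoint
    // here on NOT_LEADER_OR_FOLLOWER; an empty list keeps the sketch self-contained.
    ShareFetchResponse response = ShareFetchResponse.of(Errors.NONE, 0, parts, Collections.emptyList());
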
- public static ShareFetchResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { - return partitionResponse(topicIdPartition.topicPartition().partition(), error); - } - - public static ShareFetchResponseData.PartitionData partitionResponse(int partition, Errors error) { - return new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) - .setErrorCode(error.code()); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java deleted file mode 100644 index 25c02e4a83c5e..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.ShareGroupDescribeRequestData; -import org.apache.kafka.common.message.ShareGroupDescribeResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.List; -import java.util.stream.Collectors; - -public class ShareGroupDescribeRequest extends AbstractRequest { - - public static class Builder extends AbstractRequest.Builder { - - private final ShareGroupDescribeRequestData data; - - public Builder(ShareGroupDescribeRequestData data) { - this(data, false); - } - - public Builder(ShareGroupDescribeRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.SHARE_GROUP_DESCRIBE, enableUnstableLastVersion); - this.data = data; - } - - @Override - public ShareGroupDescribeRequest build(short version) { - return new ShareGroupDescribeRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final ShareGroupDescribeRequestData data; - - public ShareGroupDescribeRequest(ShareGroupDescribeRequestData data, short version) { - super(ApiKeys.SHARE_GROUP_DESCRIBE, version); - this.data = data; - } - - @Override - public ShareGroupDescribeResponse getErrorResponse(int throttleTimeMs, Throwable e) { - ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData() - .setThrottleTimeMs(throttleTimeMs); - // Set error for each group - short errorCode = Errors.forException(e).code(); - this.data.groupIds().forEach( - groupId -> data.groups().add( - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupId) - .setErrorCode(errorCode) - ) - ); - return new ShareGroupDescribeResponse(data); - } - - @Override - public ShareGroupDescribeRequestData data() { - return data; - } - - public 
static ShareGroupDescribeRequest parse(ByteBuffer buffer, short version) { - return new ShareGroupDescribeRequest( - new ShareGroupDescribeRequestData(new ByteBufferAccessor(buffer), version), - version - ); - } - - public static List getErrorDescribedGroupList( - List groupIds, - Errors error - ) { - return groupIds.stream() - .map(groupId -> new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupId) - .setErrorCode(error.code()) - ).collect(Collectors.toList()); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java deleted file mode 100644 index 95dd371eedfa7..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.ShareGroupDescribeResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; - -/** - * Possible error codes. 
- * - * - {@link Errors#GROUP_AUTHORIZATION_FAILED} - * - {@link Errors#NOT_COORDINATOR} - * - {@link Errors#COORDINATOR_NOT_AVAILABLE} - * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} - * - {@link Errors#INVALID_REQUEST} - * - {@link Errors#INVALID_GROUP_ID} - * - {@link Errors#GROUP_ID_NOT_FOUND} - */ -public class ShareGroupDescribeResponse extends AbstractResponse { - - private final ShareGroupDescribeResponseData data; - - public ShareGroupDescribeResponse(ShareGroupDescribeResponseData data) { - super(ApiKeys.SHARE_GROUP_DESCRIBE); - this.data = data; - } - - @Override - public ShareGroupDescribeResponseData data() { - return data; - } - - @Override - public Map errorCounts() { - HashMap counts = new HashMap<>(); - data.groups().forEach( - group -> updateErrorCounts(counts, Errors.forCode(group.errorCode())) - ); - return counts; - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - data.setThrottleTimeMs(throttleTimeMs); - } - - public static ShareGroupDescribeResponse parse(ByteBuffer buffer, short version) { - return new ShareGroupDescribeResponse( - new ShareGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) - ); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java deleted file mode 100644 index 7e112ef29dd14..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import java.nio.ByteBuffer; - -import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; -import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -public class ShareGroupHeartbeatRequest extends AbstractRequest { - /** - * A member epoch of -1 means that the member wants to leave the group. - */ - public static final int LEAVE_GROUP_MEMBER_EPOCH = -1; - - /** - * A member epoch of 0 means that the member wants to join the group. 
- */ - public static final int JOIN_GROUP_MEMBER_EPOCH = 0; - - public static class Builder extends AbstractRequest.Builder { - private final ShareGroupHeartbeatRequestData data; - - public Builder(ShareGroupHeartbeatRequestData data) { - this(data, true); - } - - public Builder(ShareGroupHeartbeatRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.SHARE_GROUP_HEARTBEAT, enableUnstableLastVersion); - this.data = data; - } - - @Override - public ShareGroupHeartbeatRequest build(short version) { - return new ShareGroupHeartbeatRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final ShareGroupHeartbeatRequestData data; - - public ShareGroupHeartbeatRequest(ShareGroupHeartbeatRequestData data, short version) { - super(ApiKeys.SHARE_GROUP_HEARTBEAT, version); - this.data = data; - } - - @Override - public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - return new ShareGroupHeartbeatResponse( - new ShareGroupHeartbeatResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setErrorCode(Errors.forException(e).code()) - ); - } - - @Override - public ShareGroupHeartbeatRequestData data() { - return data; - } - - public static ShareGroupHeartbeatRequest parse(ByteBuffer buffer, short version) { - return new ShareGroupHeartbeatRequest(new ShareGroupHeartbeatRequestData( - new ByteBufferAccessor(buffer), version), version); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java deleted file mode 100644 index de05d44aebecb..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.Errors; - -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Map; - -/** - * Possible error codes. 
- * - * - {@link Errors#GROUP_AUTHORIZATION_FAILED} - * - {@link Errors#NOT_COORDINATOR} - * - {@link Errors#COORDINATOR_NOT_AVAILABLE} - * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} - * - {@link Errors#INVALID_REQUEST} - * - {@link Errors#UNKNOWN_MEMBER_ID} - * - {@link Errors#GROUP_MAX_SIZE_REACHED} - */ -public class ShareGroupHeartbeatResponse extends AbstractResponse { - private final ShareGroupHeartbeatResponseData data; - - public ShareGroupHeartbeatResponse(ShareGroupHeartbeatResponseData data) { - super(ApiKeys.SHARE_GROUP_HEARTBEAT); - this.data = data; - } - - @Override - public ShareGroupHeartbeatResponseData data() { - return data; - } - - @Override - public Map errorCounts() { - return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); - } - - @Override - public int throttleTimeMs() { - return data.throttleTimeMs(); - } - - @Override - public void maybeSetThrottleTimeMs(int throttleTimeMs) { - data.setThrottleTimeMs(throttleTimeMs); - } - - public static ShareGroupHeartbeatResponse parse(ByteBuffer buffer, short version) { - return new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData( - new ByteBufferAccessor(buffer), version)); - } -} diff --git a/clients/src/main/resources/common/message/FindCoordinatorRequest.json b/clients/src/main/resources/common/message/FindCoordinatorRequest.json index 43e6fe5014b26..42b2f4c891ad5 100644 --- a/clients/src/main/resources/common/message/FindCoordinatorRequest.json +++ b/clients/src/main/resources/common/message/FindCoordinatorRequest.json @@ -27,9 +27,7 @@ // Version 4 adds support for batching via CoordinatorKeys (KIP-699) // // Version 5 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - // - // Version 6 adds support for share groups (KIP-932). - "validVersions": "0-6", + "validVersions": "0-5", "deprecatedVersions": "0", "flexibleVersions": "3+", "fields": [ diff --git a/clients/src/main/resources/common/message/FindCoordinatorResponse.json b/clients/src/main/resources/common/message/FindCoordinatorResponse.json index be0479f908c96..860d655a252b2 100644 --- a/clients/src/main/resources/common/message/FindCoordinatorResponse.json +++ b/clients/src/main/resources/common/message/FindCoordinatorResponse.json @@ -26,9 +26,7 @@ // Version 4 adds support for batching via Coordinators (KIP-699) // // Version 5 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - // - // Version 6 adds support for share groups (KIP-932). - "validVersions": "0-6", + "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ListGroupsRequest.json b/clients/src/main/resources/common/message/ListGroupsRequest.json index a872165d516cf..32defaa203382 100644 --- a/clients/src/main/resources/common/message/ListGroupsRequest.json +++ b/clients/src/main/resources/common/message/ListGroupsRequest.json @@ -25,9 +25,7 @@ // Version 4 adds the StatesFilter field (KIP-518). // // Version 5 adds the TypesFilter field (KIP-848). - // - // Version 6 adds support for share groups (KIP-932). 
- "validVersions": "0-6", + "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "StatesFilter", "type": "[]string", "versions": "4+", diff --git a/clients/src/main/resources/common/message/ListGroupsResponse.json b/clients/src/main/resources/common/message/ListGroupsResponse.json index 77f1c89e34a38..fc4077c080f46 100644 --- a/clients/src/main/resources/common/message/ListGroupsResponse.json +++ b/clients/src/main/resources/common/message/ListGroupsResponse.json @@ -27,9 +27,7 @@ // Version 4 adds the GroupState field (KIP-518). // // Version 5 adds the GroupType field (KIP-848). - // - // Version 6 adds support for share groups (KIP-932). - "validVersions": "0-6", + "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json b/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json deleted file mode 100644 index db534cb4c1c13..0000000000000 --- a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json +++ /dev/null @@ -1,53 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 79, - "type": "request", - "listeners": ["broker"], - "name": "ShareAcknowledgeRequest", - "validVersions": "0", - "flexibleVersions": "0+", - // The ShareAcknowledgeRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, - "fields": [ - { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", - "about": "The group identifier." }, - { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", - "about": "The member ID." }, - { "name": "ShareSessionEpoch", "type": "int32", "versions": "0+", - "about": "The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests." }, - { "name": "Topics", "type": "[]AcknowledgeTopic", "versions": "0+", - "about": "The topics containing records to acknowledge.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]AcknowledgePartition", "versions": "0+", - "about": "The partitions containing records to acknowledge.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." 
}, - { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", - "about": "Record batches to acknowledge.", "fields": [ - { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "First offset of batch of records to acknowledge."}, - { "name": "LastOffset", "type": "int64", "versions": "0+", - "about": "Last offset (inclusive) of batch of records to acknowledge."}, - { "name": "AcknowledgeTypes", "type": "[]int8", "versions": "0+", - "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject."} - ]} - ]} - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json deleted file mode 100644 index 638ca10c64b3b..0000000000000 --- a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 79, - "type": "response", - "name": "ShareAcknowledgeResponse", - "validVersions": "0", - "flexibleVersions": "0+", - // Supported errors: - // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - TOPIC_AUTHORIZATION_FAILED (version 0+) - // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) - // - SHARE_SESSION_NOT_FOUND (version 0+) - // - INVALID_SHARE_SESSION_EPOCH (version 0+) - // - NOT_LEADER_OR_FOLLOWER (version 0+) - // - UNKNOWN_TOPIC_ID (version 0+) - // - INVALID_RECORD_STATE (version 0+) - // - KAFKA_STORAGE_ERROR (version 0+) - // - INVALID_REQUEST (version 0+) - // - UNKNOWN_SERVER_ERROR (version 0+) - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", "ignorable": true, - "about": "The top level response error code." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The top-level error message, or null if there was no error." }, - { "name": "Responses", "type": "[]ShareAcknowledgeTopicResponse", "versions": "0+", - "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The topic partitions.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error." 
}, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The error message, or null if there was no error." }, - { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", "fields": [ - { "name": "LeaderId", "type": "int32", "versions": "0+", - "about": "The ID of the current leader or -1 if the leader is unknown." }, - { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch." } - ]} - ]} - ]}, - { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "0+", - "about": "Endpoints for all current leaders enumerated in PartitionData with error NOT_LEADER_OR_FOLLOWER.", "fields": [ - { "name": "NodeId", "type": "int32", "versions": "0+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, - { "name": "Host", "type": "string", "versions": "0+", - "about": "The node's hostname." }, - { "name": "Port", "type": "int32", "versions": "0+", - "about": "The node's port." }, - { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The rack of the node, or null if it has not been assigned to a rack." } - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareFetchRequest.json b/clients/src/main/resources/common/message/ShareFetchRequest.json deleted file mode 100644 index d0b59dcb26a80..0000000000000 --- a/clients/src/main/resources/common/message/ShareFetchRequest.json +++ /dev/null @@ -1,67 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 78, - "type": "request", - "listeners": ["broker"], - "name": "ShareFetchRequest", - "validVersions": "0", - "flexibleVersions": "0+", - // The ShareFetchRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, - "fields": [ - { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", - "about": "The group identifier." }, - { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", - "about": "The member ID." }, - { "name": "ShareSessionEpoch", "type": "int32", "versions": "0+", - "about": "The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests." }, - { "name": "MaxWaitMs", "type": "int32", "versions": "0+", - "about": "The maximum time in milliseconds to wait for the response." }, - { "name": "MinBytes", "type": "int32", "versions": "0+", - "about": "The minimum bytes to accumulate in the response." 
}, - { "name": "MaxBytes", "type": "int32", "versions": "0+", "default": "0x7fffffff", "ignorable": true, - "about": "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored." }, - { "name": "Topics", "type": "[]FetchTopic", "versions": "0+", - "about": "The topics to fetch.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]FetchPartition", "versions": "0+", - "about": "The partitions to fetch.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." }, - { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", - "about": "The maximum bytes to fetch from this partition. 0 when only acknowledgement with no fetching is required. See KIP-74 for cases where this limit may not be honored." }, - { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", - "about": "Record batches to acknowledge.", "fields": [ - { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "First offset of batch of records to acknowledge."}, - { "name": "LastOffset", "type": "int64", "versions": "0+", - "about": "Last offset (inclusive) of batch of records to acknowledge."}, - { "name": "AcknowledgeTypes", "type": "[]int8", "versions": "0+", - "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject."} - ]} - ]} - ]}, - { "name": "ForgottenTopicsData", "type": "[]ForgottenTopic", "versions": "0+", "ignorable": false, - "about": "The partitions to remove from this share session.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]int32", "versions": "0+", - "about": "The partitions indexes to forget." } - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareFetchResponse.json b/clients/src/main/resources/common/message/ShareFetchResponse.json deleted file mode 100644 index 5338e1208a7bc..0000000000000 --- a/clients/src/main/resources/common/message/ShareFetchResponse.json +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -{ - "apiKey": 78, - "type": "response", - "name": "ShareFetchResponse", - "validVersions": "0", - "flexibleVersions": "0+", - // Supported errors for ErrorCode and AcknowledgeErrorCode: - // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - TOPIC_AUTHORIZATION_FAILED (version 0+) - // - SHARE_SESSION_NOT_FOUND (version 0+) - // - INVALID_SHARE_SESSION_EPOCH (version 0+) - // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) - // - NOT_LEADER_OR_FOLLOWER (version 0+) - // - UNKNOWN_TOPIC_ID (version 0+) - // - INVALID_RECORD_STATE (version 0+) - only for AcknowledgeErrorCode - // - KAFKA_STORAGE_ERROR (version 0+) - // - CORRUPT_MESSAGE (version 0+) - // - INVALID_REQUEST (version 0+) - // - UNKNOWN_SERVER_ERROR (version 0+) - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", "ignorable": true, - "about": "The top-level response error code." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The top-level error message, or null if there was no error." }, - { "name": "Responses", "type": "[]ShareFetchableTopicResponse", "versions": "0+", - "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, - { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The topic partitions.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The fetch error code, or 0 if there was no fetch error." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The fetch error message, or null if there was no fetch error." }, - { "name": "AcknowledgeErrorCode", "type": "int16", "versions": "0+", - "about": "The acknowledge error code, or 0 if there was no acknowledge error." }, - { "name": "AcknowledgeErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The acknowledge error message, or null if there was no acknowledge error." }, - { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", "fields": [ - { "name": "LeaderId", "type": "int32", "versions": "0+", - "about": "The ID of the current leader or -1 if the leader is unknown." }, - { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch." 
} - ]}, - { "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0+", "about": "The record data."}, - { "name": "AcquiredRecords", "type": "[]AcquiredRecords", "versions": "0+", "about": "The acquired records.", "fields": [ - {"name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The earliest offset in this batch of acquired records."}, - {"name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this batch of acquired records."}, - {"name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count of this batch of acquired records."} - ]} - ]} - ]}, - { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "0+", - "about": "Endpoints for all current leaders enumerated in PartitionData with error NOT_LEADER_OR_FOLLOWER.", "fields": [ - { "name": "NodeId", "type": "int32", "versions": "0+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, - { "name": "Host", "type": "string", "versions": "0+", - "about": "The node's hostname." }, - { "name": "Port", "type": "int32", "versions": "0+", - "about": "The node's port." }, - { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The rack of the node, or null if it has not been assigned to a rack." } - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json deleted file mode 100644 index c95790c9b198f..0000000000000 --- a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json +++ /dev/null @@ -1,33 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 77, - "type": "request", - "listeners": ["broker"], - "name": "ShareGroupDescribeRequest", - "validVersions": "0", - "flexibleVersions": "0+", - // The ShareGroupDescribeRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, - "fields": [ - { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", - "about": "The ids of the groups to describe" }, - { "name": "IncludeAuthorizedOperations", "type": "bool", "versions": "0+", - "about": "Whether to include authorized operations." 
} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json b/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json deleted file mode 100644 index c093b788bfc2f..0000000000000 --- a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 77, - "type": "response", - "name": "ShareGroupDescribeResponse", - "validVersions": "0", - "flexibleVersions": "0+", - // Supported errors: - // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - NOT_COORDINATOR (version 0+) - // - COORDINATOR_NOT_AVAILABLE (version 0+) - // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) - // - INVALID_REQUEST (version 0+) - // - INVALID_GROUP_ID (version 0+) - // - GROUP_ID_NOT_FOUND (version 0+) - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "Groups", "type": "[]DescribedGroup", "versions": "0+", - "about": "Each described group.", - "fields": [ - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The describe error, or 0 if there was no error." }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The top-level error message, or null if there was no error." }, - { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", - "about": "The group ID string." }, - { "name": "GroupState", "type": "string", "versions": "0+", - "about": "The group state string, or the empty string." }, - { "name": "GroupEpoch", "type": "int32", "versions": "0+", - "about": "The group epoch." }, - { "name": "AssignmentEpoch", "type": "int32", "versions": "0+", - "about": "The assignment epoch." }, - { "name": "AssignorName", "type": "string", "versions": "0+", - "about": "The selected assignor." }, - { "name": "Members", "type": "[]Member", "versions": "0+", - "about": "The members.", - "fields": [ - { "name": "MemberId", "type": "string", "versions": "0+", - "about": "The member ID." }, - { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The member rack ID." }, - { "name": "MemberEpoch", "type": "int32", "versions": "0+", - "about": "The current member epoch." }, - { "name": "ClientId", "type": "string", "versions": "0+", - "about": "The client ID." }, - { "name": "ClientHost", "type": "string", "versions": "0+", - "about": "The client host." 
}, - { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "entityType": "topicName", - "about": "The subscribed topic names." }, - { "name": "Assignment", "type": "Assignment", "versions": "0+", - "about": "The current assignment." } - ]}, - { "name": "AuthorizedOperations", "type": "int32", "versions": "0+", "default": "-2147483648", - "about": "32-bit bitfield to represent authorized operations for this group." } - ] - } - ], - "commonStructs": [ - { "name": "TopicPartitions", "versions": "0+", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic ID." }, - { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name." }, - { "name": "Partitions", "type": "[]int32", "versions": "0+", - "about": "The partitions." } - ]}, - { "name": "Assignment", "versions": "0+", "fields": [ - { "name": "TopicPartitions", "type": "[]TopicPartitions", "versions": "0+", - "about": "The assigned topic-partitions to the member." } - ]} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json deleted file mode 100644 index 7d28c116454d3..0000000000000 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 76, - "type": "request", - "listeners": ["broker"], - "name": "ShareGroupHeartbeatRequest", - "validVersions": "0", - "flexibleVersions": "0+", - // The ShareGroupHeartbeatRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, - "fields": [ - { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", - "about": "The group identifier." }, - { "name": "MemberId", "type": "string", "versions": "0+", - "about": "The member ID generated by the coordinator. The member ID must be kept during the entire lifetime of the member." }, - { "name": "MemberEpoch", "type": "int32", "versions": "0+", - "about": "The current member epoch; 0 to join the group; -1 to leave the group." }, - { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise." }, - { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "null if it didn't change since the last heartbeat; the subscribed topic names otherwise." 
} - ] -} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json deleted file mode 100644 index e692839f29bf9..0000000000000 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json +++ /dev/null @@ -1,57 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one or more -// contributor license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright ownership. -// The ASF licenses this file to You under the Apache License, Version 2.0 -// (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -{ - "apiKey": 76, - "type": "response", - "name": "ShareGroupHeartbeatResponse", - "validVersions": "0", - "flexibleVersions": "0+", - // Supported errors: - // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - NOT_COORDINATOR (version 0+) - // - COORDINATOR_NOT_AVAILABLE (version 0+) - // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) - // - INVALID_REQUEST (version 0+) - // - UNKNOWN_MEMBER_ID (version 0+) - // - GROUP_MAX_SIZE_REACHED (version 0+) - "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top-level error code, or 0 if there was no error" }, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The top-level error message, or null if there was no error." }, - { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The member ID generated by the coordinator. Only provided when the member joins with MemberEpoch == 0." }, - { "name": "MemberEpoch", "type": "int32", "versions": "0+", - "about": "The member epoch." }, - { "name": "HeartbeatIntervalMs", "type": "int32", "versions": "0+", - "about": "The heartbeat interval in milliseconds." }, - { "name": "Assignment", "type": "Assignment", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "null if not provided; the assignment otherwise.", "fields": [ - { "name": "TopicPartitions", "type": "[]TopicPartitions", "versions": "0+", - "about": "The partitions assigned to the member." } - ]} - ], - "commonStructs": [ - { "name": "TopicPartitions", "versions": "0+", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic ID." }, - { "name": "Partitions", "type": "[]int32", "versions": "0+", - "about": "The partitions." 
} - ]} - ] -} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index 82487bd418429..512a7cea76681 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.ElectionType; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; -import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; @@ -211,14 +210,6 @@ import org.apache.kafka.common.message.SaslAuthenticateResponseData; import org.apache.kafka.common.message.SaslHandshakeRequestData; import org.apache.kafka.common.message.SaslHandshakeResponseData; -import org.apache.kafka.common.message.ShareAcknowledgeRequestData; -import org.apache.kafka.common.message.ShareAcknowledgeResponseData; -import org.apache.kafka.common.message.ShareFetchRequestData; -import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.message.ShareGroupDescribeRequestData; -import org.apache.kafka.common.message.ShareGroupDescribeResponseData; -import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; -import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaTopicState; import org.apache.kafka.common.message.StopReplicaResponseData; @@ -1010,10 +1001,6 @@ public void testErrorCountsIncludesNone() { assertEquals(1, createTxnOffsetCommitResponse().errorCounts().get(Errors.NONE)); assertEquals(1, createUpdateMetadataResponse().errorCounts().get(Errors.NONE)); assertEquals(1, createWriteTxnMarkersResponse().errorCounts().get(Errors.NONE)); - assertEquals(1, createShareGroupHeartbeatResponse().errorCounts().get(Errors.NONE)); - assertEquals(1, createShareGroupDescribeResponse().errorCounts().get(Errors.NONE)); - assertEquals(2, createShareFetchResponse().errorCounts().get(Errors.NONE)); - assertEquals(2, createShareAcknowledgeResponse().errorCounts().get(Errors.NONE)); } private AbstractRequest getRequest(ApiKeys apikey, short version) { @@ -1094,10 +1081,6 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsRequest(version); case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesRequest(version); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsRequest(version); - case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatRequest(version); - case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeRequest(version); - case SHARE_FETCH: return createShareFetchRequest(version); - case SHARE_ACKNOWLEDGE: return createShareAcknowledgeRequest(version); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1180,10 +1163,6 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsResponse(); case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesResponse(); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsResponse(); - case 
SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatResponse(); - case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeResponse(); - case SHARE_FETCH: return createShareFetchResponse(); - case SHARE_ACKNOWLEDGE: return createShareAcknowledgeResponse(); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1351,114 +1330,6 @@ private ConsumerGroupHeartbeatResponse createConsumerGroupHeartbeatResponse() { return new ConsumerGroupHeartbeatResponse(data); } - private ShareGroupHeartbeatRequest createShareGroupHeartbeatRequest(short version) { - ShareGroupHeartbeatRequestData data = new ShareGroupHeartbeatRequestData() - .setGroupId("group") - .setMemberId("memberid") - .setMemberEpoch(10) - .setRackId("rackid") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")); - return new ShareGroupHeartbeatRequest.Builder(data).build(version); - } - - private ShareGroupHeartbeatResponse createShareGroupHeartbeatResponse() { - ShareGroupHeartbeatResponseData data = new ShareGroupHeartbeatResponseData() - .setErrorCode(Errors.NONE.code()) - .setThrottleTimeMs(1000) - .setMemberId("memberid") - .setMemberEpoch(11) - .setAssignment(new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Arrays.asList(0, 1, 2)), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Arrays.asList(3, 4, 5)) - )) - ); - return new ShareGroupHeartbeatResponse(data); - } - - private ShareGroupDescribeRequest createShareGroupDescribeRequest(short version) { - ShareGroupDescribeRequestData data = new ShareGroupDescribeRequestData() - .setGroupIds(Collections.singletonList("group")) - .setIncludeAuthorizedOperations(false); - return new ShareGroupDescribeRequest.Builder(data).build(version); - } - - private ShareGroupDescribeResponse createShareGroupDescribeResponse() { - ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData() - .setGroups(Collections.singletonList( - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId("group") - .setErrorCode((short) 0) - .setErrorMessage(Errors.forCode((short) 0).message()) - .setGroupState(ShareGroupState.EMPTY.toString()) - .setMembers(new ArrayList<>(0)) - )) - .setThrottleTimeMs(1000); - return new ShareGroupDescribeResponse(data); - } - - private ShareFetchRequest createShareFetchRequest(short version) { - ShareFetchRequestData data = new ShareFetchRequestData() - .setGroupId("group") - .setMemberId(Uuid.randomUuid().toString()) - .setTopics(singletonList(new ShareFetchRequestData.FetchTopic() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))); - return new ShareFetchRequest.Builder(data).build(version); - } - - private ShareFetchResponse createShareFetchResponse() { - ShareFetchResponseData data = new ShareFetchResponseData(); - MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("blah".getBytes())); - ShareFetchResponseData.PartitionData partition = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(singletonList(new ShareFetchResponseData.AcquiredRecords() - .setFirstOffset(0) - .setLastOffset(0) - .setDeliveryCount((short) 1))); - ShareFetchResponseData.ShareFetchableTopicResponse response = new 
ShareFetchResponseData.ShareFetchableTopicResponse() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(partition)); - - data.setResponses(singletonList(response)); - data.setThrottleTimeMs(345); - data.setErrorCode(Errors.NONE.code()); - return new ShareFetchResponse(data); - } - - private ShareAcknowledgeRequest createShareAcknowledgeRequest(short version) { - ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData() - .setMemberId(Uuid.randomUuid().toString()) - .setTopics(singletonList(new ShareAcknowledgeRequestData.AcknowledgeTopic() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareAcknowledgeRequestData.AcknowledgePartition() - .setPartitionIndex(0) - .setAcknowledgementBatches(singletonList(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(0) - .setAcknowledgeTypes(Collections.singletonList((byte) 0)))))))); - return new ShareAcknowledgeRequest.Builder(data).build(version); - } - - private ShareAcknowledgeResponse createShareAcknowledgeResponse() { - ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); - data.setResponses(singletonList(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() - .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()))))); - data.setThrottleTimeMs(345); - data.setErrorCode(Errors.NONE.code()); - return new ShareAcknowledgeResponse(data); - } - private ControllerRegistrationRequest createControllerRegistrationRequest(short version) { ControllerRegistrationRequestData data = new ControllerRegistrationRequestData(). setControllerId(3). diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java index 37673ee05577d..9a47a0e7530bb 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java @@ -190,15 +190,15 @@ private HttpResponse httpRequest(HttpClient client, String url, String me "Unexpected status code when handling forwarded request: " + responseCode); } } catch (IOException | InterruptedException | TimeoutException | ExecutionException e) { - log.error("IO error forwarding REST request to {} :", url, e); + log.error("IO error forwarding REST request: ", e); throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "IO Error trying to forward REST request: " + e.getMessage(), e); } catch (ConnectRestException e) { // catching any explicitly thrown ConnectRestException-s to preserve its status code // and to avoid getting it overridden by the more generic catch (Throwable) clause down below - log.error("Error forwarding REST request to {} :", url, e); + log.error("Error forwarding REST request", e); throw e; } catch (Throwable t) { - log.error("Error forwarding REST request to {} :", url, t); + log.error("Error forwarding REST request", t); throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "Error trying to forward REST request: " + t.getMessage(), t); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java index 9d338936dbbf0..da8e235e42411 100644 --- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
@@ -47,7 +47,7 @@ public Map<String, String> config() {
         return config;
     }
 
-    @JsonProperty("initial_state")
+    @JsonProperty
     public InitialState initialState() {
         return initialState;
     }
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java
index 3ec037734f116..6ebac341032a3 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java
@@ -61,7 +61,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -160,7 +159,7 @@ public class KafkaConfigBackingStoreMockitoTest {
         new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)),
         new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2))
     );
-    private static final Struct TARGET_STATE_STARTED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V0).put("state", "STARTED");
+
     private static final Struct TARGET_STATE_PAUSED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1)
             .put("state", "PAUSED")
             .put("state.v2", "PAUSED");
@@ -1185,147 +1184,6 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
-    @Test
-    public void testPutTaskConfigsZeroTasks() throws Exception {
-        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
-        verifyConfigure();
-        configStorage.start();
-        verify(configLog).start();
-
-        // Records to be read by consumer as it reads to the end of the log
-        doAnswer(expectReadToEnd(new LinkedHashMap<>())).
-        doAnswer(expectReadToEnd(Collections.singletonMap(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0))))
-                .when(configLog).readToEnd();
-
-        expectConvertWriteRead(
-                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
-                "tasks", 0); // We have 0 tasks
-
-        // Bootstrap as if we had already added the connector, but no tasks had been added yet
-        addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
-
-
-        // Null before writing
-        ClusterConfigState configState = configStorage.snapshot();
-        assertEquals(-1, configState.offset());
-
-        // Writing task configs should block until all the writes have been performed and the root record update
-        // has completed
-        List<Map<String, String>> taskConfigs = Collections.emptyList();
-        configStorage.putTaskConfigs("connector1", taskConfigs);
-
-        // Validate root config by listing all connectors and tasks
-        configState = configStorage.snapshot();
-        assertEquals(1, configState.offset());
-        String connectorName = CONNECTOR_IDS.get(0);
-        assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors()));
-        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
-        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
-
-        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
-        verify(configUpdateListener).onTaskConfigUpdate(Collections.emptyList());
-
-        configStorage.stop();
-        verify(configLog).stop();
-    }
-
-    @Test
-    public void testBackgroundUpdateTargetState() throws Exception {
-        // verify that we handle target state changes correctly when they come up through the log
-        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
-                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
-                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
-                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
-                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
-                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
-                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
-                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
-                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
-        LinkedHashMap<byte[], Struct> deserializedOnStartup = new LinkedHashMap<>();
-        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
-        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
-        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
-        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
-        logOffset = 5;
-
-        expectStart(existingRecords, deserializedOnStartup);
-        when(configLog.partitionCount()).thenReturn(1);
-
-        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
-        verifyConfigure();
-        configStorage.start();
-        verify(configLog).start();
-
-        // Should see a single connector with initial state started
-        ClusterConfigState configState = configStorage.snapshot();
-        assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet());
-        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
-
-        LinkedHashMap<String, byte[]> serializedAfterStartup = new LinkedHashMap<>();
-
serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
-        serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1));
-        doAnswer(expectReadToEnd(serializedAfterStartup)).when(configLog).readToEnd();
-
-        Map<String, Struct> deserializedAfterStartup = new HashMap<>();
-        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED);
-        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED);
-        expectRead(serializedAfterStartup, deserializedAfterStartup);
-
-        // Should see two connectors now, one paused and one stopped
-        configStorage.refresh(0, TimeUnit.SECONDS);
-        verify(configUpdateListener).onConnectorTargetStateChange(CONNECTOR_IDS.get(0));
-        configState = configStorage.snapshot();
-
-        assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet());
-        assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0)));
-        assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1)));
-
-        configStorage.stop();
-        verify(configStorage).stop();
-    }
-
-    @Test
-    public void testSameTargetState() throws Exception {
-        // verify that we handle target state changes correctly when they come up through the log
-        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
-                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
-                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
-                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
-                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
-                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
-                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
-                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
-                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
-        LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
-        deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
-        deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
-        deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
-        deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
-        logOffset = 5;
-
-        expectStart(existingRecords, deserialized);
-
-        when(configLog.partitionCount()).thenReturn(1);
-
-        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
-        verifyConfigure();
-        configStorage.start();
-        verify(configLog).start();
-
-        ClusterConfigState configState = configStorage.snapshot();
-        expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED);
-        // Should see a single connector with initial state paused
-        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
-
-        expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED);
-        // on resume update listener shouldn't be called
-        verify(configUpdateListener, never()).onConnectorTargetStateChange(anyString());
-
-        configStorage.stop();
-        verify(configStorage).stop();
-    }
-
-
     @Test
     public void testPutLogLevel() throws Exception {
         final String logger1 = "org.apache.zookeeper";
@@ -1435,12 +1293,6 @@ private void expectRead(LinkedHashMap<String, byte[]> serializedValues,
         }
     }
 
-    private void expectRead(final String key, final byte[] serializedValue, Struct deserializedValue) {
-        LinkedHashMap<String, byte[]> serializedData = new LinkedHashMap<>();
-        serializedData.put(key,
serializedValue);
-        expectRead(serializedData, Collections.singletonMap(key, deserializedValue));
-    }
-
     // This map needs to maintain ordering
     private Answer<Future<Void>> expectReadToEnd(final Map<String, byte[]> serializedConfigs) {
         return invocation -> {
@@ -1463,11 +1315,4 @@ private Map<String, Object> structToMap(Struct struct) {
         for (Field field : struct.schema().fields())
             result.put(field.name(), struct.get(field));
         return result;
     }
-
-    private void addConnector(String connectorName, Map<String, String> connectorConfig, List<Map<String, String>> taskConfigs) {
-        for (int i = 0; i < taskConfigs.size(); i++)
-            configStorage.taskConfigs.put(new ConnectorTaskId(connectorName, i), taskConfigs.get(i));
-        configStorage.connectorConfigs.put(connectorName, connectorConfig);
-        configStorage.connectorTaskCounts.put(connectorName, taskConfigs.size());
-    }
 }
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
index 2e7b388413c55..ae5f82cd3eeb2 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
@@ -28,6 +28,7 @@
 import org.apache.kafka.connect.data.Schema;
 import org.apache.kafka.connect.data.SchemaAndValue;
 import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.runtime.TargetState;
 import org.apache.kafka.connect.runtime.WorkerConfig;
 import org.apache.kafka.connect.runtime.distributed.DistributedConfig;
 import org.apache.kafka.connect.util.Callback;
@@ -51,11 +52,13 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
 import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.INCLUDE_TASKS_FIELD_NAME;
@@ -427,6 +430,167 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception {
         PowerMock.verifyAll();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        expectConfigure();
+        expectStart(Collections.emptyList(), Collections.emptyMap());
+
+        // Task configs should read to end, write to the log, read to end, write root.
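The `+` comment above compresses the whole sequence that putTaskConfigs drives against the config topic. A minimal sketch of that read-write-read-write protocol, assuming a hypothetical ConfigLog interface as a stand-in for the real KafkaBasedLog (the task-/commit- key shapes are illustrative only):

// Hypothetical sketch of the sequence named in the comment above; ConfigLog is
// an assumed stand-in, not the actual KafkaBasedLog API.
interface ConfigLog {
    void readToEnd();                     // block until everything written so far is consumed
    void send(String key, byte[] value);  // append one record to the config topic
}

class TaskConfigWriter {
    private final ConfigLog log;

    TaskConfigWriter(ConfigLog log) {
        this.log = log;
    }

    void putTaskConfigs(String connector, byte[][] serializedTaskConfigs) {
        log.readToEnd();                                   // 1. read to end: pick up other workers' writes
        for (int i = 0; i < serializedTaskConfigs.length; i++)
            log.send("task-" + connector + "-" + i,        // 2. write each task config
                     serializedTaskConfigs[i]);
        log.readToEnd();                                   // 3. read to end again so our writes are visible
        log.send("commit-" + connector, new byte[0]);      // 4. write the root (commit) record; payload elided here
    }
}

The second readToEnd() is what makes the call block until the root record is visible, which is what the test then asserts through configStorage.snapshot().offset().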
+        expectReadToEnd(new LinkedHashMap<>());
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        configUpdateListener.onTaskConfigUpdate(Collections.emptyList());
+        EasyMock.expectLastCall();
+
+        // Records to be read by consumer as it reads to the end of the log
+        LinkedHashMap<String, byte[]> serializedConfigs = new LinkedHashMap<>();
+        serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
+        expectReadToEnd(serializedConfigs);
+
+        expectPartitionCount(1);
+        expectStop();
+
+        PowerMock.replayAll();
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Writing task configs should block until all the writes have been performed and the root record update
+        // has completed
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        assertEquals(1, configState.offset());
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
+        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
+
+        configStorage.stop();
+
+        PowerMock.verifyAll();
+    }
+
+    @Test
+    public void testBackgroundUpdateTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+
+        expectConfigure();
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserializedOnStartup = new LinkedHashMap<>();
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserializedOnStartup);
+
+        LinkedHashMap<String, byte[]> serializedAfterStartup = new LinkedHashMap<>();
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1));
+
+        Map<String, Struct> deserializedAfterStartup =
new HashMap<>();
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED);
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED);
+
+        expectRead(serializedAfterStartup, deserializedAfterStartup);
+
+        configUpdateListener.onConnectorTargetStateChange(CONNECTOR_IDS.get(0));
+        EasyMock.expectLastCall();
+
+        expectPartitionCount(1);
+        expectStop();
+
+        PowerMock.replayAll();
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        configStorage.start();
+
+        // Should see a single connector with initial state started
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
+
+        // Should see two connectors now, one paused and one stopped
+        configStorage.refresh(0, TimeUnit.SECONDS);
+        configState = configStorage.snapshot();
+        assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0)));
+        assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1)));
+
+        configStorage.stop();
+
+        PowerMock.verifyAll();
+    }
+
+    @Test
+    public void testSameTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+
+        expectConfigure();
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
+        deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserialized);
+
+        // on resume update listener shouldn't be called
+        configUpdateListener.onConnectorTargetStateChange(EasyMock.anyString());
+        EasyMock.expectLastCall().andStubThrow(new AssertionError("unexpected call to onConnectorTargetStateChange"));
+
+        expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED);
+
+        expectPartitionCount(1);
+        expectStop();
+
+        PowerMock.replayAll();
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        configStorage.start();
+
+        // Should see a single connector with initial state paused
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
+
+        configStorage.refresh(0, TimeUnit.SECONDS);
+
+        configStorage.stop();
+
+        PowerMock.verifyAll();
+    }
+
     private void expectConfigure() throws Exception {
         PowerMock.expectPrivate(configStorage, "createKafkaBasedLog",
EasyMock.capture(capturedTopic),
                EasyMock.capture(capturedProducerProps),
@@ -472,6 +636,12 @@ private void expectRead(LinkedHashMap<String, byte[]> serializedValues,
         }
     }
 
+    private void expectRead(final String key, final byte[] serializedValue, Struct deserializedValue) {
+        LinkedHashMap<String, byte[]> serializedData = new LinkedHashMap<>();
+        serializedData.put(key, serializedValue);
+        expectRead(serializedData, Collections.singletonMap(key, deserializedValue));
+    }
+
     // Expect a conversion & write to the underlying log, followed by a subsequent read when the data is consumed back
     // from the log. Validate the data that is captured when the conversion is performed matches the specified data
     // (by checking a single field's value)
diff --git a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java
index 1d422461678f5..6ffd741f4fc64 100644
--- a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java
+++ b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java
@@ -179,7 +179,7 @@ public KafkaApis build() {
         if (metrics == null) throw new RuntimeException("You must set metrics");
         if (quotas == null) throw new RuntimeException("You must set quotas");
         if (fetchManager == null) throw new RuntimeException("You must set fetchManager");
-        if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().enableRemoteStorageSystem());
+        if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled);
         if (apiVersionManager == null) throw new RuntimeException("You must set apiVersionManager");
 
         return new KafkaApis(requestChannel,
diff --git a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java
index 5e8cf2dcdc64c..82aa75909abba 100644
--- a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java
+++ b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java
@@ -185,7 +185,7 @@ public ReplicaManager build() {
         if (metadataCache == null) throw new RuntimeException("You must set metadataCache");
         if (logDirFailureChannel == null) throw new RuntimeException("You must set logDirFailureChannel");
         if (alterPartitionManager == null) throw new RuntimeException("You must set alterIsrManager");
-        if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().enableRemoteStorageSystem());
+        if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled);
         // Initialize metrics in the end just before passing it to ReplicaManager to ensure ReplicaManager closes the
         // metrics correctly. There might be a resource leak if it is initialized and an exception occurs between
         // its initialization and creation of ReplicaManager.
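Both builder hunks above make the same substitution that recurs through the rest of this patch (KafkaServer, BrokerServer, LogManager, ConfigHandler, ControllerConfigurationValidator): the remoteLogManagerConfig().enableRemoteStorageSystem() getter chain is replaced by isRemoteLogStorageSystemEnabled, which the KafkaConfig.scala hunk further down computes once from RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP. A minimal sketch of that pattern, with ExampleConfig as an assumed illustration rather than the real KafkaConfig:

import java.util.Map;

// Sketch only: compute a derived flag once at construction so call sites read a
// stable field instead of re-walking a getter chain on every use. ExampleConfig
// is assumed; only the property name comes from the patch, where it is the value
// of RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP.
class ExampleConfig {
    static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP = "remote.log.storage.system.enable";

    final boolean isRemoteLogStorageSystemEnabled;

    ExampleConfig(Map<String, ?> props) {
        Object value = props.get(REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP);
        this.isRemoteLogStorageSystemEnabled = value != null && Boolean.parseBoolean(value.toString());
    }
}

The KafkaConfig.scala change later in this patch takes the same approach, declaring the flag as a val so it is evaluated once when the config object is built.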
diff --git a/core/src/main/scala/kafka/log/LogCleaner.scala b/core/src/main/scala/kafka/log/LogCleaner.scala index 1265e979373cd..0b166c62535e9 100644 --- a/core/src/main/scala/kafka/log/LogCleaner.scala +++ b/core/src/main/scala/kafka/log/LogCleaner.scala @@ -158,21 +158,14 @@ class LogCleaner(initialConfig: CleanerConfig, } } - /** - * Stop the background cleaner threads - */ - private[this] def shutdownCleaners(): Unit = { - info("Shutting down the log cleaner.") - cleaners.foreach(_.shutdown()) - cleaners.clear() - } - /** * Stop the background cleaner threads */ def shutdown(): Unit = { + info("Shutting down the log cleaner.") try { - shutdownCleaners() + cleaners.foreach(_.shutdown()) + cleaners.clear() } finally { removeMetrics() } @@ -227,8 +220,8 @@ class LogCleaner(initialConfig: CleanerConfig, info(s"Updating logCleanerIoMaxBytesPerSecond: $maxIoBytesPerSecond") throttler.updateDesiredRatePerSec(maxIoBytesPerSecond) } - // call shutdownCleaners() instead of shutdown to avoid unnecessary deletion of metrics - shutdownCleaners() + + shutdown() startup() } diff --git a/core/src/main/scala/kafka/log/LogManager.scala b/core/src/main/scala/kafka/log/LogManager.scala index d7599e569ab25..3bc6533117cba 100755 --- a/core/src/main/scala/kafka/log/LogManager.scala +++ b/core/src/main/scala/kafka/log/LogManager.scala @@ -1562,7 +1562,7 @@ object LogManager { keepPartitionMetadataFile: Boolean): LogManager = { val defaultProps = config.extractLogConfigMap - LogConfig.validateBrokerLogConfigValues(defaultProps, config.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validateBrokerLogConfigValues(defaultProps, config.isRemoteLogStorageSystemEnabled) val defaultLogConfig = new LogConfig(defaultProps) val cleanerConfig = LogCleaner.cleanerConfig(config) diff --git a/core/src/main/scala/kafka/network/RequestConvertToJson.scala b/core/src/main/scala/kafka/network/RequestConvertToJson.scala index 0900b94ef9f4f..54986f52c85a3 100644 --- a/core/src/main/scala/kafka/network/RequestConvertToJson.scala +++ b/core/src/main/scala/kafka/network/RequestConvertToJson.scala @@ -95,10 +95,6 @@ object RequestConvertToJson { case req: RenewDelegationTokenRequest => RenewDelegationTokenRequestDataJsonConverter.write(req.data, request.version) case req: SaslAuthenticateRequest => SaslAuthenticateRequestDataJsonConverter.write(req.data, request.version) case req: SaslHandshakeRequest => SaslHandshakeRequestDataJsonConverter.write(req.data, request.version) - case req: ShareAcknowledgeRequest => ShareAcknowledgeRequestDataJsonConverter.write(req.data, request.version) - case req: ShareFetchRequest => ShareFetchRequestDataJsonConverter.write(req.data, request.version) - case req: ShareGroupDescribeRequest => ShareGroupDescribeRequestDataJsonConverter.write(req.data, request.version) - case req: ShareGroupHeartbeatRequest => ShareGroupHeartbeatRequestDataJsonConverter.write(req.data, request.version) case req: StopReplicaRequest => StopReplicaRequestDataJsonConverter.write(req.data, request.version) case req: SyncGroupRequest => SyncGroupRequestDataJsonConverter.write(req.data, request.version) case req: TxnOffsetCommitRequest => TxnOffsetCommitRequestDataJsonConverter.write(req.data, request.version) @@ -182,10 +178,6 @@ object RequestConvertToJson { case res: RenewDelegationTokenResponse => RenewDelegationTokenResponseDataJsonConverter.write(res.data, version) case res: SaslAuthenticateResponse => SaslAuthenticateResponseDataJsonConverter.write(res.data, version) case res: SaslHandshakeResponse => 
SaslHandshakeResponseDataJsonConverter.write(res.data, version) - case res: ShareAcknowledgeResponse => ShareAcknowledgeResponseDataJsonConverter.write(res.data, version) - case res: ShareFetchResponse => ShareFetchResponseDataJsonConverter.write(res.data, version) - case res: ShareGroupDescribeResponse => ShareGroupDescribeResponseDataJsonConverter.write(res.data, version) - case res: ShareGroupHeartbeatResponse => ShareGroupHeartbeatResponseDataJsonConverter.write(res.data, version) case res: StopReplicaResponse => StopReplicaResponseDataJsonConverter.write(res.data, version) case res: SyncGroupResponse => SyncGroupResponseDataJsonConverter.write(res.data, version) case res: TxnOffsetCommitResponse => TxnOffsetCommitResponseDataJsonConverter.write(res.data, version) diff --git a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala index 51bc16fb09d17..5f3fdc81887ef 100644 --- a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala +++ b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala @@ -264,11 +264,11 @@ class BrokerLifecycleManager( new OfflineDirBrokerFailureEvent(directory)) } - def resendBrokerRegistrationUnlessZkMode(): Unit = { - eventQueue.append(new ResendBrokerRegistrationUnlessZkModeEvent()) + def handleKraftJBODMetadataVersionUpdate(): Unit = { + eventQueue.append(new KraftJBODMetadataVersionUpdateEvent()) } - private class ResendBrokerRegistrationUnlessZkModeEvent extends EventQueue.Event { + private class KraftJBODMetadataVersionUpdateEvent extends EventQueue.Event { override def run(): Unit = { if (!isZkBroker) { registered = false diff --git a/core/src/main/scala/kafka/server/BrokerServer.scala b/core/src/main/scala/kafka/server/BrokerServer.scala index 5e299fc0e02a8..112a03c50a9a4 100644 --- a/core/src/main/scala/kafka/server/BrokerServer.scala +++ b/core/src/main/scala/kafka/server/BrokerServer.scala @@ -37,7 +37,7 @@ import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{ClusterResource, TopicPartition, Uuid} import org.apache.kafka.coordinator.group.metrics.{GroupCoordinatorMetrics, GroupCoordinatorRuntimeMetrics} import org.apache.kafka.coordinator.group.{CoordinatorRecord, GroupCoordinator, GroupCoordinatorConfig, GroupCoordinatorService, CoordinatorRecordSerde} -import org.apache.kafka.image.publisher.{BrokerRegistrationTracker, MetadataPublisher} +import org.apache.kafka.image.publisher.MetadataPublisher import org.apache.kafka.metadata.{BrokerState, ListenerInfo, VersionRange} import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.{AssignmentsManager, ClientMetricsManager, NodeToControllerChannelManager} @@ -139,8 +139,6 @@ class BrokerServer( var brokerMetadataPublisher: BrokerMetadataPublisher = _ - var brokerRegistrationTracker: BrokerRegistrationTracker = _ - val brokerFeatures: BrokerFeatures = BrokerFeatures.createDefault(config.unstableFeatureVersionsEnabled) def kafkaYammerMetrics: KafkaYammerMetrics = KafkaYammerMetrics.INSTANCE @@ -186,7 +184,7 @@ class BrokerServer( kafkaScheduler.startup() /* register broker metrics */ - brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.enableRemoteStorageSystem()) + brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled) quotaManagers = QuotaFactory.instantiate(config, metrics, time, s"broker-${config.nodeId}-") @@ -484,10 +482,6 @@ class BrokerServer( lifecycleManager ) metadataPublishers.add(brokerMetadataPublisher) - 
brokerRegistrationTracker = new BrokerRegistrationTracker(config.brokerId, - logManager.directoryIdsSet.toList.asJava, - () => lifecycleManager.resendBrokerRegistrationUnlessZkMode()) - metadataPublishers.add(brokerRegistrationTracker) // Register parts of the broker that can be reconfigured via dynamic configs. This needs to // be done before we publish the dynamic configs, so that we don't miss anything. diff --git a/core/src/main/scala/kafka/server/ConfigHandler.scala b/core/src/main/scala/kafka/server/ConfigHandler.scala index ed9260b21947b..1d5702e76e49d 100644 --- a/core/src/main/scala/kafka/server/ConfigHandler.scala +++ b/core/src/main/scala/kafka/server/ConfigHandler.scala @@ -70,7 +70,7 @@ class TopicConfigHandler(private val replicaManager: ReplicaManager, val logs = logManager.logsByTopic(topic) val wasRemoteLogEnabledBeforeUpdate = logs.exists(_.remoteLogEnabled()) - logManager.updateTopicConfig(topic, props, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + logManager.updateTopicConfig(topic, props, kafkaConfig.isRemoteLogStorageSystemEnabled) maybeBootstrapRemoteLogComponents(topic, logs, wasRemoteLogEnabledBeforeUpdate) } diff --git a/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala b/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala index f957b65ddd105..15eb1eff04aa3 100644 --- a/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala +++ b/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala @@ -107,8 +107,7 @@ class ControllerConfigurationValidator(kafkaConfig: KafkaConfig) extends Configu throw new InvalidConfigurationException("Null value not supported for topic configs: " + nullTopicConfigs.mkString(",")) } - LogConfig.validate(properties, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(properties, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) case BROKER => validateBrokerName(resource.name()) case CLIENT_METRICS => val properties = new Properties() diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 94a7b349af927..822310838298c 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -17,7 +17,7 @@ package kafka.server -import java.util +import java.{lang, util} import java.util.concurrent.TimeUnit import java.util.{Collections, Properties} import kafka.cluster.EndPoint @@ -1205,6 +1205,8 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami def usesTopicId: Boolean = usesSelfManagedQuorum || interBrokerProtocolVersion.isTopicIdsSupported + + val isRemoteLogStorageSystemEnabled: lang.Boolean = getBoolean(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP) def logLocalRetentionBytes: java.lang.Long = getLong(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_BYTES_PROP) def logLocalRetentionMs: java.lang.Long = getLong(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_MS_PROP) diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 738adab0fb0c1..933a5df536a5f 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -276,7 +276,7 @@ class KafkaServer( createCurrentControllerIdMetric() /* register broker metrics */ - _brokerTopicStats = new 
BrokerTopicStats(config.remoteLogManagerConfig.enableRemoteStorageSystem()) + _brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled) quotaManagers = QuotaFactory.instantiate(config, metrics, time, threadNamePrefix.getOrElse("")) KafkaBroker.notifyClusterListeners(clusterId, kafkaMetricsReporters ++ metrics.reporters.asScala) diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index a2a070bcd0331..aa56269a2f40d 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -33,7 +33,6 @@ import kafka.zk.KafkaZkClient import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult -import org.apache.kafka.common.message.DescribeLogDirsResponseData.DescribeLogDirsTopic import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState import org.apache.kafka.common.message.LeaderAndIsrResponseData.{LeaderAndIsrPartitionError, LeaderAndIsrTopicError} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic @@ -68,7 +67,7 @@ import java.util import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.locks.Lock import java.util.concurrent.{CompletableFuture, Future, RejectedExecutionException, TimeUnit} -import java.util.{Collections, Optional, OptionalInt, OptionalLong} +import java.util.{Optional, OptionalInt, OptionalLong} import scala.collection.{Map, Seq, Set, mutable} import scala.compat.java8.OptionConverters._ import scala.jdk.CollectionConverters._ @@ -1250,9 +1249,9 @@ class ReplicaManager(val config: KafkaConfig, val fileStore = Files.getFileStore(file) val totalBytes = adjustForLargeFileSystems(fileStore.getTotalSpace) val usableBytes = adjustForLargeFileSystems(fileStore.getUsableSpace) - val topicInfos = logsByDir.get(absolutePath) match { + logsByDir.get(absolutePath) match { case Some(logs) => - logs.groupBy(_.topicPartition.topic).map { case (topic, logs) => + val topicInfos = logs.groupBy(_.topicPartition.topic).map{case (topic, logs) => new DescribeLogDirsResponseData.DescribeLogDirsTopic().setName(topic).setPartitions( logs.filter { log => partitions.contains(log.topicPartition) @@ -1263,19 +1262,17 @@ class ReplicaManager(val config: KafkaConfig, .setOffsetLag(getLogEndOffsetLag(log.topicPartition, log.logEndOffset, log.isFuture)) .setIsFutureKey(log.isFuture) }.toList.asJava) - }.filterNot(_.partitions().isEmpty).toList.asJava + }.toList.asJava + + new DescribeLogDirsResponseData.DescribeLogDirsResult().setLogDir(absolutePath) + .setErrorCode(Errors.NONE.code).setTopics(topicInfos) + .setTotalBytes(totalBytes).setUsableBytes(usableBytes) case None => - Collections.emptyList[DescribeLogDirsTopic]() + new DescribeLogDirsResponseData.DescribeLogDirsResult().setLogDir(absolutePath) + .setErrorCode(Errors.NONE.code) + .setTotalBytes(totalBytes).setUsableBytes(usableBytes) } - val describeLogDirsResult = new DescribeLogDirsResponseData.DescribeLogDirsResult() - .setLogDir(absolutePath).setTopics(topicInfos) - .setErrorCode(Errors.NONE.code) - .setTotalBytes(totalBytes).setUsableBytes(usableBytes) - if (!topicInfos.isEmpty) - describeLogDirsResult.setTopics(topicInfos) - describeLogDirsResult - } catch { case e: KafkaStorageException => warn("Unable to describe replica dirs for %s".format(absolutePath), e) diff --git 
a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala index ee7bfa2157ee7..048a665757b74 100644 --- a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala @@ -29,6 +29,7 @@ import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.image.loader.LoaderManifest import org.apache.kafka.image.publisher.MetadataPublisher import org.apache.kafka.image.{MetadataDelta, MetadataImage, TopicDelta} +import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.fault.FaultHandler import java.util.concurrent.CompletableFuture @@ -128,6 +129,21 @@ class BrokerMetadataPublisher( debug(s"Publishing metadata at offset $highestOffsetAndEpoch with $metadataVersionLogMsg.") } + Option(delta.featuresDelta()).foreach { featuresDelta => + featuresDelta.metadataVersionChange().ifPresent{ metadataVersion => + info(s"Updating metadata.version to ${metadataVersion.featureLevel()} at offset $highestOffsetAndEpoch.") + val currentMetadataVersion = delta.image().features().metadataVersion() + if (currentMetadataVersion.isLessThan(MetadataVersion.IBP_3_7_IV2) && metadataVersion.isAtLeast(MetadataVersion.IBP_3_7_IV2)) { + info( + s"""Resending BrokerRegistration with existing incarnation-id to inform the + |controller about log directories in the broker following metadata update: + |previousMetadataVersion: ${delta.image().features().metadataVersion()} + |newMetadataVersion: $metadataVersion""".stripMargin.linesIterator.mkString(" ").trim) + brokerLifecycleManager.handleKraftJBODMetadataVersionUpdate() + } + } + } + // Apply topic deltas. Option(delta.topicsDelta()).foreach { topicsDelta => try { diff --git a/core/src/main/scala/kafka/zk/AdminZkClient.scala b/core/src/main/scala/kafka/zk/AdminZkClient.scala index 604e03c7ed436..efecfe854bbf2 100644 --- a/core/src/main/scala/kafka/zk/AdminZkClient.scala +++ b/core/src/main/scala/kafka/zk/AdminZkClient.scala @@ -163,7 +163,7 @@ class AdminZkClient(zkClient: KafkaZkClient, LogConfig.validate(config, kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.remoteLogManagerConfig.enableRemoteStorageSystem())) + kafkaConfig.exists(_.isRemoteLogStorageSystemEnabled)) } private def writeTopicPartitionAssignment(topic: String, replicaAssignment: Map[Int, ReplicaAssignment], @@ -481,7 +481,7 @@ class AdminZkClient(zkClient: KafkaZkClient, // remove the topic overrides LogConfig.validate(configs, kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.remoteLogManagerConfig.enableRemoteStorageSystem())) + kafkaConfig.exists(_.isRemoteLogStorageSystemEnabled)) } /** diff --git a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java index 50b581fdf4ee5..0ba5d63a8da8a 100644 --- a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java +++ b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java @@ -222,7 +222,7 @@ void setUp() throws Exception { props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true"); props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, "100"); remoteLogManagerConfig = createRLMConfig(props); - brokerTopicStats = new 
BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig().enableRemoteStorageSystem()); + brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled()); remoteLogManager = new RemoteLogManager(remoteLogManagerConfig, brokerId, logDir, clusterId, time, tp -> Optional.of(mockLog), diff --git a/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java b/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java index c0944080547d6..7a1ae920a6f44 100644 --- a/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java +++ b/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java @@ -17,64 +17,29 @@ package kafka.test.junit; -import kafka.test.ClusterConfig; import kafka.test.annotation.ClusterTemplate; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.extension.ExtensionContext; - -import java.lang.reflect.Method; -import java.util.Collections; -import java.util.List; - import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class ClusterTestExtensionsUnitTest { - - static List cfgEmpty() { - return Collections.emptyList(); - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - private ExtensionContext buildExtensionContext(String methodName) throws Exception { - ExtensionContext extensionContext = mock(ExtensionContext.class); - Class clazz = ClusterTestExtensionsUnitTest.class; - Method method = clazz.getDeclaredMethod(methodName); - when(extensionContext.getRequiredTestClass()).thenReturn(clazz); - when(extensionContext.getRequiredTestMethod()).thenReturn(method); - return extensionContext; - } - @Test - void testProcessClusterTemplate() throws Exception { + void testProcessClusterTemplate() { ClusterTestExtensions ext = new ClusterTestExtensions(); - ExtensionContext context = buildExtensionContext("cfgEmpty"); + ExtensionContext context = mock(ExtensionContext.class); ClusterTemplate annot = mock(ClusterTemplate.class); - when(annot.value()).thenReturn("").thenReturn(" ").thenReturn("cfgEmpty"); - - Assertions.assertEquals( - "ClusterTemplate value can't be empty string.", - Assertions.assertThrows(IllegalStateException.class, () -> - ext.processClusterTemplate(context, annot) - ).getMessage() - ); - + when(annot.value()).thenReturn("").thenReturn(" "); - Assertions.assertEquals( - "ClusterTemplate value can't be empty string.", - Assertions.assertThrows(IllegalStateException.class, () -> - ext.processClusterTemplate(context, annot) - ).getMessage() + Assertions.assertThrows(IllegalStateException.class, () -> + ext.processClusterTemplate(context, annot) ); - Assertions.assertEquals( - "ClusterConfig generator method should provide at least one config", - Assertions.assertThrows(IllegalStateException.class, () -> - ext.processClusterTemplate(context, annot) - ).getMessage() + Assertions.assertThrows(IllegalStateException.class, () -> + ext.processClusterTemplate(context, annot) ); } } diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala index b61eb28530ca9..99b1e35e4eed9 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala @@ -27,7 +27,7 @@ import org.apache.kafka.common.errors.CorruptRecordException import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.transaction.TransactionLogConfigs 
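[Editor's sketch] The isRemoteLogStorageSystemEnabled call sites rewritten throughout this patch all lean on the new KafkaConfig val introduced earlier, which resolves the flag once instead of reaching through RemoteLogManagerConfig on every access. A rough Java sketch of that caching idea, assuming the standard "remote.log.storage.system.enable" property key; the class and map-based lookup are illustrative, not the actual KafkaConfig code:

    import java.util.Map;

    final class CachedFlagConfig {
        // Property key as used elsewhere in this patch; default assumed false.
        private static final String REMOTE_STORAGE_ENABLE_KEY = "remote.log.storage.system.enable";

        private final boolean remoteLogStorageSystemEnabled;

        CachedFlagConfig(Map<String, ?> props) {
            // Resolved once at construction; later reads are a plain field access.
            this.remoteLogStorageSystemEnabled = Boolean.parseBoolean(
                String.valueOf(props.getOrDefault(REMOTE_STORAGE_ENABLE_KEY, "false")));
        }

        boolean isRemoteLogStorageSystemEnabled() {
            return remoteLogStorageSystemEnabled;
        }
    }

This works because the system-wide remote-storage switch is not dynamically reconfigurable, so caching it in a field is safe.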
-import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} +import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, CleanerConfig, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetMap, ProducerStateManager, ProducerStateManagerConfig} import org.apache.kafka.storage.internals.utils.Throttler @@ -80,6 +80,7 @@ class LogCleanerTest extends Logging { logs = new Pool[TopicPartition, UnifiedLog](), logDirFailureChannel = new LogDirFailureChannel(1), time = time) + val metricsToVerify = new java.util.HashMap[String, java.util.List[java.util.Map[String, String]]]() logCleaner.cleanerManager.gaugeMetricNameWithTag.asScala.foreach { metricNameAndTags => val tags = new java.util.ArrayList[java.util.Map[String, String]]() @@ -119,27 +120,6 @@ class LogCleanerTest extends Logging { } } - @Test - def testMetricsActiveAfterReconfiguration(): Unit = { - val logCleaner = new LogCleaner(new CleanerConfig(true), - logDirs = Array(TestUtils.tempDir()), - logs = new Pool[TopicPartition, UnifiedLog](), - logDirFailureChannel = new LogDirFailureChannel(1), - time = time) - - try { - logCleaner.startup() - var nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) - assertEquals(0, nonexistent.size, s"$nonexistent should be existent") - - logCleaner.reconfigure(new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")), - new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))) - - nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) - assertEquals(0, nonexistent.size, s"$nonexistent should be existent") - } finally logCleaner.shutdown() - } - /** * Test simple log cleaning */ diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala index 2670d6e6f7736..ed91c936edc10 100644 --- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala @@ -297,7 +297,7 @@ class LogConfigTest { props.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, localRetentionMs.toString) props.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, localRetentionBytes.toString) assertThrows(classOf[ConfigException], - () => LogConfig.validate(props, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(props, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) } @Test @@ -309,17 +309,17 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, 
kafkaConfig.isRemoteLogStorageSystemEnabled)) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "delete,compact") assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact,delete") assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) } @ParameterizedTest(name = "testEnableRemoteLogStorage with sysRemoteStorageEnabled: {0}") @@ -332,10 +332,10 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") if (sysRemoteStorageEnabled) { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } else { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains("Tiered Storage functionality is disabled in the broker")) } } @@ -355,10 +355,10 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_MS_CONFIG, "500") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) } else { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } @@ -377,10 +377,10 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_BYTES_CONFIG, "128") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } @@ -395,10 +395,10 @@ class LogConfigTest { if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) + () => 
LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) + LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) } } diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala index 3a69669d349e5..a7415b5d50a2e 100644 --- a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala @@ -95,7 +95,7 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio @ClusterTemplate("testApiVersionsRequestIncludesUnreleasedApisTemplate") @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "false"), new ClusterConfigProperty(key = "unstable.feature.versions.enable", value = "true"), )) def testApiVersionsRequestIncludesUnreleasedApis(): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala index b0162dc635842..34f9d139a03cc 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala @@ -285,7 +285,7 @@ class BrokerLifecycleManagerTest { assertEquals(1000L, manager.brokerEpoch) // Trigger JBOD MV update - manager.resendBrokerRegistrationUnlessZkMode() + manager.handleKraftJBODMetadataVersionUpdate() // Accept new registration, response sets epoch to 1200 nextRegistrationRequest(1200L) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 151ffb9e1847d..6b655ea7837eb 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -4095,7 +4095,7 @@ class ReplicaManagerTest { val config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props) val remoteLogManagerConfig = new RemoteLogManagerConfig(config) val mockLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.enableRemoteStorageSystem()) + val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled) val remoteLogManager = new RemoteLogManager( remoteLogManagerConfig, 0, @@ -4195,7 +4195,7 @@ class ReplicaManagerTest { val config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props) val remoteLogManagerConfig = new RemoteLogManagerConfig(config) val dummyLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.enableRemoteStorageSystem()) + val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled) val remoteLogManager = new RemoteLogManager( remoteLogManagerConfig, 0, @@ -6450,39 +6450,6 @@ class ReplicaManagerTest { assertEquals(Errors.NONE.code, response.errorCode) 
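[Editor's sketch] The LogConfigTest hunks above pin down one rule: a topic with remote.log.storage.enable=true may only use cleanup.policy=delete; "compact", "delete,compact", and "compact,delete" are all rejected. A minimal sketch of that rule with hypothetical names (the real check lives inside LogConfig.validate):

    // Anything other than exactly "delete" must be rejected when remote
    // storage is enabled, matching the four assertions in the tests above.
    static void checkRemoteStorageCleanupPolicy(boolean remoteStorageEnabled, String cleanupPolicy) {
        if (remoteStorageEnabled && !"delete".equals(cleanupPolicy.trim())) {
            throw new IllegalArgumentException(
                "Remote log storage only supports cleanup.policy=delete, got: " + cleanupPolicy);
        }
    }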
assertTrue(response.totalBytes > 0) assertTrue(response.usableBytes >= 0) - assertFalse(response.topics().isEmpty) - response.topics().forEach(t => assertFalse(t.partitions().isEmpty)) - } - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testDescribeLogDirsWithoutAnyPartitionTopic(): Unit = { - val noneTopic = "none-topic" - val topicPartition = 0 - val topicId = Uuid.randomUuid() - val followerBrokerId = 0 - val leaderBrokerId = 1 - val leaderEpoch = 1 - val leaderEpochIncrement = 2 - val countDownLatch = new CountDownLatch(1) - val offsetFromLeader = 5 - - // Prepare the mocked components for the test - val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time), - topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, - expectTruncation = false, localLogOffset = Some(10), offsetFromLeader = offsetFromLeader, topicId = Some(topicId)) - - try { - val responses = replicaManager.describeLogDirs(Set(new TopicPartition(noneTopic, topicPartition))) - assertEquals(mockLogMgr.liveLogDirs.size, responses.size) - responses.foreach { response => - assertEquals(Errors.NONE.code, response.errorCode) - assertTrue(response.totalBytes > 0) - assertTrue(response.usableBytes >= 0) - assertTrue(response.topics().isEmpty) } } finally { replicaManager.shutdown(checkpointHW = false) diff --git a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala index 97efd9bcf4cc0..456d075f91655 100644 --- a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala @@ -704,10 +704,10 @@ class RequestQuotaTest extends BaseRequestTest { new ConsumerGroupDescribeRequest.Builder(new ConsumerGroupDescribeRequestData(), true) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => - new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData()) + new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData(), true) case ApiKeys.PUSH_TELEMETRY => - new PushTelemetryRequest.Builder(new PushTelemetryRequestData()) + new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true) case ApiKeys.ASSIGN_REPLICAS_TO_DIRS => new AssignReplicasToDirsRequest.Builder(new AssignReplicasToDirsRequestData()) @@ -718,18 +718,6 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => new DescribeTopicPartitionsRequest.Builder(new DescribeTopicPartitionsRequestData()) - case ApiKeys.SHARE_GROUP_HEARTBEAT => - new ShareGroupHeartbeatRequest.Builder(new ShareGroupHeartbeatRequestData(), true) - - case ApiKeys.SHARE_GROUP_DESCRIBE => - new ShareGroupDescribeRequest.Builder(new ShareGroupDescribeRequestData(), true) - - case ApiKeys.SHARE_FETCH => - new ShareFetchRequest.Builder(new ShareFetchRequestData(), true) - - case ApiKeys.SHARE_ACKNOWLEDGE => - new ShareAcknowledgeRequest.Builder(new ShareAcknowledgeRequestData(), true) - case _ => throw new IllegalArgumentException("Unsupported API key " + apiKey) } diff --git a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala index 26f4fb3daee8c..c2926c3b67db9 100644 --- a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala +++ b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala @@ -30,6 +30,7 @@ import 
org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry, NewTopic} import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.config.ConfigResource.Type.BROKER +import org.apache.kafka.common.metadata.FeatureLevelRecord import org.apache.kafka.common.utils.Exit import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataImageTest, MetadataProvenance} @@ -42,7 +43,7 @@ import org.junit.jupiter.api.Assertions.{assertEquals, assertNotNull, assertTrue import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.ArgumentMatchers.any import org.mockito.Mockito -import org.mockito.Mockito.{doThrow, mock, verify} +import org.mockito.Mockito.{clearInvocations, doThrow, mock, times, verify, verifyNoInteractions} import org.mockito.invocation.InvocationOnMock import org.mockito.stubbing.Answer @@ -220,4 +221,102 @@ class BrokerMetadataPublisherTest { verify(groupCoordinator).onNewMetadataImage(image, delta) } + + @Test + def testMetadataVersionUpdateToIBP_3_7_IV2OrAboveTriggersBrokerReRegistration(): Unit = { + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, "")) + val metadataCache = new KRaftMetadataCache(0) + val logManager = mock(classOf[LogManager]) + val replicaManager = mock(classOf[ReplicaManager]) + val groupCoordinator = mock(classOf[GroupCoordinator]) + val faultHandler = mock(classOf[FaultHandler]) + val brokerLifecycleManager = mock(classOf[BrokerLifecycleManager]) + + val metadataPublisher = new BrokerMetadataPublisher( + config, + metadataCache, + logManager, + replicaManager, + groupCoordinator, + mock(classOf[TransactionCoordinator]), + mock(classOf[DynamicConfigPublisher]), + mock(classOf[DynamicClientQuotaPublisher]), + mock(classOf[ScramPublisher]), + mock(classOf[DelegationTokenPublisher]), + mock(classOf[AclPublisher]), + faultHandler, + faultHandler, + brokerLifecycleManager, + ) + + var image = MetadataImage.EMPTY + var delta = new MetadataDelta.Builder() + .setImage(image) + .build() + + // We first upgrade metadata version to 3_6_IV2 + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(MetadataVersion.IBP_3_6_IV2.featureLevel())) + var newImage = delta.apply(new MetadataProvenance(100, 4, 2000)) + + metadataPublisher.onMetadataUpdate(delta, newImage, + LogDeltaManifest.newBuilder() + .provenance(MetadataProvenance.EMPTY) + .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) + .numBatches(1) + .elapsedNs(100) + .numBytes(42) + .build()) + + // This should NOT trigger broker reregistration + verifyNoInteractions(brokerLifecycleManager) + + // We then upgrade to IBP_3_7_IV2 + image = newImage + delta = new MetadataDelta.Builder() + .setImage(image) + .build() + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). 
+ setFeatureLevel(MetadataVersion.IBP_3_7_IV2.featureLevel())) + newImage = delta.apply(new MetadataProvenance(100, 4, 2000)) + + metadataPublisher.onMetadataUpdate(delta, newImage, + LogDeltaManifest.newBuilder() + .provenance(MetadataProvenance.EMPTY) + .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) + .numBatches(1) + .elapsedNs(100) + .numBytes(42) + .build()) + + // This SHOULD trigger a broker registration + verify(brokerLifecycleManager, times(1)).handleKraftJBODMetadataVersionUpdate() + clearInvocations(brokerLifecycleManager) + + // Finally upgrade to IBP_3_8_IV0 + image = newImage + delta = new MetadataDelta.Builder() + .setImage(image) + .build() + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(MetadataVersion.IBP_3_8_IV0.featureLevel())) + newImage = delta.apply(new MetadataProvenance(200, 4, 3000)) + + metadataPublisher.onMetadataUpdate(delta, newImage, + LogDeltaManifest.newBuilder() + .provenance(MetadataProvenance.EMPTY) + .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) + .numBatches(1) + .elapsedNs(100) + .numBytes(42) + .build()) + + // This should NOT trigger broker reregistration + verify(brokerLifecycleManager, times(0)).handleKraftJBODMetadataVersionUpdate() + + metadataPublisher.close() + } } diff --git a/docs/security.html b/docs/security.html index e3495f4b5188b..7eb0c2cb346f8 100644 --- a/docs/security.html +++ b/docs/security.html @@ -2267,42 +2267,6 @@

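[Editor's sketch] The new BrokerMetadataPublisher test above drives three metadata.version transitions and asserts that re-registration fires exactly once: not on the upgrade to 3.6-IV2, once on the upgrade crossing 3.7-IV2, and not again on the upgrade to 3.8-IV0. The condition it exercises, reduced to a predicate (feature-level integers are placeholders; the real code compares MetadataVersion values via isLessThan/isAtLeast against IBP_3_7_IV2):

    // Fires on upgrades that cross the JBOD boundary (e.g. 3.6 -> 3.7-IV2),
    // but not on upgrades entirely above it (e.g. 3.7-IV2 -> 3.8-IV0).
    static boolean crossesJbodBoundary(int oldLevel, int newLevel, int jbodLevel) {
        return oldLevel < jbodLevel && newLevel >= jbodLevel;
    }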
 classicGroupMaxSize) {
             log.info("Cannot downgrade consumer group {} to classic group because its group size is greater than classic group max size.",
                 consumerGroup.groupId());
-            return false;
         }
         return true;
     }
@@ -1906,28 +1904,24 @@ private Assignment updateTargetAssignment(
                 .withInvertedTargetAssignment(group.invertedTargetAssignment())
                 .withTopicsImage(metadataImage.topics())
                 .addOrUpdateMember(updatedMember.memberId(), updatedMember);
-
+            TargetAssignmentBuilder.TargetAssignmentResult assignmentResult;
+            // A new static member is replacing an older one with the same subscriptions.
+            // We just need to remove the older member and add the newer one. The new member should
+            // reuse the target assignment of the older member.
             if (staticMemberReplaced) {
-                // A new static member is replacing an older one with the same subscriptions.
-                // We just need to remove the older member and add the newer one. The new member should
-                // reuse the target assignment of the older member.
-                assignmentResultBuilder.removeMember(member.memberId());
+                assignmentResult = assignmentResultBuilder
+                    .removeMember(member.memberId())
+                    .build();
+            } else {
+                assignmentResult = assignmentResultBuilder
+                    .build();
             }
-            TargetAssignmentBuilder.TargetAssignmentResult assignmentResult =
-                assignmentResultBuilder.build();
-
             log.info("[GroupId {}] Computed a new target assignment for epoch {} with '{}' assignor: {}.",
                 group.groupId(), groupEpoch, preferredServerAssignor, assignmentResult.targetAssignment());
             records.addAll(assignmentResult.records());
-
-            MemberAssignment newMemberAssignment = assignmentResult.targetAssignment().get(updatedMember.memberId());
-            if (newMemberAssignment != null) {
-                return new Assignment(newMemberAssignment.targetPartitions());
-            } else {
-                return Assignment.EMPTY;
-            }
+            return assignmentResult.targetAssignment().get(updatedMember.memberId());
         } catch (PartitionAssignorException ex) {
             String msg = String.format("Failed to compute a new target assignment for epoch %d: %s",
                 groupEpoch, ex.getMessage());
diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java
index daea9938bf45d..57d6039fa0ba8 100644
--- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java
+++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java
@@ -64,11 +64,11 @@ public static class TargetAssignmentResult {
         /**
          * The new target assignment for the group.
          */
-        private final Map<String, MemberAssignment> targetAssignment;
+        private final Map<String, Assignment> targetAssignment;
 
         TargetAssignmentResult(
             List<CoordinatorRecord> records,
-            Map<String, MemberAssignment> targetAssignment
+            Map<String, Assignment> targetAssignment
         ) {
             Objects.requireNonNull(records);
             Objects.requireNonNull(targetAssignment);
@@ -86,7 +86,7 @@ public List<CoordinatorRecord> records() {
         /**
          * @return The target assignment.
          */
-        public Map<String, MemberAssignment> targetAssignment() {
+        public Map<String, Assignment> targetAssignment() {
             return targetAssignment;
         }
     }
@@ -347,26 +347,38 @@ public TargetAssignmentResult build() throws PartitionAssignorException {
 
         // Compute delta from previous to new target assignment and create the
         // relevant records.
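[Editor's sketch] To make the intent of the build() rewrite that follows easier to scan: a record is emitted for every brand-new member, and for an existing member only when its assignment actually changed; unchanged members produce nothing. A self-contained Java sketch of that delta rule, with generic types standing in for the coordinator classes:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    // Returns the members that need a new target-assignment record, mirroring
    // the branching in build(): new member, or existing member with a change.
    static <M, A> List<M> assignmentDeltaRecords(Map<M, A> oldAssignments, Map<M, A> newAssignments) {
        List<M> changed = new ArrayList<>();
        newAssignments.forEach((member, assignment) -> {
            A old = oldAssignments.get(member);
            if (old == null || !old.equals(assignment)) {
                changed.add(member);
            }
        });
        return changed;
    }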
List records = new ArrayList<>(); + Map newTargetAssignment = new HashMap<>(); - for (String memberId : memberSpecs.keySet()) { + memberSpecs.keySet().forEach(memberId -> { Assignment oldMemberAssignment = targetAssignment.get(memberId); Assignment newMemberAssignment = newMemberAssignment(newGroupAssignment, memberId); - if (!newMemberAssignment.equals(oldMemberAssignment)) { - // If the member had no assignment or had a different assignment, we - // create a record for the new assignment. + newTargetAssignment.put(memberId, newMemberAssignment); + + if (oldMemberAssignment == null) { + // If the member had no assignment, we always create a record for it. records.add(newTargetAssignmentRecord( groupId, memberId, newMemberAssignment.partitions() )); + } else { + // If the member had an assignment, we only create a record if the + // new assignment is different. + if (!newMemberAssignment.equals(oldMemberAssignment)) { + records.add(newTargetAssignmentRecord( + groupId, + memberId, + newMemberAssignment.partitions() + )); + } } - } + }); // Bump the target assignment epoch. records.add(newTargetAssignmentEpochRecord(groupId, groupEpoch)); - return new TargetAssignmentResult(records, newGroupAssignment.members()); + return new TargetAssignmentResult(records, newTargetAssignment); } private Assignment newMemberAssignment( diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java index abf48fd64158a..3664a7a61d295 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java @@ -13166,49 +13166,6 @@ public void testClassicGroupLeaveToConsumerGroupWithoutValidLeaveGroupMember() { assertEquals(Collections.emptyList(), leaveResult.records()); } - @Test - public void testNoConversionWhenSizeExceedsClassicMaxGroupSize() throws Exception { - String groupId = "group-id"; - String nonClassicMemberId = "1"; - - List protocols = Collections.singletonList( - new ConsumerGroupMemberMetadataValue.ClassicProtocol() - .setName("range") - .setMetadata(new byte[0]) - ); - - ConsumerGroupMember member = new ConsumerGroupMember.Builder(nonClassicMemberId).build(); - ConsumerGroupMember classicMember1 = new ConsumerGroupMember.Builder("2") - .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) - .build(); - ConsumerGroupMember classicMember2 = new ConsumerGroupMember.Builder("3") - .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) - .build(); - - GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(1) - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroup( - new ConsumerGroupBuilder(groupId, 10) - .withMember(member) - .withMember(classicMember1) - .withMember(classicMember2) - ) - .build(); - - assertEquals(Group.GroupType.CONSUMER, context.groupMetadataManager.group(groupId).type()); - - context.consumerGroupHeartbeat( - new ConsumerGroupHeartbeatRequestData() - .setGroupId(groupId) - .setMemberId(nonClassicMemberId) - .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) - .setRebalanceTimeoutMs(5000) - ); - - assertEquals(Group.GroupType.CONSUMER, 
context.groupMetadataManager.group(groupId).type()); - } - private static void checkJoinGroupResponse( JoinGroupResponseData expectedResponse, JoinGroupResponseData actualResponse, diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java index e2e572b6bf9f1..d5ba038f31895 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java @@ -337,12 +337,12 @@ public void testAssignmentHasNotChanged() { 20 )), result.records()); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -400,12 +400,12 @@ public void testAssignmentSwapped() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -474,16 +474,16 @@ public void testNewMember() { 20 ), result.records().get(3)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-3", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), mkTopicAssignment(barTopicId, 5, 6) ))); @@ -561,16 +561,16 @@ public void testUpdateMember() { 20 ), result.records().get(3)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-3", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), 
mkTopicAssignment(barTopicId, 5, 6) ))); @@ -639,16 +639,16 @@ public void testPartialAssignmentUpdate() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 3, 4, 5) ))); - expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-3", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 6), mkTopicAssignment(barTopicId, 6) ))); @@ -713,12 +713,12 @@ public void testDeleteMember() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -788,17 +788,17 @@ public void testReplaceStaticMember() { 20 ), result.records().get(1)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-2", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3-a", new MemberAssignment(mkAssignment( + expectedAssignment.put("member-3-a", new Assignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), mkTopicAssignment(barTopicId, 5, 6) ))); diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java index 8b9c5b19eae4f..0974c31d1b263 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java @@ -408,13 +408,6 @@ public ControllerResult registerBroker( setBrokerEpoch(brokerEpoch). setRack(request.rack()). 
setEndPoints(listenerInfo.toBrokerRegistrationRecord()); - - if (existing != null && request.incarnationId().equals(existing.incarnationId())) { - log.info("Amending registration of broker {}", request.brokerId()); - record.setFenced(existing.fenced()); - record.setInControlledShutdown(existing.inControlledShutdown()); - } - for (BrokerRegistrationRequestData.Feature feature : request.features()) { record.features().add(processRegistrationFeature(brokerId, finalizedFeatures, feature)); } diff --git a/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java b/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java deleted file mode 100644 index 51ac2bdfa4bd3..0000000000000 --- a/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.image.publisher; - -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.image.loader.LoaderManifest; -import org.apache.kafka.metadata.BrokerRegistration; -import org.apache.kafka.server.common.MetadataVersion; -import org.slf4j.Logger; - -import java.util.List; - -/** - * Tracks the registration of a specific broker, and executes a callback if it should be refreshed. - * - * This tracker handles cases where we might want to re-register the broker. The only such case - * right now is during the transition from non-JBOD mode, to JBOD mode. In other words, the - * transition from a MetadataVersion less than 3.7-IV2, to one greater than or equal to 3.7-IV2. - * In this case, the broker registration will start out containing no directories, and we need to - * resend the BrokerRegistrationRequest to fix that. - * - * As much as possible, the goal here is to keep things simple. We just compare the desired state - * with the actual state, and try to make changes only if necessary. - */ -public class BrokerRegistrationTracker implements MetadataPublisher { - private final Logger log; - private final int id; - private final Runnable refreshRegistrationCallback; - - /** - * Create the tracker. - * - * @param id The ID of this broker. - * @param targetDirectories The directories managed by this broker. - * @param refreshRegistrationCallback Callback to run if we need to refresh the registration. - */ - public BrokerRegistrationTracker( - int id, - List targetDirectories, - Runnable refreshRegistrationCallback - ) { - this.log = new LogContext("[BrokerRegistrationTracker id=" + id + "] "). 
- logger(BrokerRegistrationTracker.class); - this.id = id; - this.refreshRegistrationCallback = refreshRegistrationCallback; - } - - @Override - public String name() { - return "BrokerRegistrationTracker(id=" + id + ")"; - } - - @Override - public void onMetadataUpdate( - MetadataDelta delta, - MetadataImage newImage, - LoaderManifest manifest - ) { - boolean checkBrokerRegistration = false; - if (delta.featuresDelta() != null) { - if (delta.metadataVersionChanged().isPresent()) { - if (log.isTraceEnabled()) { - log.trace("Metadata version change is present: {}", - delta.metadataVersionChanged()); - } - checkBrokerRegistration = true; - } - } - if (delta.clusterDelta() != null) { - if (delta.clusterDelta().changedBrokers().get(id) != null) { - if (log.isTraceEnabled()) { - log.trace("Broker change is present: {}", - delta.clusterDelta().changedBrokers().get(id)); - } - checkBrokerRegistration = true; - } - } - if (checkBrokerRegistration) { - if (brokerRegistrationNeedsRefresh(newImage.features().metadataVersion(), - delta.clusterDelta().broker(id))) { - refreshRegistrationCallback.run(); - } - } - } - - /** - * Check if the current broker registration needs to be refreshed. - * - * @param metadataVersion The current metadata version. - * @param registration The current broker registration, or null if there is none. - * @return True only if we should refresh. - */ - boolean brokerRegistrationNeedsRefresh( - MetadataVersion metadataVersion, - BrokerRegistration registration - ) { - // If there is no existing registration, the BrokerLifecycleManager must still be sending it. - // So we don't need to do anything yet. - if (registration == null) { - log.debug("No current broker registration to check."); - return false; - } - // Check to see if the directory list has changed. Note that this check could certainly be - // triggered spuriously. For example, if the broker's directory list has been changed in the - // past, and we are in the process of replaying that change log, we will end up here. - // That's fine because resending the broker registration does not cause any problems. And, - // of course, as soon as a snapshot is made, we will no longer need to worry about those - // old metadata log entries being replayed on startup. - if (metadataVersion.isAtLeast(MetadataVersion.IBP_3_7_IV2) && - registration.directories().isEmpty()) { - log.info("Current directory set is empty, but MV supports JBOD. Resending " + - "broker registration."); - return true; - } - log.debug("Broker registration does not need to be resent."); - return false; - } -} diff --git a/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java deleted file mode 100644 index 855a96cd8aaf3..0000000000000 --- a/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.image.publisher; - -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.metadata.FeatureLevelRecord; -import org.apache.kafka.common.metadata.RegisterBrokerRecord; -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.image.MetadataProvenance; -import org.apache.kafka.image.loader.LogDeltaManifest; -import org.apache.kafka.raft.LeaderAndEpoch; -import org.apache.kafka.server.common.MetadataVersion; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.util.Arrays; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -@Timeout(value = 40) -public class BrokerRegistrationTrackerTest { - static final Uuid INCARNATION_ID = Uuid.fromString("jyjLbk31Tpa53pFrU9Y-Ng"); - - static final Uuid A = Uuid.fromString("Ahw3vXfnThqeZbb7HD1w6Q"); - - static final Uuid B = Uuid.fromString("BjOacT0OTNqIvUWIlKhahg"); - - static final Uuid C = Uuid.fromString("CVHi_iv2Rvy5_1rtPdasfg"); - - static class BrokerRegistrationTrackerTestContext { - AtomicInteger numCalls = new AtomicInteger(0); - BrokerRegistrationTracker tracker = new BrokerRegistrationTracker(1, - Arrays.asList(B, A), () -> numCalls.incrementAndGet()); - - MetadataImage image = MetadataImage.EMPTY; - - void onMetadataUpdate(MetadataDelta delta) { - MetadataProvenance provenance = new MetadataProvenance(0, 0, 0); - image = delta.apply(provenance); - LogDeltaManifest manifest = new LogDeltaManifest.Builder(). - provenance(provenance). - leaderAndEpoch(LeaderAndEpoch.UNKNOWN). - numBatches(1). - elapsedNs(1). - numBytes(1). - build(); - tracker.onMetadataUpdate(delta, image, manifest); - } - - MetadataDelta newDelta() { - return new MetadataDelta.Builder(). - setImage(image). - build(); - } - } - - @Test - public void testTrackerName() { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - assertEquals("BrokerRegistrationTracker(id=1)", ctx.tracker.name()); - } - - @Test - public void testMetadataVersionUpdateWithoutRegistrationDoesNothing() { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - MetadataDelta delta = ctx.newDelta(); - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(MetadataVersion.IBP_3_7_IV2.featureLevel())); - ctx.onMetadataUpdate(delta); - assertEquals(0, ctx.numCalls.get()); - } - - @Test - public void testBrokerUpdateWithoutNewMvDoesNothing() { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - MetadataDelta delta = ctx.newDelta(); - delta.replay(new RegisterBrokerRecord(). - setBrokerId(1). - setIncarnationId(INCARNATION_ID). 
- setLogDirs(Arrays.asList(A, B, C))); - ctx.onMetadataUpdate(delta); - assertEquals(0, ctx.numCalls.get()); - } - - @ParameterizedTest - @ValueSource(booleans = {false, true}) - public void testBrokerUpdateWithNewMv(boolean jbodMv) { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - MetadataDelta delta = ctx.newDelta(); - delta.replay(new RegisterBrokerRecord(). - setBrokerId(1). - setIncarnationId(INCARNATION_ID). - setLogDirs(Arrays.asList())); - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(jbodMv ? MetadataVersion.IBP_3_7_IV2.featureLevel() : - MetadataVersion.IBP_3_7_IV1.featureLevel())); - ctx.onMetadataUpdate(delta); - if (jbodMv) { - assertEquals(1, ctx.numCalls.get()); - } else { - assertEquals(0, ctx.numCalls.get()); - } - } - - @ParameterizedTest - @ValueSource(booleans = {false, true}) - public void testBrokerUpdateWithNewMvWithTwoDeltas(boolean jbodMv) { - BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); - MetadataDelta delta = ctx.newDelta(); - delta.replay(new RegisterBrokerRecord(). - setBrokerId(1). - setIncarnationId(INCARNATION_ID). - setLogDirs(Arrays.asList())); - ctx.onMetadataUpdate(delta); - // No calls are made because MetadataVersion is 3.0-IV1 initially - assertEquals(0, ctx.numCalls.get()); - - delta = ctx.newDelta(); - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(jbodMv ? MetadataVersion.IBP_3_7_IV2.featureLevel() : - MetadataVersion.IBP_3_7_IV1.featureLevel())); - ctx.onMetadataUpdate(delta); - if (jbodMv) { - assertEquals(1, ctx.numCalls.get()); - } else { - assertEquals(0, ctx.numCalls.get()); - } - } -} diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java index d6cf615c781b3..6ea752886a992 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java @@ -100,16 +100,6 @@ public final class RemoteLogManagerConfig { "segments, fetch remote log indexes and clean up remote log segments."; public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10; - public static final String REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP = "remote.log.manager.copier.thread.pool.size"; - public static final String REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_DOC = "Size of the thread pool used in " + - "scheduling tasks to copy segments."; - public static final int DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE = 10; - - public static final String REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP = "remote.log.manager.expiration.thread.pool.size"; - public static final String REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_DOC = "Size of the thread pool used in" + - " scheduling tasks to clean up remote log segments."; - public static final int DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE = 10; - public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = "remote.log.manager.task.interval.ms"; public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = "Interval at which remote log manager runs the scheduled tasks like copy " + "segments, and clean up remote log segments."; @@ -251,18 +241,6 @@ public final class RemoteLogManagerConfig { atLeast(1), MEDIUM, 
REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC) - .defineInternal(REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, - INT, - DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE, - atLeast(1), - MEDIUM, - REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_DOC) - .defineInternal(REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, - INT, - DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE, - atLeast(1), - MEDIUM, - REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_DOC) .define(REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, LONG, DEFAULT_REMOTE_LOG_MANAGER_TASK_INTERVAL_MS, @@ -355,8 +333,6 @@ public final class RemoteLogManagerConfig { private final String remoteLogMetadataManagerClassPath; private final long remoteLogIndexFileCacheTotalSizeBytes; private final int remoteLogManagerThreadPoolSize; - private final int remoteLogManagerCopierThreadPoolSize; - private final int remoteLogManagerExpirationThreadPoolSize; private final long remoteLogManagerTaskIntervalMs; private final long remoteLogManagerTaskRetryBackoffMs; private final long remoteLogManagerTaskRetryBackoffMaxMs; @@ -385,8 +361,6 @@ public RemoteLogManagerConfig(AbstractConfig config) { config.getString(REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP), config.getLong(REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP), config.getInt(REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP), - config.getInt(REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP), - config.getInt(REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_PROP), @@ -419,8 +393,6 @@ public RemoteLogManagerConfig(boolean enableRemoteStorageSystem, String remoteLogMetadataManagerListenerName, long remoteLogIndexFileCacheTotalSizeBytes, int remoteLogManagerThreadPoolSize, - int remoteLogManagerCopierThreadPoolSize, - int remoteLogManagerExpirationThreadPoolSize, long remoteLogManagerTaskIntervalMs, long remoteLogManagerTaskRetryBackoffMs, long remoteLogManagerTaskRetryBackoffMaxMs, @@ -446,8 +418,6 @@ public RemoteLogManagerConfig(boolean enableRemoteStorageSystem, this.remoteLogMetadataManagerClassPath = remoteLogMetadataManagerClassPath; this.remoteLogIndexFileCacheTotalSizeBytes = remoteLogIndexFileCacheTotalSizeBytes; this.remoteLogManagerThreadPoolSize = remoteLogManagerThreadPoolSize; - this.remoteLogManagerCopierThreadPoolSize = remoteLogManagerCopierThreadPoolSize; - this.remoteLogManagerExpirationThreadPoolSize = remoteLogManagerExpirationThreadPoolSize; this.remoteLogManagerTaskIntervalMs = remoteLogManagerTaskIntervalMs; this.remoteLogManagerTaskRetryBackoffMs = remoteLogManagerTaskRetryBackoffMs; this.remoteLogManagerTaskRetryBackoffMaxMs = remoteLogManagerTaskRetryBackoffMaxMs; @@ -496,14 +466,6 @@ public int remoteLogManagerThreadPoolSize() { return remoteLogManagerThreadPoolSize; } - public int remoteLogManagerCopierThreadPoolSize() { - return remoteLogManagerCopierThreadPoolSize; - } - - public int remoteLogManagerExpirationThreadPoolSize() { - return remoteLogManagerExpirationThreadPoolSize; - } - public long remoteLogManagerTaskIntervalMs() { return remoteLogManagerTaskIntervalMs; } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java index 7af78e750a84f..a063fa8820a82 100644 --- 
a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java @@ -53,6 +53,12 @@ public void initialize(Set topicIdPartitions, initializeRemoteLogMetadataManager(topicIdPartitions, startConsumerThread, RemoteLogMetadataTopicPartitioner::new, remotePartitionMetadataStoreSupplier); } + public void initializeRemoteLogMetadataManager(Set topicIdPartitions, + boolean startConsumerThread, + Function remoteLogMetadataTopicPartitioner) { + initializeRemoteLogMetadataManager(topicIdPartitions, startConsumerThread, remoteLogMetadataTopicPartitioner, RemotePartitionMetadataStore::new); + } + public void initializeRemoteLogMetadataManager(Set topicIdPartitions, boolean startConsumerThread, Function remoteLogMetadataTopicPartitioner, @@ -64,7 +70,6 @@ public void initializeRemoteLogMetadataManager(Set topicIdPart .startConsumerThread(startConsumerThread) .remoteLogMetadataTopicPartitioner(remoteLogMetadataTopicPartitioner) .remotePartitionMetadataStore(remotePartitionMetadataStoreSupplier) - .overrideRemoteLogMetadataManagerProps(overrideRemoteLogMetadataManagerProps()) .build(); } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java index 84b98dcb5be1d..c599259ed9416 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java @@ -16,11 +16,6 @@ */ package org.apache.kafka.server.log.remote.metadata.storage; -import kafka.test.ClusterInstance; -import kafka.test.annotation.ClusterTest; -import kafka.test.junit.ClusterTestExtensions; -import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; @@ -29,99 +24,139 @@ import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId; import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata; import org.apache.kafka.test.TestUtils; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import scala.collection.JavaConverters; +import scala.collection.Seq; +import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.LOG_DIR; -@ExtendWith(value = ClusterTestExtensions.class) -@Tag("integration") +@SuppressWarnings("deprecation") // Added for Scala 2.12 compatibility for usages of JavaConverters public class TopicBasedRemoteLogMetadataManagerRestartTest { private static final int SEG_SIZE = 1024 * 1024; private final Time time = new MockTime(1); private final String logDir = TestUtils.tempDirectory("_rlmm_segs_").getAbsolutePath(); - private final ClusterInstance 
clusterInstance; - TopicBasedRemoteLogMetadataManagerRestartTest(ClusterInstance clusterInstance) { // Constructor injections - this.clusterInstance = clusterInstance; + private TopicBasedRemoteLogMetadataManagerHarness remoteLogMetadataManagerHarness; + + @BeforeEach + public void setup() { + // Start the cluster and initialize TopicBasedRemoteLogMetadataManager. + remoteLogMetadataManagerHarness = new TopicBasedRemoteLogMetadataManagerHarness() { + protected Map overrideRemoteLogMetadataManagerProps() { + Map props = new HashMap<>(); + props.put(LOG_DIR, logDir); + return props; + } + }; + remoteLogMetadataManagerHarness.initialize(Collections.emptySet(), true); } - private TopicBasedRemoteLogMetadataManager createTopicBasedRemoteLogMetadataManager() { - return RemoteLogMetadataManagerTestUtils.builder() - .topicIdPartitions(Collections.emptySet()) - .bootstrapServers(clusterInstance.bootstrapServers()) - .startConsumerThread(true) - .remoteLogMetadataTopicPartitioner(RemoteLogMetadataTopicPartitioner::new) - .overrideRemoteLogMetadataManagerProps(Collections.singletonMap(LOG_DIR, logDir)) - .build(); + private void startTopicBasedRemoteLogMetadataManagerHarness(boolean startConsumerThread) { + remoteLogMetadataManagerHarness.initializeRemoteLogMetadataManager(Collections.emptySet(), startConsumerThread, RemoteLogMetadataTopicPartitioner::new); } - @ClusterTest(brokers = 3) + @AfterEach + public void teardown() throws IOException { + if (remoteLogMetadataManagerHarness != null) { + remoteLogMetadataManagerHarness.close(); + } + } + + private void stopTopicBasedRemoteLogMetadataManagerHarness() { + remoteLogMetadataManagerHarness.closeRemoteLogMetadataManager(); + } + + private TopicBasedRemoteLogMetadataManager topicBasedRlmm() { + return remoteLogMetadataManagerHarness.remoteLogMetadataManager(); + } + + @Test public void testRLMMAPIsAfterRestart() throws Exception { // Create topics. String leaderTopic = "new-leader"; + HashMap> assignedLeaderTopicReplicas = new HashMap<>(); + List leaderTopicReplicas = new ArrayList<>(); + // Set broker id 0 as the first entry which is taken as the leader. + leaderTopicReplicas.add(0); + leaderTopicReplicas.add(1); + leaderTopicReplicas.add(2); + assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas)); + remoteLogMetadataManagerHarness.createTopicWithAssignment( + leaderTopic, JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas), + remoteLogMetadataManagerHarness.listenerName()); + String followerTopic = "new-follower"; - try (Admin admin = clusterInstance.createAdminClient()) { - // Set broker id 0 as the first entry which is taken as the leader. - NewTopic newLeaderTopic = new NewTopic(leaderTopic, Collections.singletonMap(0, Arrays.asList(0, 1, 2))); - // Set broker id 1 as the first entry which is taken as the leader. - NewTopic newFollowerTopic = new NewTopic(followerTopic, Collections.singletonMap(0, Arrays.asList(1, 2, 0))); - admin.createTopics(Arrays.asList(newLeaderTopic, newFollowerTopic)).all().get(); - } - clusterInstance.waitForTopic(leaderTopic, 1); - clusterInstance.waitForTopic(followerTopic, 1); + HashMap> assignedFollowerTopicReplicas = new HashMap<>(); + List followerTopicReplicas = new ArrayList<>(); + // Set broker id 1 as the first entry which is taken as the leader. 
+ followerTopicReplicas.add(1); + followerTopicReplicas.add(2); + followerTopicReplicas.add(0); + assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas)); + remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic, + JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas), + remoteLogMetadataManagerHarness.listenerName()); final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0)); final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0)); + + // Register these partitions to RLMM. + topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); + + // Add segments for these partitions, but they are not available as they have not yet been subscribed. RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata( new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L)); + topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get(); + RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata( new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L)); + topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get(); - try (TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = createTopicBasedRemoteLogMetadataManager()) { - // Register these partitions to RemoteLogMetadataManager. - topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges( - Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); + // Stop TopicBasedRemoteLogMetadataManager only. + stopTopicBasedRemoteLogMetadataManagerHarness(); - // Add segments for these partitions, but they are not available as they have not yet been subscribed. - topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(leaderSegmentMetadata).get(); - topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(followerSegmentMetadata).get(); - } + // Start TopicBasedRemoteLogMetadataManager + startTopicBasedRemoteLogMetadataManagerHarness(true); - try (TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = createTopicBasedRemoteLogMetadataManager()) { - // Register these partitions to RemoteLogMetadataManager, which loads the respective metadata snapshots. - topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges( - Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); - - // Check for the stored entries from the earlier run. 
- TestUtils.waitForCondition(() -> - TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), - topicBasedRemoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)), - "Remote log segment metadata not available"); - TestUtils.waitForCondition(() -> - TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), - topicBasedRemoteLogMetadataManager.listRemoteLogSegments(followerTopicIdPartition)), - "Remote log segment metadata not available"); - // Add one more segment - RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata( - new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), - 101, 200, -1L, 0, - time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L)); - topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get(); - - // Check that both the stored segment and recently added segment are available. - Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), - topicBasedRemoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition))); - } + // Register these partitions to RLMM, which loads the respective metadata snapshots. + topicBasedRlmm().onPartitionLeadershipChanges( + Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); + + // Check for the stored entries from the earlier run. + TestUtils.waitForCondition(() -> + TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), + topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)), + "Remote log segment metadata not available"); + TestUtils.waitForCondition(() -> + TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), + topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)), + "Remote log segment metadata not available"); + // Add one more segment + RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata( + new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 101, 200, -1L, 0, + time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L)); + topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get(); + + // Check that both the stored segment and recently added segment are available. 
+ Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), + topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition))); } -} +} \ No newline at end of file diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java index 4e3c2fc26cb66..45fd6669e7d4f 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java @@ -47,7 +47,7 @@ public void testValidConfigs(boolean useDefaultRemoteLogMetadataManagerClass) { RemoteLogManagerConfig expectedRemoteLogManagerConfig = new RemoteLogManagerConfig(true, "dummy.remote.storage.class", "dummy.remote.storage.class.path", remoteLogMetadataManagerClass, "dummy.remote.log.metadata.class.path", - "listener.name", 1024 * 1024L, 1, 1, 1, 60000L, 100L, 60000L, 0.3, 10, 100, 100, + "listener.name", 1024 * 1024L, 1, 60000L, 100L, 60000L, 0.3, 10, 100, 100, rsmPrefix, rsmProps, rlmmPrefix, rlmmProps, Long.MAX_VALUE, 11, 1, Long.MAX_VALUE, 11, 1); @@ -81,10 +81,6 @@ private Map extractProps(RemoteLogManagerConfig remoteLogManager remoteLogManagerConfig.remoteLogIndexFileCacheTotalSizeBytes()); props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP, remoteLogManagerConfig.remoteLogManagerThreadPoolSize()); - props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, - remoteLogManagerConfig.remoteLogManagerCopierThreadPoolSize()); - props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, - remoteLogManagerConfig.remoteLogManagerExpirationThreadPoolSize()); props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, remoteLogManagerConfig.remoteLogManagerTaskIntervalMs()); props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP, diff --git a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java index e5177ddaead2c..5ea53a1c38269 100644 --- a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java +++ b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java @@ -43,37 +43,11 @@ import static kafka.test.annotation.Type.CO_KRAFT; import static kafka.test.annotation.Type.KRAFT; -import static kafka.test.annotation.Type.ZK; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG; -/** - * The old test framework {@link kafka.api.BaseConsumerTest#getTestQuorumAndGroupProtocolParametersAll} test for the following cases: - *
<ul>
- *   <li>(ZK / KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 2 cases</li>
- *   <li>(KRAFT server) with (group.coordinator.new.enable=true) with (classic group protocol) = 1 case</li>
- *   <li>(KRAFT server) with (group.coordinator.new.enable=true) with (consumer group protocol) = 1 case</li>
- * </ul>
- * <p>
- * The new test framework run seven cases for the following cases:
- * <ul>
- *   <li>(ZK / KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 3 cases</li>
- *   <li>(KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=true) with (classic group protocol) = 2 cases</li>
- *   <li>(KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=true) with (consumer group protocol) = 2 cases</li>
- * </ul>
- * <p>
- * We can reduce the number of cases as same as the old test framework by using the following methods:
- * <ul>
- *   <li>{@link #forConsumerGroupCoordinator} for the case of (consumer group protocol)</li>
- *   <li>(CO_KRAFT servers) with (group.coordinator.new.enable=true) with (classic / consumer group protocols) = 2 cases</li>
- * </ul>
- * <ul>
- *   <li>{@link #forClassicGroupCoordinator} for the case of (classic group protocol)</li>
- *   <li>(ZK / KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 2 cases</li>
- * </ul>
- */ class ConsumerGroupCommandTestUtils { private ConsumerGroupCommandTestUtils() { @@ -92,8 +66,8 @@ static List forConsumerGroupCoordinator() { serverProperties.put(GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer"); return Collections.singletonList(ClusterConfig.defaultBuilder() + .setTypes(Stream.of(KRAFT, CO_KRAFT).collect(Collectors.toSet())) .setFeatures(Collections.singletonMap(Features.GROUP_VERSION, GroupVersion.GV_1.featureLevel())) - .setTypes(Collections.singleton(CO_KRAFT)) .setServerProperties(serverProperties) .setTags(Collections.singletonList("consumerGroupCoordinator")) .build()); @@ -106,7 +80,6 @@ static List forClassicGroupCoordinator() { serverProperties.put(NEW_GROUP_COORDINATOR_ENABLE_CONFIG, "false"); return Collections.singletonList(ClusterConfig.defaultBuilder() - .setTypes(Stream.of(ZK, KRAFT).collect(Collectors.toSet())) .setServerProperties(serverProperties) .setTags(Collections.singletonList("classicGroupCoordinator")) .build()); From 879e066df6cdc42094dbe3d6db92bdd22d0f2c1f Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 4 Jun 2024 12:56:50 -0500 Subject: [PATCH 36/61] Update ConsumerNetworkThread.java --- .../kafka/clients/consumer/internals/ConsumerNetworkThread.java | 1 - 1 file changed, 1 deletion(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index 05a7c209ae634..876171f49c823 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -289,7 +289,6 @@ private void closeInternal(final Duration timeout) { } } - // Add test to see if poll() is run once with timer of 0 /** * Check the unsent queue one last time and poll until all requests are sent or the timer runs out. 
*/ From 2435d829c0ff50fceb7a5fbf1db06e75e01b9988 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 4 Jun 2024 13:25:20 -0500 Subject: [PATCH 37/61] testRequestManagersArePolledOnce()Updated --- .../consumer/internals/ConsumerNetworkThreadTest.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 3b7d6000a6cad..0c3d556d95ba6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -210,6 +210,12 @@ public void testStartupAndTearDown() throws InterruptedException { @Test void testRequestManagersArePolledOnce() { + List> list = new ArrayList<>(); + list.add(Optional.of(coordinatorRequestManager)); + list.add(Optional.of(heartbeatRequestManager)); + list.add(Optional.of(offsetsRequestManager)); + + when(requestManagers.entries()).thenReturn(list); consumerNetworkThread.runOnce(); requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).poll(anyLong()))); requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).maximumTimeToWait(anyLong()))); From 8bac05a55442c10346233df2e568a5d8383cb11e Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 4 Jun 2024 13:48:58 -0500 Subject: [PATCH 38/61] Small test updates --- .../consumer/internals/ConsumerNetworkThreadTest.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 0c3d556d95ba6..d90f4c0b812d5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -214,11 +214,13 @@ void testRequestManagersArePolledOnce() { list.add(Optional.of(coordinatorRequestManager)); list.add(Optional.of(heartbeatRequestManager)); list.add(Optional.of(offsetsRequestManager)); - + when(requestManagers.entries()).thenReturn(list); + when(coordinatorRequestManager.poll(anyLong())).thenReturn(mock(NetworkClientDelegate.PollResult.class)); consumerNetworkThread.runOnce(); requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).poll(anyLong()))); requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).maximumTimeToWait(anyLong()))); + verify(networkClientDelegate).addAll(any(NetworkClientDelegate.PollResult.class)); verify(networkClientDelegate).poll(anyLong(), anyLong()); } @@ -346,9 +348,7 @@ void testEnsureEventsAreCompleted() { Cluster cluster = mock(Cluster.class); when(metadata.fetch()).thenReturn(cluster); - List list = new ArrayList<>(); - list.add(new Node(0, "host", 0)); - when(cluster.nodes()).thenReturn(list); + when(cluster.nodes()).thenReturn(Collections.singletonList(new Node(0, "host", 0))); LinkedList queue = new LinkedList<>(); when(networkClientDelegate.unsentRequests()).thenReturn(queue); From 41b78e86a73d85f20348a63f7c1efefa281be573 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 4 Jun 2024 13:49:29 -0500 Subject: [PATCH 39/61] Reapply "Merge branch 'apache:trunk' into 16001" This reverts commit 5fb7304892ecf5662a23b5c65111c79b012007ab. 
--- README.md | 7 +- build.gradle | 17 +- checkstyle/checkstyle.xml | 2 + .../apache/kafka/common/ShareGroupState.java | 56 ++++ .../errors/FencedStateEpochException.java | 28 ++ .../errors/InvalidRecordStateException.java | 30 ++ .../InvalidShareSessionEpochException.java | 28 ++ .../errors/ShareSessionNotFoundException.java | 28 ++ .../apache/kafka/common/protocol/ApiKeys.java | 6 +- .../apache/kafka/common/protocol/Errors.java | 10 +- .../common/requests/AbstractRequest.java | 8 + .../common/requests/AbstractResponse.java | 8 + .../requests/ShareAcknowledgeRequest.java | 127 +++++++++ .../requests/ShareAcknowledgeResponse.java | 148 ++++++++++ .../common/requests/ShareFetchMetadata.java | 121 ++++++++ .../common/requests/ShareFetchRequest.java | 267 ++++++++++++++++++ .../common/requests/ShareFetchResponse.java | 212 ++++++++++++++ .../requests/ShareGroupDescribeRequest.java | 100 +++++++ .../requests/ShareGroupDescribeResponse.java | 77 +++++ .../requests/ShareGroupHeartbeatRequest.java | 86 ++++++ .../requests/ShareGroupHeartbeatResponse.java | 71 +++++ .../message/FindCoordinatorRequest.json | 4 +- .../message/FindCoordinatorResponse.json | 4 +- .../common/message/ListGroupsRequest.json | 4 +- .../common/message/ListGroupsResponse.json | 4 +- .../message/ShareAcknowledgeRequest.json | 53 ++++ .../message/ShareAcknowledgeResponse.json | 72 +++++ .../common/message/ShareFetchRequest.json | 67 +++++ .../common/message/ShareFetchResponse.json | 83 ++++++ .../message/ShareGroupDescribeRequest.json | 33 +++ .../message/ShareGroupDescribeResponse.json | 87 ++++++ .../message/ShareGroupHeartbeatRequest.json | 39 +++ .../message/ShareGroupHeartbeatResponse.json | 57 ++++ .../DefaultChannelMetadataRegistry.java | 4 +- .../common/requests/RequestResponseTest.java | 129 +++++++++ .../connect/runtime/rest/RestClient.java | 6 +- .../rest/entities/CreateConnectorRequest.java | 2 +- .../KafkaConfigBackingStoreMockitoTest.java | 157 +++++++++- .../storage/KafkaConfigBackingStoreTest.java | 170 ----------- .../server/builders/KafkaApisBuilder.java | 2 +- .../builders/ReplicaManagerBuilder.java | 2 +- .../src/main/scala/kafka/log/LogCleaner.scala | 17 +- .../src/main/scala/kafka/log/LogManager.scala | 2 +- .../kafka/network/RequestConvertToJson.scala | 8 + .../kafka/server/BrokerLifecycleManager.scala | 6 +- .../scala/kafka/server/BrokerServer.scala | 10 +- .../scala/kafka/server/ConfigHandler.scala | 2 +- .../ControllerConfigurationValidator.scala | 3 +- .../main/scala/kafka/server/KafkaConfig.scala | 4 +- .../main/scala/kafka/server/KafkaServer.scala | 2 +- .../scala/kafka/server/ReplicaManager.scala | 25 +- .../metadata/BrokerMetadataPublisher.scala | 16 -- .../main/scala/kafka/zk/AdminZkClient.scala | 4 +- .../log/remote/RemoteLogManagerTest.java | 2 +- .../junit/ClusterTestExtensionsUnitTest.java | 49 +++- .../scala/unit/kafka/log/LogCleanerTest.scala | 24 +- .../scala/unit/kafka/log/LogConfigTest.scala | 26 +- .../kafka/server/ApiVersionsRequestTest.scala | 2 +- .../server/BrokerLifecycleManagerTest.scala | 2 +- .../kafka/server/ReplicaManagerTest.scala | 37 ++- .../unit/kafka/server/RequestQuotaTest.scala | 16 +- .../BrokerMetadataPublisherTest.scala | 101 +------ docs/security.html | 36 +++ gradle/dependencies.gradle | 2 +- .../group/GroupMetadataManager.java | 28 +- .../consumer/TargetAssignmentBuilder.java | 30 +- .../group/GroupMetadataManagerTest.java | 43 +++ .../consumer/TargetAssignmentBuilderTest.java | 50 ++-- .../controller/ClusterControlManager.java | 7 + 
.../publisher/BrokerRegistrationTracker.java | 136 +++++++++ .../BrokerRegistrationTrackerTest.java | 151 ++++++++++ .../storage/RemoteLogManagerConfig.java | 38 +++ ...cBasedRemoteLogMetadataManagerHarness.java | 7 +- ...edRemoteLogMetadataManagerRestartTest.java | 163 +++++------ .../storage/RemoteLogManagerConfigTest.java | 6 +- .../group/ConsumerGroupCommandTestUtils.java | 29 +- 76 files changed, 2972 insertions(+), 528 deletions(-) create mode 100644 clients/src/main/java/org/apache/kafka/common/ShareGroupState.java create mode 100644 clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java create mode 100644 clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java create mode 100644 clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java create mode 100644 clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java create mode 100644 clients/src/main/resources/common/message/ShareAcknowledgeRequest.json create mode 100644 clients/src/main/resources/common/message/ShareAcknowledgeResponse.json create mode 100644 clients/src/main/resources/common/message/ShareFetchRequest.json create mode 100644 clients/src/main/resources/common/message/ShareFetchResponse.json create mode 100644 clients/src/main/resources/common/message/ShareGroupDescribeRequest.json create mode 100644 clients/src/main/resources/common/message/ShareGroupDescribeResponse.json create mode 100644 clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json create mode 100644 clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json rename clients/src/{main => test}/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java (93%) create mode 100644 metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java create mode 100644 metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java diff --git a/README.md b/README.md index 27ce0dc0bce64..ab7dcd7685bde 100644 --- a/README.md +++ b/README.md @@ -227,11 +227,16 @@ There are two code quality analysis tools that we regularly run, spotbugs and ch Checkstyle enforces a consistent coding style in Kafka. You can run checkstyle using: - ./gradlew checkstyleMain checkstyleTest + ./gradlew checkstyleMain checkstyleTest spotlessCheck The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the subproject build directories. They are also printed to the console. 
The build will fail if Checkstyle fails. +#### Spotless #### +The import order is part of the static checks. Please run `spotlessApply` to fix the import order of the Java code before filing a pull request: + + ./gradlew spotlessApply + #### Spotbugs #### Spotbugs uses static analysis to look for bugs in the code. You can run spotbugs using: diff --git a/build.gradle b/build.gradle index ea168ecb26fb4..a2a6531d29a62 100644 --- a/build.gradle +++ b/build.gradle @@ -47,7 +47,9 @@ plugins { // Updating the shadow plugin version to 8.1.1 causes issue with signing and publishing the shadowed // artifacts - see https://github.com/johnrengelman/shadow/issues/901 id 'com.github.johnrengelman.shadow' version '8.1.0' apply false - id 'com.diffplug.spotless' version '6.14.0' apply false // 6.14.1 and newer require Java 11 at compile time, so we can't upgrade until AK 4.0 + // the minimum JRE required by Spotless 6.14.0+ is 11 + // refer: https://github.com/diffplug/spotless/tree/main/plugin-gradle#requirements + id 'com.diffplug.spotless' version "6.13.0" apply false } ext { @@ -198,6 +200,9 @@ def determineCommitId() { } } +def spotlessApplyModules = [''] + + apply from: file('wrapper.gradle') if (repo != null) { @@ -793,6 +798,16 @@ subprojects { skipProjects = [ ":jmh-benchmarks", ":trogdor" ] skipConfigurations = [ "zinc" ] } + + if (project.name in spotlessApplyModules) { + apply plugin: 'com.diffplug.spotless' + spotless { + java { + importOrder('kafka', 'org.apache.kafka', 'com', 'net', 'org', 'java', 'javax', '', '\\#') + removeUnusedImports() + } + } + } } gradle.taskGraph.whenReady { taskGraph -> diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml index aff659638928b..61eb7e4b245fd 100644 @@ -82,6 +82,8 @@ + + diff --git a/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java b/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java new file mode 100644 index 0000000000000..716421f3dea2a --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common; + +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * The share group state.
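+ * A state can be looked up from its string name with {@link #parse(String)}, which matches case-insensitively and falls back to UNKNOWN for names it does not recognize.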
+ */ +public enum ShareGroupState { + UNKNOWN("Unknown"), + STABLE("Stable"), + DEAD("Dead"), + EMPTY("Empty"); + + private final static Map NAME_TO_ENUM = Arrays.stream(values()) + .collect(Collectors.toMap(state -> state.name.toUpperCase(Locale.ROOT), Function.identity())); + + private final String name; + + ShareGroupState(String name) { + this.name = name; + } + + /** + * Case-insensitive share group state lookup by string name. + */ + public static ShareGroupState parse(String name) { + ShareGroupState state = NAME_TO_ENUM.get(name.toUpperCase(Locale.ROOT)); + return state == null ? UNKNOWN : state; + } + + @Override + public String toString() { + return name; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java b/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java new file mode 100644 index 0000000000000..1e74bba199402 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/FencedStateEpochException.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +/** + * Thrown when the share coordinator rejected the request because the share-group state epoch did not match. + */ +public class FencedStateEpochException extends ApiException { + private static final long serialVersionUID = 1L; + + public FencedStateEpochException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java new file mode 100644 index 0000000000000..ae0fef5edeaef --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidRecordStateException.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.errors; + +/** + * Thrown when the acknowledgement of delivery of a record could not be completed because the record + * state is invalid. + */ +public class InvalidRecordStateException extends ApiException { + + private static final long serialVersionUID = 1L; + + public InvalidRecordStateException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java new file mode 100644 index 0000000000000..e261d8b7a8e88 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidShareSessionEpochException.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +/** + * Thrown when the share session epoch is invalid. + */ +public class InvalidShareSessionEpochException extends RetriableException { + private static final long serialVersionUID = 1L; + + public InvalidShareSessionEpochException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java b/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java new file mode 100644 index 0000000000000..2b2249f8a5831 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/ShareSessionNotFoundException.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +/** + * Thrown when the share session was not found. 
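+ * This exception is retriable: the client can establish a new share session and retry the request.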
+ */ +public class ShareSessionNotFoundException extends RetriableException { + private static final long serialVersionUID = 1L; + + public ShareSessionNotFoundException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index 16bec4fb72dc6..ffd5737ca3162 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -118,7 +118,11 @@ public enum ApiKeys { PUSH_TELEMETRY(ApiMessageType.PUSH_TELEMETRY), ASSIGN_REPLICAS_TO_DIRS(ApiMessageType.ASSIGN_REPLICAS_TO_DIRS), LIST_CLIENT_METRICS_RESOURCES(ApiMessageType.LIST_CLIENT_METRICS_RESOURCES), - DESCRIBE_TOPIC_PARTITIONS(ApiMessageType.DESCRIBE_TOPIC_PARTITIONS); + DESCRIBE_TOPIC_PARTITIONS(ApiMessageType.DESCRIBE_TOPIC_PARTITIONS), + SHARE_GROUP_HEARTBEAT(ApiMessageType.SHARE_GROUP_HEARTBEAT), + SHARE_GROUP_DESCRIBE(ApiMessageType.SHARE_GROUP_DESCRIBE), + SHARE_FETCH(ApiMessageType.SHARE_FETCH), + SHARE_ACKNOWLEDGE(ApiMessageType.SHARE_ACKNOWLEDGE); private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index 900d191c8f9d4..10ae05aa850c9 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -41,6 +41,7 @@ import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.FencedLeaderEpochException; import org.apache.kafka.common.errors.FencedMemberEpochException; +import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.FetchSessionIdNotFoundException; import org.apache.kafka.common.errors.FetchSessionTopicIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; @@ -64,12 +65,14 @@ import org.apache.kafka.common.errors.InvalidPidMappingException; import org.apache.kafka.common.errors.InvalidPrincipalTypeException; import org.apache.kafka.common.errors.InvalidProducerEpochException; +import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidRegistrationException; import org.apache.kafka.common.errors.InvalidReplicaAssignmentException; import org.apache.kafka.common.errors.InvalidReplicationFactorException; import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.errors.InvalidRequiredAcksException; import org.apache.kafka.common.errors.InvalidSessionTimeoutException; +import org.apache.kafka.common.errors.InvalidShareSessionEpochException; import org.apache.kafka.common.errors.InvalidTimestampException; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.InvalidTxnStateException; @@ -109,6 +112,7 @@ import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.SaslAuthenticationException; import org.apache.kafka.common.errors.SecurityDisabledException; +import org.apache.kafka.common.errors.ShareSessionNotFoundException; import org.apache.kafka.common.errors.SnapshotNotFoundException; import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.StaleMemberEpochException; @@ -394,7 +398,11 @@ public enum Errors 
{ UNKNOWN_SUBSCRIPTION_ID(117, "Client sent a push telemetry request with an invalid or outdated subscription ID.", UnknownSubscriptionIdException::new), TELEMETRY_TOO_LARGE(118, "Client sent a push telemetry request larger than the maximum size the broker will accept.", TelemetryTooLargeException::new), INVALID_REGISTRATION(119, "The controller has considered the broker registration to be invalid.", InvalidRegistrationException::new), - TRANSACTION_ABORTABLE(120, "The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID.", TransactionAbortableException::new); + TRANSACTION_ABORTABLE(120, "The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID.", TransactionAbortableException::new), + INVALID_RECORD_STATE(121, "The record state is invalid. The acknowledgement of delivery could not be completed.", InvalidRecordStateException::new), + SHARE_SESSION_NOT_FOUND(122, "The share session was not found.", ShareSessionNotFoundException::new), + INVALID_SHARE_SESSION_EPOCH(123, "The share session epoch is invalid.", InvalidShareSessionEpochException::new), + FENCED_STATE_EPOCH(124, "The share coordinator rejected the request because the share-group state epoch did not match.", FencedStateEpochException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java index b51221f5af642..589e163992b22 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java @@ -326,6 +326,14 @@ private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, return ListClientMetricsResourcesRequest.parse(buffer, apiVersion); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsRequest.parse(buffer, apiVersion); + case SHARE_GROUP_HEARTBEAT: + return ShareGroupHeartbeatRequest.parse(buffer, apiVersion); + case SHARE_GROUP_DESCRIBE: + return ShareGroupDescribeRequest.parse(buffer, apiVersion); + case SHARE_FETCH: + return ShareFetchRequest.parse(buffer, apiVersion); + case SHARE_ACKNOWLEDGE: + return ShareAcknowledgeRequest.parse(buffer, apiVersion); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseRequest`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index dbafdbf3bcb07..5534168098e9d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -263,6 +263,14 @@ public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer response return ListClientMetricsResourcesResponse.parse(responseBuffer, version); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsResponse.parse(responseBuffer, version); + case SHARE_GROUP_HEARTBEAT: + return ShareGroupHeartbeatResponse.parse(responseBuffer, version); + case SHARE_GROUP_DESCRIBE: + return ShareGroupDescribeResponse.parse(responseBuffer, version); + case SHARE_FETCH: + return ShareFetchResponse.parse(responseBuffer, version); + case SHARE_ACKNOWLEDGE: + return 
ShareAcknowledgeResponse.parse(responseBuffer, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java new file mode 100644 index 0000000000000..1b77b43be33c1 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.message.ShareAcknowledgeRequestData; +import org.apache.kafka.common.message.ShareAcknowledgeResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ShareAcknowledgeRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + + private final ShareAcknowledgeRequestData data; + + public Builder(ShareAcknowledgeRequestData data) { + this(data, false); + } + + public Builder(ShareAcknowledgeRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.SHARE_ACKNOWLEDGE, enableUnstableLastVersion); + this.data = data; + } + + public static ShareAcknowledgeRequest.Builder forConsumer(String groupId, ShareFetchMetadata metadata, + Map> acknowledgementsMap) { + ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData(); + data.setGroupId(groupId); + if (metadata != null) { + data.setMemberId(metadata.memberId().toString()); + data.setShareSessionEpoch(metadata.epoch()); + } + + // Build a map of topics to acknowledge keyed by topic ID, and within each a map of partitions keyed by index + Map> ackMap = new HashMap<>(); + + for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { + TopicIdPartition tip = acknowledgeEntry.getKey(); + Map partMap = ackMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); + ShareAcknowledgeRequestData.AcknowledgePartition ackPartition = partMap.get(tip.partition()); + if (ackPartition == null) { + ackPartition = new ShareAcknowledgeRequestData.AcknowledgePartition() + .setPartitionIndex(tip.partition()); + partMap.put(tip.partition(), ackPartition); + } + ackPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); + } + + // Finally, build up the data to fetch + data.setTopics(new ArrayList<>()); + 
ackMap.forEach((topicId, partMap) -> { + ShareAcknowledgeRequestData.AcknowledgeTopic ackTopic = new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopicId(topicId) + .setPartitions(new ArrayList<>()); + data.topics().add(ackTopic); + + partMap.forEach((index, ackPartition) -> ackTopic.partitions().add(ackPartition)); + }); + + return new ShareAcknowledgeRequest.Builder(data, true); + } + + public ShareAcknowledgeRequestData data() { + return data; + } + + @Override + public ShareAcknowledgeRequest build(short version) { + return new ShareAcknowledgeRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final ShareAcknowledgeRequestData data; + + public ShareAcknowledgeRequest(ShareAcknowledgeRequestData data, short version) { + super(ApiKeys.SHARE_ACKNOWLEDGE, version); + this.data = data; + } + + @Override + public ShareAcknowledgeRequestData data() { + return data; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + Errors error = Errors.forException(e); + return new ShareAcknowledgeResponse(new ShareAcknowledgeResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(error.code())); + } + + public static ShareAcknowledgeRequest parse(ByteBuffer buffer, short version) { + return new ShareAcknowledgeRequest( + new ShareAcknowledgeRequestData(new ByteBufferAccessor(buffer), version), + version + ); + } +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java new file mode 100644 index 0000000000000..5cab233dccac8 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.message.ShareAcknowledgeResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * Possible error codes. 
+ * - {@link Errors#GROUP_AUTHORIZATION_FAILED} + * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} + * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} + * - {@link Errors#NOT_LEADER_OR_FOLLOWER} + * - {@link Errors#UNKNOWN_TOPIC_ID} + * - {@link Errors#INVALID_RECORD_STATE} + * - {@link Errors#KAFKA_STORAGE_ERROR} + * - {@link Errors#INVALID_REQUEST} + * - {@link Errors#UNKNOWN_SERVER_ERROR} + */ +public class ShareAcknowledgeResponse extends AbstractResponse { + + private final ShareAcknowledgeResponseData data; + + public ShareAcknowledgeResponse(ShareAcknowledgeResponseData data) { + super(ApiKeys.SHARE_ACKNOWLEDGE); + this.data = data; + } + + public Errors error() { + return Errors.forCode(data.errorCode()); + } + + @Override + public ShareAcknowledgeResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + HashMap counts = new HashMap<>(); + updateErrorCounts(counts, Errors.forCode(data.errorCode())); + data.responses().forEach( + topic -> topic.partitions().forEach( + partition -> updateErrorCounts(counts, Errors.forCode(partition.errorCode())) + ) + ); + return counts; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public static ShareAcknowledgeResponse parse(ByteBuffer buffer, short version) { + return new ShareAcknowledgeResponse( + new ShareAcknowledgeResponseData(new ByteBufferAccessor(buffer), version) + ); + } + + private static boolean matchingTopic(ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse previousTopic, TopicIdPartition currentTopic) { + if (previousTopic == null) + return false; + return previousTopic.topicId().equals(currentTopic.topicId()); + } + + public static ShareAcknowledgeResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { + return partitionResponse(topicIdPartition.topicPartition().partition(), error); + } + + public static ShareAcknowledgeResponseData.PartitionData partitionResponse(int partition, Errors error) { + return new ShareAcknowledgeResponseData.PartitionData() + .setPartitionIndex(partition) + .setErrorCode(error.code()); + } + + public static ShareAcknowledgeResponse of(Errors error, + int throttleTimeMs, + LinkedHashMap responseData, + List nodeEndpoints) { + return new ShareAcknowledgeResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints)); + } + + public static ShareAcknowledgeResponseData toMessage(Errors error, int throttleTimeMs, + Iterator> partIterator, + List nodeEndpoints) { + Map topicResponseList = new LinkedHashMap<>(); + while (partIterator.hasNext()) { + Map.Entry entry = partIterator.next(); + ShareAcknowledgeResponseData.PartitionData partitionData = entry.getValue(); + // Since PartitionData alone doesn't know the partition ID, we set it here + partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); + // Checking if the topic is already present in the map + if (topicResponseList.containsKey(entry.getKey().topicId())) { + topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); + } else { + List partitionResponses = new ArrayList<>(); + partitionResponses.add(partitionData); + topicResponseList.put(entry.getKey().topicId(), new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() + .setTopicId(entry.getKey().topicId()) + .setPartitions(partitionResponses)); + } + } + ShareAcknowledgeResponseData data = 
new ShareAcknowledgeResponseData(); + // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list + nodeEndpoints.forEach(endpoint -> data.nodeEndpoints().add( + new ShareAcknowledgeResponseData.NodeEndpoint() + .setNodeId(endpoint.id()) + .setHost(endpoint.host()) + .setPort(endpoint.port()) + .setRack(endpoint.rack()))); + return data.setThrottleTimeMs(throttleTimeMs) + .setErrorCode(error.code()) + .setResponses(new ArrayList<>(topicResponseList.values())); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java new file mode 100644 index 0000000000000..4e5bcc2237e43 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchMetadata.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.Uuid; + +public class ShareFetchMetadata { + /** + * The first epoch. When used in a ShareFetch request, indicates that the client + * wants to create a session. + */ + public static final int INITIAL_EPOCH = 0; + + /** + * An invalid epoch. When used in a ShareFetch request, indicates that the client + * wants to close an existing session. + */ + public static final int FINAL_EPOCH = -1; + + /** + * + */ + public boolean isNewSession() { + return epoch == INITIAL_EPOCH; + } + + /** + * Returns true if this is a full share fetch request. + */ + public boolean isFull() { + return (this.epoch == INITIAL_EPOCH) || (this.epoch == FINAL_EPOCH); + } + + /** + * Returns the next epoch. + * + * @param prevEpoch The previous epoch. + * @return The next epoch. + */ + public static int nextEpoch(int prevEpoch) { + if (prevEpoch < 0) { + // The next epoch after FINAL_EPOCH is always FINAL_EPOCH itself. + return FINAL_EPOCH; + } else if (prevEpoch == Integer.MAX_VALUE) { + return 1; + } else { + return prevEpoch + 1; + } + } + + /** + * The member ID. + */ + private final Uuid memberId; + + /** + * The share session epoch. 
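+     * The epoch is {@link #INITIAL_EPOCH} when the session is created, increments by one for each
+     * subsequent request (wrapping from Integer.MAX_VALUE back to 1 because 0 and -1 are reserved),
+     * and is {@link #FINAL_EPOCH} when the session is being closed. A typical session therefore
+     * advances 0, 1, 2, ... and finishes with -1.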
+ */ + private final int epoch; + + public ShareFetchMetadata(Uuid memberId, int epoch) { + this.memberId = memberId; + this.epoch = epoch; + } + + public static ShareFetchMetadata initialEpoch(Uuid memberId) { + return new ShareFetchMetadata(memberId, INITIAL_EPOCH); + } + + public ShareFetchMetadata nextEpoch() { + return new ShareFetchMetadata(memberId, nextEpoch(epoch)); + } + + public ShareFetchMetadata nextCloseExistingAttemptNew() { + return new ShareFetchMetadata(memberId, INITIAL_EPOCH); + } + + public ShareFetchMetadata finalEpoch() { + return new ShareFetchMetadata(memberId, FINAL_EPOCH); + } + + public Uuid memberId() { + return memberId; + } + + public int epoch() { + return epoch; + } + + public boolean isFinalEpoch() { + return epoch == FINAL_EPOCH; + } + + public String toString() { + StringBuilder bld = new StringBuilder(); + bld.append("(memberId=").append(memberId).append(", "); + if (epoch == INITIAL_EPOCH) { + bld.append("epoch=INITIAL)"); + } else if (epoch == FINAL_EPOCH) { + bld.append("epoch=FINAL)"); + } else { + bld.append("epoch=").append(epoch).append(")"); + } + return bld.toString(); + } +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java new file mode 100644 index 0000000000000..385e802a691a9 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java @@ -0,0 +1,267 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.message.ShareFetchRequestData; +import org.apache.kafka.common.message.ShareFetchResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class ShareFetchRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + + private final ShareFetchRequestData data; + + public Builder(ShareFetchRequestData data) { + this(data, false); + } + + public Builder(ShareFetchRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.SHARE_FETCH, enableUnstableLastVersion); + this.data = data; + } + + public static Builder forConsumer(String groupId, ShareFetchMetadata metadata, + int maxWait, int minBytes, int maxBytes, int fetchSize, + List send, List forget, + Map> acknowledgementsMap) { + ShareFetchRequestData data = new ShareFetchRequestData(); + data.setGroupId(groupId); + int ackOnlyPartitionMaxBytes = fetchSize; + boolean isClosingShareSession = false; + if (metadata != null) { + data.setMemberId(metadata.memberId().toString()); + data.setShareSessionEpoch(metadata.epoch()); + if (metadata.isFinalEpoch()) { + isClosingShareSession = true; + ackOnlyPartitionMaxBytes = 0; + } + } + data.setMaxWaitMs(maxWait); + data.setMinBytes(minBytes); + data.setMaxBytes(maxBytes); + + // Build a map of topics to fetch keyed by topic ID, and within each a map of partitions keyed by index + Map> fetchMap = new HashMap<>(); + + // First, start by adding the list of topic-partitions we are fetching + if (!isClosingShareSession) { + for (TopicIdPartition tip : send) { + Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); + ShareFetchRequestData.FetchPartition fetchPartition = new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(tip.partition()) + .setPartitionMaxBytes(fetchSize); + partMap.put(tip.partition(), fetchPartition); + } + } + + // Next, add acknowledgements that we are piggybacking onto the fetch. 
Generally, the list of + // topic-partitions will be a subset, but if the assignment changes, there might be new entries to add + for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { + TopicIdPartition tip = acknowledgeEntry.getKey(); + Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); + ShareFetchRequestData.FetchPartition fetchPartition = partMap.get(tip.partition()); + if (fetchPartition == null) { + fetchPartition = new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(tip.partition()) + .setPartitionMaxBytes(ackOnlyPartitionMaxBytes); + partMap.put(tip.partition(), fetchPartition); + } + fetchPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); + } + + // Build up the data to fetch + if (!fetchMap.isEmpty()) { + data.setTopics(new ArrayList<>()); + fetchMap.forEach((topicId, partMap) -> { + ShareFetchRequestData.FetchTopic fetchTopic = new ShareFetchRequestData.FetchTopic() + .setTopicId(topicId) + .setPartitions(new ArrayList<>()); + partMap.forEach((index, fetchPartition) -> fetchTopic.partitions().add(fetchPartition)); + data.topics().add(fetchTopic); + }); + } + + // And finally, forget the topic-partitions that are no longer in the session + if (!forget.isEmpty()) { + Map> forgetMap = new HashMap<>(); + for (TopicIdPartition tip : forget) { + List partList = forgetMap.computeIfAbsent(tip.topicId(), k -> new ArrayList<>()); + partList.add(tip.partition()); + } + data.setForgottenTopicsData(new ArrayList<>()); + forgetMap.forEach((topicId, partList) -> { + ShareFetchRequestData.ForgottenTopic forgetTopic = new ShareFetchRequestData.ForgottenTopic() + .setTopicId(topicId) + .setPartitions(new ArrayList<>()); + partList.forEach(index -> forgetTopic.partitions().add(index)); + data.forgottenTopicsData().add(forgetTopic); + }); + } + + return new Builder(data, true); + } + + public ShareFetchRequestData data() { + return data; + } + + @Override + public ShareFetchRequest build(short version) { + return new ShareFetchRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final ShareFetchRequestData data; + private volatile LinkedHashMap shareFetchData = null; + private volatile List toForget = null; + + public ShareFetchRequest(ShareFetchRequestData data, short version) { + super(ApiKeys.SHARE_FETCH, version); + this.data = data; + } + + @Override + public ShareFetchRequestData data() { + return data; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + Errors error = Errors.forException(e); + return new ShareFetchResponse(new ShareFetchResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(error.code())); + } + + public static ShareFetchRequest parse(ByteBuffer buffer, short version) { + return new ShareFetchRequest( + new ShareFetchRequestData(new ByteBufferAccessor(buffer), version), + version + ); + } + + public static final class SharePartitionData { + public final Uuid topicId; + public final int maxBytes; + + public SharePartitionData( + Uuid topicId, + int maxBytes + ) { + this.topicId = topicId; + this.maxBytes = maxBytes; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShareFetchRequest.SharePartitionData that = (ShareFetchRequest.SharePartitionData) o; + return Objects.equals(topicId, that.topicId) && + maxBytes == that.maxBytes; + } + + @Override + public int hashCode() { + return 
Objects.hash(topicId, maxBytes); + } + + @Override + public String toString() { + return "SharePartitionData(" + + "topicId=" + topicId + + ", maxBytes=" + maxBytes + + ')'; + } + } + + public int minBytes() { + return data.minBytes(); + } + + public int maxBytes() { + return data.maxBytes(); + } + + public int maxWait() { + return data.maxWaitMs(); + } + + public Map shareFetchData(Map topicNames) { + if (shareFetchData == null) { + synchronized (this) { + if (shareFetchData == null) { + // Assigning the lazy-initialized `shareFetchData` in the last step + // to avoid other threads accessing a half-initialized object. + final LinkedHashMap shareFetchDataTmp = new LinkedHashMap<>(); + data.topics().forEach(shareFetchTopic -> { + String name = topicNames.get(shareFetchTopic.topicId()); + shareFetchTopic.partitions().forEach(shareFetchPartition -> { + // Topic name may be null here if the topic name was unable to be resolved using the topicNames map. + shareFetchDataTmp.put(new TopicIdPartition(shareFetchTopic.topicId(), new TopicPartition(name, shareFetchPartition.partitionIndex())), + new ShareFetchRequest.SharePartitionData( + shareFetchTopic.topicId(), + shareFetchPartition.partitionMaxBytes() + ) + ); + }); + }); + shareFetchData = shareFetchDataTmp; + } + } + } + return shareFetchData; + } + + public List forgottenTopics(Map topicNames) { + if (toForget == null) { + synchronized (this) { + if (toForget == null) { + // Assigning the lazy-initialized `toForget` in the last step + // to avoid other threads accessing a half-initialized object. + final List toForgetTmp = new ArrayList<>(); + data.forgottenTopicsData().forEach(forgottenTopic -> { + String name = topicNames.get(forgottenTopic.topicId()); + // Topic name may be null here if the topic name was unable to be resolved using the topicNames map. + forgottenTopic.partitions().forEach(partitionId -> toForgetTmp.add(new TopicIdPartition(forgottenTopic.topicId(), new TopicPartition(name, partitionId)))); + }); + toForget = toForgetTmp; + } + } + } + return toForget; + } +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java new file mode 100644 index 0000000000000..b33969e0efa41 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.message.ShareFetchResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.ObjectSerializationCache; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.Records; + +import java.nio.ByteBuffer; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Iterator; +import java.util.Collections; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + + +/** + * Possible error codes. + * - {@link Errors#GROUP_AUTHORIZATION_FAILED} + * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} + * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} + * - {@link Errors#NOT_LEADER_OR_FOLLOWER} + * - {@link Errors#UNKNOWN_TOPIC_ID} + * - {@link Errors#INVALID_RECORD_STATE} + * - {@link Errors#KAFKA_STORAGE_ERROR} + * - {@link Errors#CORRUPT_MESSAGE} + * - {@link Errors#INVALID_REQUEST} + * - {@link Errors#UNKNOWN_SERVER_ERROR} + */ +public class ShareFetchResponse extends AbstractResponse { + + private final ShareFetchResponseData data; + + private volatile LinkedHashMap responseData = null; + + public ShareFetchResponse(ShareFetchResponseData data) { + super(ApiKeys.SHARE_FETCH); + this.data = data; + } + + public Errors error() { + return Errors.forCode(data.errorCode()); + } + + @Override + public ShareFetchResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + HashMap counts = new HashMap<>(); + updateErrorCounts(counts, Errors.forCode(data.errorCode())); + data.responses().forEach( + topic -> topic.partitions().forEach( + partition -> updateErrorCounts(counts, Errors.forCode(partition.errorCode())) + ) + ); + return counts; + } + + public LinkedHashMap responseData(Map topicNames) { + if (responseData == null) { + synchronized (this) { + // Assigning the lazy-initialized `responseData` in the last step + // to avoid other threads accessing a half-initialized object. + if (responseData == null) { + final LinkedHashMap responseDataTmp = new LinkedHashMap<>(); + data.responses().forEach(topicResponse -> { + String name = topicNames.get(topicResponse.topicId()); + if (name != null) { + topicResponse.partitions().forEach(partitionData -> responseDataTmp.put(new TopicIdPartition(topicResponse.topicId(), + new TopicPartition(name, partitionData.partitionIndex())), partitionData)); + } + }); + responseData = responseDataTmp; + } + } + } + return responseData; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public static ShareFetchResponse parse(ByteBuffer buffer, short version) { + return new ShareFetchResponse( + new ShareFetchResponseData(new ByteBufferAccessor(buffer), version) + ); + } + + /** + * Returns `partition.records` as `Records` (instead of `BaseRecords`). If `records` is `null`, returns `MemoryRecords.EMPTY`. + * + *
If this response was deserialized after a share fetch, this method should never fail. An example where this would + * fail is a down-converted response (e.g. LazyDownConversionRecords) on the broker (before it's serialized and + * sent on the wire). + * + * @param partition partition data + * @return Records or empty record if the records in PartitionData is null. + */ + public static Records recordsOrFail(ShareFetchResponseData.PartitionData partition) { + if (partition.records() == null) return MemoryRecords.EMPTY; + if (partition.records() instanceof Records) return (Records) partition.records(); + throw new ClassCastException("The record type is " + partition.records().getClass().getSimpleName() + ", which is not a subtype of " + + Records.class.getSimpleName() + ". This method is only safe to call if the `ShareFetchResponse` was deserialized from bytes."); + } + + /** + * Convenience method to find the size of a response. + * + * @param version The version of the request + * @param partIterator The partition iterator. + * @return The response size in bytes. + */ + public static int sizeOf(short version, + Iterator> partIterator) { + // Since the throttleTimeMs and metadata field sizes are constant and fixed, we can + // use arbitrary values here without affecting the result. + ShareFetchResponseData data = toMessage(Errors.NONE, 0, partIterator, Collections.emptyList()); + ObjectSerializationCache cache = new ObjectSerializationCache(); + return 4 + data.size(cache, version); + } + + /** + * @return The size in bytes of the records. 0 is returned if records of input partition is null. + */ + public static int recordsSize(ShareFetchResponseData.PartitionData partition) { + return partition.records() == null ? 0 : partition.records().sizeInBytes(); + } + + public static ShareFetchResponse of(Errors error, + int throttleTimeMs, + LinkedHashMap responseData, + List nodeEndpoints) { + return new ShareFetchResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints)); + } + + public static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs, + Iterator> partIterator, + List nodeEndpoints) { + Map topicResponseList = new LinkedHashMap<>(); + while (partIterator.hasNext()) { + Map.Entry entry = partIterator.next(); + ShareFetchResponseData.PartitionData partitionData = entry.getValue(); + // Since PartitionData alone doesn't know the partition ID, we set it here + partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); + // Checking if the topic is already present in the map + if (topicResponseList.containsKey(entry.getKey().topicId())) { + topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); + } else { + List partitionResponses = new ArrayList<>(); + partitionResponses.add(partitionData); + topicResponseList.put(entry.getKey().topicId(), new ShareFetchResponseData.ShareFetchableTopicResponse() + .setTopicId(entry.getKey().topicId()) + .setPartitions(partitionResponses)); + } + } + ShareFetchResponseData data = new ShareFetchResponseData(); + // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list + nodeEndpoints.forEach(endpoint -> data.nodeEndpoints().add( + new ShareFetchResponseData.NodeEndpoint() + .setNodeId(endpoint.id()) + .setHost(endpoint.host()) + .setPort(endpoint.port()) + .setRack(endpoint.rack()))); + return data.setThrottleTimeMs(throttleTimeMs) + .setErrorCode(error.code()) + .setResponses(new ArrayList<>(topicResponseList.values())); + } + 
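+    // Illustrative sketch only (not part of this change): building a one-partition response with the
+    // helpers in this class, assuming a TopicIdPartition `tip` is already in scope.
+    //
+    //   LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> results = new LinkedHashMap<>();
+    //   results.put(tip, partitionResponse(tip, Errors.NOT_LEADER_OR_FOLLOWER));
+    //   ShareFetchResponse response = of(Errors.NONE, 0, results, Collections.emptyList());
+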
+ public static ShareFetchResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { + return partitionResponse(topicIdPartition.topicPartition().partition(), error); + } + + public static ShareFetchResponseData.PartitionData partitionResponse(int partition, Errors error) { + return new ShareFetchResponseData.PartitionData() + .setPartitionIndex(partition) + .setErrorCode(error.code()); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java new file mode 100644 index 0000000000000..25c02e4a83c5e --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ShareGroupDescribeRequestData; +import org.apache.kafka.common.message.ShareGroupDescribeResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.stream.Collectors; + +public class ShareGroupDescribeRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + + private final ShareGroupDescribeRequestData data; + + public Builder(ShareGroupDescribeRequestData data) { + this(data, false); + } + + public Builder(ShareGroupDescribeRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.SHARE_GROUP_DESCRIBE, enableUnstableLastVersion); + this.data = data; + } + + @Override + public ShareGroupDescribeRequest build(short version) { + return new ShareGroupDescribeRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final ShareGroupDescribeRequestData data; + + public ShareGroupDescribeRequest(ShareGroupDescribeRequestData data, short version) { + super(ApiKeys.SHARE_GROUP_DESCRIBE, version); + this.data = data; + } + + @Override + public ShareGroupDescribeResponse getErrorResponse(int throttleTimeMs, Throwable e) { + ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData() + .setThrottleTimeMs(throttleTimeMs); + // Set error for each group + short errorCode = Errors.forException(e).code(); + this.data.groupIds().forEach( + groupId -> data.groups().add( + new ShareGroupDescribeResponseData.DescribedGroup() + .setGroupId(groupId) + .setErrorCode(errorCode) + ) + ); + return new ShareGroupDescribeResponse(data); + } + + @Override + public ShareGroupDescribeRequestData data() { + return data; + } + + public static 
ShareGroupDescribeRequest parse(ByteBuffer buffer, short version) { + return new ShareGroupDescribeRequest( + new ShareGroupDescribeRequestData(new ByteBufferAccessor(buffer), version), + version + ); + } + + public static List getErrorDescribedGroupList( + List groupIds, + Errors error + ) { + return groupIds.stream() + .map(groupId -> new ShareGroupDescribeResponseData.DescribedGroup() + .setGroupId(groupId) + .setErrorCode(error.code()) + ).collect(Collectors.toList()); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java new file mode 100644 index 0000000000000..95dd371eedfa7 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ShareGroupDescribeResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +/** + * Possible error codes. 
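+ * These errors are reported per group via the errorCode of each DescribedGroup, not at the top level: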
+ * + * - {@link Errors#GROUP_AUTHORIZATION_FAILED} + * - {@link Errors#NOT_COORDINATOR} + * - {@link Errors#COORDINATOR_NOT_AVAILABLE} + * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} + * - {@link Errors#INVALID_REQUEST} + * - {@link Errors#INVALID_GROUP_ID} + * - {@link Errors#GROUP_ID_NOT_FOUND} + */ +public class ShareGroupDescribeResponse extends AbstractResponse { + + private final ShareGroupDescribeResponseData data; + + public ShareGroupDescribeResponse(ShareGroupDescribeResponseData data) { + super(ApiKeys.SHARE_GROUP_DESCRIBE); + this.data = data; + } + + @Override + public ShareGroupDescribeResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + HashMap counts = new HashMap<>(); + data.groups().forEach( + group -> updateErrorCounts(counts, Errors.forCode(group.errorCode())) + ); + return counts; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public static ShareGroupDescribeResponse parse(ByteBuffer buffer, short version) { + return new ShareGroupDescribeResponse( + new ShareGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) + ); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java new file mode 100644 index 0000000000000..7e112ef29dd14 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import java.nio.ByteBuffer; + +import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; +import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +public class ShareGroupHeartbeatRequest extends AbstractRequest { + /** + * A member epoch of -1 means that the member wants to leave the group. + */ + public static final int LEAVE_GROUP_MEMBER_EPOCH = -1; + + /** + * A member epoch of 0 means that the member wants to join the group. 
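+     * The coordinator generates the member ID for such a joining member and returns it in the first
+     * heartbeat response.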
+ */ + public static final int JOIN_GROUP_MEMBER_EPOCH = 0; + + public static class Builder extends AbstractRequest.Builder { + private final ShareGroupHeartbeatRequestData data; + + public Builder(ShareGroupHeartbeatRequestData data) { + this(data, true); + } + + public Builder(ShareGroupHeartbeatRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.SHARE_GROUP_HEARTBEAT, enableUnstableLastVersion); + this.data = data; + } + + @Override + public ShareGroupHeartbeatRequest build(short version) { + return new ShareGroupHeartbeatRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final ShareGroupHeartbeatRequestData data; + + public ShareGroupHeartbeatRequest(ShareGroupHeartbeatRequestData data, short version) { + super(ApiKeys.SHARE_GROUP_HEARTBEAT, version); + this.data = data; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + return new ShareGroupHeartbeatResponse( + new ShareGroupHeartbeatResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(Errors.forException(e).code()) + ); + } + + @Override + public ShareGroupHeartbeatRequestData data() { + return data; + } + + public static ShareGroupHeartbeatRequest parse(ByteBuffer buffer, short version) { + return new ShareGroupHeartbeatRequest(new ShareGroupHeartbeatRequestData( + new ByteBufferAccessor(buffer), version), version); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java new file mode 100644 index 0000000000000..de05d44aebecb --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatResponse.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Map; + +/** + * Possible error codes. 
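+ * The error is reported once at the top level for the whole heartbeat: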
+ * + * - {@link Errors#GROUP_AUTHORIZATION_FAILED} + * - {@link Errors#NOT_COORDINATOR} + * - {@link Errors#COORDINATOR_NOT_AVAILABLE} + * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} + * - {@link Errors#INVALID_REQUEST} + * - {@link Errors#UNKNOWN_MEMBER_ID} + * - {@link Errors#GROUP_MAX_SIZE_REACHED} + */ +public class ShareGroupHeartbeatResponse extends AbstractResponse { + private final ShareGroupHeartbeatResponseData data; + + public ShareGroupHeartbeatResponse(ShareGroupHeartbeatResponseData data) { + super(ApiKeys.SHARE_GROUP_HEARTBEAT); + this.data = data; + } + + @Override + public ShareGroupHeartbeatResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public static ShareGroupHeartbeatResponse parse(ByteBuffer buffer, short version) { + return new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData( + new ByteBufferAccessor(buffer), version)); + } +} diff --git a/clients/src/main/resources/common/message/FindCoordinatorRequest.json b/clients/src/main/resources/common/message/FindCoordinatorRequest.json index 42b2f4c891ad5..43e6fe5014b26 100644 --- a/clients/src/main/resources/common/message/FindCoordinatorRequest.json +++ b/clients/src/main/resources/common/message/FindCoordinatorRequest.json @@ -27,7 +27,9 @@ // Version 4 adds support for batching via CoordinatorKeys (KIP-699) // // Version 5 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - "validVersions": "0-5", + // + // Version 6 adds support for share groups (KIP-932). + "validVersions": "0-6", "deprecatedVersions": "0", "flexibleVersions": "3+", "fields": [ diff --git a/clients/src/main/resources/common/message/FindCoordinatorResponse.json b/clients/src/main/resources/common/message/FindCoordinatorResponse.json index 860d655a252b2..be0479f908c96 100644 --- a/clients/src/main/resources/common/message/FindCoordinatorResponse.json +++ b/clients/src/main/resources/common/message/FindCoordinatorResponse.json @@ -26,7 +26,9 @@ // Version 4 adds support for batching via Coordinators (KIP-699) // // Version 5 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - "validVersions": "0-5", + // + // Version 6 adds support for share groups (KIP-932). + "validVersions": "0-6", "flexibleVersions": "3+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ListGroupsRequest.json b/clients/src/main/resources/common/message/ListGroupsRequest.json index 32defaa203382..a872165d516cf 100644 --- a/clients/src/main/resources/common/message/ListGroupsRequest.json +++ b/clients/src/main/resources/common/message/ListGroupsRequest.json @@ -25,7 +25,9 @@ // Version 4 adds the StatesFilter field (KIP-518). // // Version 5 adds the TypesFilter field (KIP-848). - "validVersions": "0-5", + // + // Version 6 adds support for share groups (KIP-932). 
+ "validVersions": "0-6", "flexibleVersions": "3+", "fields": [ { "name": "StatesFilter", "type": "[]string", "versions": "4+", diff --git a/clients/src/main/resources/common/message/ListGroupsResponse.json b/clients/src/main/resources/common/message/ListGroupsResponse.json index fc4077c080f46..77f1c89e34a38 100644 --- a/clients/src/main/resources/common/message/ListGroupsResponse.json +++ b/clients/src/main/resources/common/message/ListGroupsResponse.json @@ -27,7 +27,9 @@ // Version 4 adds the GroupState field (KIP-518). // // Version 5 adds the GroupType field (KIP-848). - "validVersions": "0-5", + // + // Version 6 adds support for share groups (KIP-932). + "validVersions": "0-6", "flexibleVersions": "3+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json b/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json new file mode 100644 index 0000000000000..db534cb4c1c13 --- /dev/null +++ b/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 79, + "type": "request", + "listeners": ["broker"], + "name": "ShareAcknowledgeRequest", + "validVersions": "0", + "flexibleVersions": "0+", + // The ShareAcknowledgeRequest API is added as part of KIP-932 and is still under + // development. Hence, the API is not exposed by default by brokers unless + // explicitly enabled. + "latestVersionUnstable": true, + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", + "about": "The group identifier." }, + { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", + "about": "The member ID." }, + { "name": "ShareSessionEpoch", "type": "int32", "versions": "0+", + "about": "The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests." }, + { "name": "Topics", "type": "[]AcknowledgeTopic", "versions": "0+", + "about": "The topics containing records to acknowledge.", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, + { "name": "Partitions", "type": "[]AcknowledgePartition", "versions": "0+", + "about": "The partitions containing records to acknowledge.", "fields": [ + { "name": "PartitionIndex", "type": "int32", "versions": "0+", + "about": "The partition index." 
}, + { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", + "about": "Record batches to acknowledge.", "fields": [ + { "name": "FirstOffset", "type": "int64", "versions": "0+", + "about": "First offset of batch of records to acknowledge."}, + { "name": "LastOffset", "type": "int64", "versions": "0+", + "about": "Last offset (inclusive) of batch of records to acknowledge."}, + { "name": "AcknowledgeTypes", "type": "[]int8", "versions": "0+", + "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject."} + ]} + ]} + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json new file mode 100644 index 0000000000000..638ca10c64b3b --- /dev/null +++ b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 79, + "type": "response", + "name": "ShareAcknowledgeResponse", + "validVersions": "0", + "flexibleVersions": "0+", + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - TOPIC_AUTHORIZATION_FAILED (version 0+) + // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) + // - SHARE_SESSION_NOT_FOUND (version 0+) + // - INVALID_SHARE_SESSION_EPOCH (version 0+) + // - NOT_LEADER_OR_FOLLOWER (version 0+) + // - UNKNOWN_TOPIC_ID (version 0+) + // - INVALID_RECORD_STATE (version 0+) + // - KAFKA_STORAGE_ERROR (version 0+) + // - INVALID_REQUEST (version 0+) + // - UNKNOWN_SERVER_ERROR (version 0+) + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", "ignorable": true, + "about": "The top level response error code." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, + { "name": "Responses", "type": "[]ShareAcknowledgeTopicResponse", "versions": "0+", + "about": "The response topics.", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The topic partitions.", "fields": [ + { "name": "PartitionIndex", "type": "int32", "versions": "0+", + "about": "The partition index." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The error code, or 0 if there was no error." 
}, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The error message, or null if there was no error." }, + { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", "fields": [ + { "name": "LeaderId", "type": "int32", "versions": "0+", + "about": "The ID of the current leader or -1 if the leader is unknown." }, + { "name": "LeaderEpoch", "type": "int32", "versions": "0+", + "about": "The latest known leader epoch." } + ]} + ]} + ]}, + { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "0+", + "about": "Endpoints for all current leaders enumerated in PartitionData with error NOT_LEADER_OR_FOLLOWER.", "fields": [ + { "name": "NodeId", "type": "int32", "versions": "0+", + "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, + { "name": "Host", "type": "string", "versions": "0+", + "about": "The node's hostname." }, + { "name": "Port", "type": "int32", "versions": "0+", + "about": "The node's port." }, + { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The rack of the node, or null if it has not been assigned to a rack." } + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareFetchRequest.json b/clients/src/main/resources/common/message/ShareFetchRequest.json new file mode 100644 index 0000000000000..d0b59dcb26a80 --- /dev/null +++ b/clients/src/main/resources/common/message/ShareFetchRequest.json @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 78, + "type": "request", + "listeners": ["broker"], + "name": "ShareFetchRequest", + "validVersions": "0", + "flexibleVersions": "0+", + // The ShareFetchRequest API is added as part of KIP-932 and is still under + // development. Hence, the API is not exposed by default by brokers unless + // explicitly enabled. + "latestVersionUnstable": true, + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", + "about": "The group identifier." }, + { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", + "about": "The member ID." }, + { "name": "ShareSessionEpoch", "type": "int32", "versions": "0+", + "about": "The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests." }, + { "name": "MaxWaitMs", "type": "int32", "versions": "0+", + "about": "The maximum time in milliseconds to wait for the response." }, + { "name": "MinBytes", "type": "int32", "versions": "0+", + "about": "The minimum bytes to accumulate in the response." 
}, + { "name": "MaxBytes", "type": "int32", "versions": "0+", "default": "0x7fffffff", "ignorable": true, + "about": "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored." }, + { "name": "Topics", "type": "[]FetchTopic", "versions": "0+", + "about": "The topics to fetch.", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, + { "name": "Partitions", "type": "[]FetchPartition", "versions": "0+", + "about": "The partitions to fetch.", "fields": [ + { "name": "PartitionIndex", "type": "int32", "versions": "0+", + "about": "The partition index." }, + { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", + "about": "The maximum bytes to fetch from this partition. 0 when only acknowledgement with no fetching is required. See KIP-74 for cases where this limit may not be honored." }, + { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", + "about": "Record batches to acknowledge.", "fields": [ + { "name": "FirstOffset", "type": "int64", "versions": "0+", + "about": "First offset of batch of records to acknowledge."}, + { "name": "LastOffset", "type": "int64", "versions": "0+", + "about": "Last offset (inclusive) of batch of records to acknowledge."}, + { "name": "AcknowledgeTypes", "type": "[]int8", "versions": "0+", + "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject."} + ]} + ]} + ]}, + { "name": "ForgottenTopicsData", "type": "[]ForgottenTopic", "versions": "0+", "ignorable": false, + "about": "The partitions to remove from this share session.", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions indexes to forget." } + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareFetchResponse.json b/clients/src/main/resources/common/message/ShareFetchResponse.json new file mode 100644 index 0000000000000..5338e1208a7bc --- /dev/null +++ b/clients/src/main/resources/common/message/ShareFetchResponse.json @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 78, + "type": "response", + "name": "ShareFetchResponse", + "validVersions": "0", + "flexibleVersions": "0+", + // Supported errors for ErrorCode and AcknowledgeErrorCode: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - TOPIC_AUTHORIZATION_FAILED (version 0+) + // - SHARE_SESSION_NOT_FOUND (version 0+) + // - INVALID_SHARE_SESSION_EPOCH (version 0+) + // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) + // - NOT_LEADER_OR_FOLLOWER (version 0+) + // - UNKNOWN_TOPIC_ID (version 0+) + // - INVALID_RECORD_STATE (version 0+) - only for AcknowledgeErrorCode + // - KAFKA_STORAGE_ERROR (version 0+) + // - CORRUPT_MESSAGE (version 0+) + // - INVALID_REQUEST (version 0+) + // - UNKNOWN_SERVER_ERROR (version 0+) + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", "ignorable": true, + "about": "The top-level response error code." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, + { "name": "Responses", "type": "[]ShareFetchableTopicResponse", "versions": "0+", + "about": "The response topics.", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The topic partitions.", "fields": [ + { "name": "PartitionIndex", "type": "int32", "versions": "0+", + "about": "The partition index." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The fetch error code, or 0 if there was no fetch error." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The fetch error message, or null if there was no fetch error." }, + { "name": "AcknowledgeErrorCode", "type": "int16", "versions": "0+", + "about": "The acknowledge error code, or 0 if there was no acknowledge error." }, + { "name": "AcknowledgeErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The acknowledge error message, or null if there was no acknowledge error." }, + { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", "fields": [ + { "name": "LeaderId", "type": "int32", "versions": "0+", + "about": "The ID of the current leader or -1 if the leader is unknown." }, + { "name": "LeaderEpoch", "type": "int32", "versions": "0+", + "about": "The latest known leader epoch." 
} + ]}, + { "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0+", "about": "The record data."}, + { "name": "AcquiredRecords", "type": "[]AcquiredRecords", "versions": "0+", "about": "The acquired records.", "fields": [ + {"name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The earliest offset in this batch of acquired records."}, + {"name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this batch of acquired records."}, + {"name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count of this batch of acquired records."} + ]} + ]} + ]}, + { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "0+", + "about": "Endpoints for all current leaders enumerated in PartitionData with error NOT_LEADER_OR_FOLLOWER.", "fields": [ + { "name": "NodeId", "type": "int32", "versions": "0+", + "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, + { "name": "Host", "type": "string", "versions": "0+", + "about": "The node's hostname." }, + { "name": "Port", "type": "int32", "versions": "0+", + "about": "The node's port." }, + { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The rack of the node, or null if it has not been assigned to a rack." } + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json new file mode 100644 index 0000000000000..c95790c9b198f --- /dev/null +++ b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 77, + "type": "request", + "listeners": ["broker"], + "name": "ShareGroupDescribeRequest", + "validVersions": "0", + "flexibleVersions": "0+", + // The ShareGroupDescribeRequest API is added as part of KIP-932 and is still under + // development. Hence, the API is not exposed by default by brokers unless + // explicitly enabled. + "latestVersionUnstable": true, + "fields": [ + { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", + "about": "The ids of the groups to describe" }, + { "name": "IncludeAuthorizedOperations", "type": "bool", "versions": "0+", + "about": "Whether to include authorized operations." 
} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json b/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json new file mode 100644 index 0000000000000..c093b788bfc2f --- /dev/null +++ b/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 77, + "type": "response", + "name": "ShareGroupDescribeResponse", + "validVersions": "0", + "flexibleVersions": "0+", + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) + // - INVALID_GROUP_ID (version 0+) + // - GROUP_ID_NOT_FOUND (version 0+) + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "Groups", "type": "[]DescribedGroup", "versions": "0+", + "about": "Each described group.", + "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The describe error, or 0 if there was no error." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, + { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", + "about": "The group ID string." }, + { "name": "GroupState", "type": "string", "versions": "0+", + "about": "The group state string, or the empty string." }, + { "name": "GroupEpoch", "type": "int32", "versions": "0+", + "about": "The group epoch." }, + { "name": "AssignmentEpoch", "type": "int32", "versions": "0+", + "about": "The assignment epoch." }, + { "name": "AssignorName", "type": "string", "versions": "0+", + "about": "The selected assignor." }, + { "name": "Members", "type": "[]Member", "versions": "0+", + "about": "The members.", + "fields": [ + { "name": "MemberId", "type": "string", "versions": "0+", + "about": "The member ID." }, + { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The member rack ID." }, + { "name": "MemberEpoch", "type": "int32", "versions": "0+", + "about": "The current member epoch." }, + { "name": "ClientId", "type": "string", "versions": "0+", + "about": "The client ID." }, + { "name": "ClientHost", "type": "string", "versions": "0+", + "about": "The client host." 
}, + { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "entityType": "topicName", + "about": "The subscribed topic names." }, + { "name": "Assignment", "type": "Assignment", "versions": "0+", + "about": "The current assignment." } + ]}, + { "name": "AuthorizedOperations", "type": "int32", "versions": "0+", "default": "-2147483648", + "about": "32-bit bitfield to represent authorized operations for this group." } + ] + } + ], + "commonStructs": [ + { "name": "TopicPartitions", "versions": "0+", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The topic ID." }, + { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", + "about": "The topic name." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions." } + ]}, + { "name": "Assignment", "versions": "0+", "fields": [ + { "name": "TopicPartitions", "type": "[]TopicPartitions", "versions": "0+", + "about": "The assigned topic-partitions to the member." } + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json new file mode 100644 index 0000000000000..7d28c116454d3 --- /dev/null +++ b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 76, + "type": "request", + "listeners": ["broker"], + "name": "ShareGroupHeartbeatRequest", + "validVersions": "0", + "flexibleVersions": "0+", + // The ShareGroupHeartbeatRequest API is added as part of KIP-932 and is still under + // development. Hence, the API is not exposed by default by brokers unless + // explicitly enabled. + "latestVersionUnstable": true, + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", + "about": "The group identifier." }, + { "name": "MemberId", "type": "string", "versions": "0+", + "about": "The member ID generated by the coordinator. The member ID must be kept during the entire lifetime of the member." }, + { "name": "MemberEpoch", "type": "int32", "versions": "0+", + "about": "The current member epoch; 0 to join the group; -1 to leave the group." }, + { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise." }, + { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "null if it didn't change since the last heartbeat; the subscribed topic names otherwise." 
} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json new file mode 100644 index 0000000000000..e692839f29bf9 --- /dev/null +++ b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 76, + "type": "response", + "name": "ShareGroupHeartbeatResponse", + "validVersions": "0", + "flexibleVersions": "0+", + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) + // - UNKNOWN_MEMBER_ID (version 0+) + // - GROUP_MAX_SIZE_REACHED (version 0+) + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The top-level error code, or 0 if there was no error" }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, + { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The member ID generated by the coordinator. Only provided when the member joins with MemberEpoch == 0." }, + { "name": "MemberEpoch", "type": "int32", "versions": "0+", + "about": "The member epoch." }, + { "name": "HeartbeatIntervalMs", "type": "int32", "versions": "0+", + "about": "The heartbeat interval in milliseconds." }, + { "name": "Assignment", "type": "Assignment", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "null if not provided; the assignment otherwise.", "fields": [ + { "name": "TopicPartitions", "type": "[]TopicPartitions", "versions": "0+", + "about": "The partitions assigned to the member." } + ]} + ], + "commonStructs": [ + { "name": "TopicPartitions", "versions": "0+", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The topic ID." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions." 
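+    // Illustrative response (editorial sketch, not a schema field): after a join the
+    // coordinator returns the generated MemberId, a bumped MemberEpoch, and
+    // HeartbeatIntervalMs for pacing, plus an Assignment such as
+    //   { "TopicPartitions": [ { "TopicId": <uuid>, "Partitions": [0, 1, 2] } ] };
+    // a null Assignment means no new assignment is being delivered in this response.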
} + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java b/clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java similarity index 93% rename from clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java rename to clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java index ae9e9a83a0c2c..8985d00410c37 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java +++ b/clients/src/test/java/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java @@ -22,9 +22,7 @@ public class DefaultChannelMetadataRegistry implements ChannelMetadataRegistry { @Override public void registerCipherInformation(final CipherInformation cipherInformation) { - if (this.cipherInformation != null) { - this.cipherInformation = cipherInformation; - } + this.cipherInformation = cipherInformation; } @Override diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index 512a7cea76681..82487bd418429 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.ElectionType; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; +import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; @@ -210,6 +211,14 @@ import org.apache.kafka.common.message.SaslAuthenticateResponseData; import org.apache.kafka.common.message.SaslHandshakeRequestData; import org.apache.kafka.common.message.SaslHandshakeResponseData; +import org.apache.kafka.common.message.ShareAcknowledgeRequestData; +import org.apache.kafka.common.message.ShareAcknowledgeResponseData; +import org.apache.kafka.common.message.ShareFetchRequestData; +import org.apache.kafka.common.message.ShareFetchResponseData; +import org.apache.kafka.common.message.ShareGroupDescribeRequestData; +import org.apache.kafka.common.message.ShareGroupDescribeResponseData; +import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; +import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaTopicState; import org.apache.kafka.common.message.StopReplicaResponseData; @@ -1001,6 +1010,10 @@ public void testErrorCountsIncludesNone() { assertEquals(1, createTxnOffsetCommitResponse().errorCounts().get(Errors.NONE)); assertEquals(1, createUpdateMetadataResponse().errorCounts().get(Errors.NONE)); assertEquals(1, createWriteTxnMarkersResponse().errorCounts().get(Errors.NONE)); + assertEquals(1, createShareGroupHeartbeatResponse().errorCounts().get(Errors.NONE)); + assertEquals(1, createShareGroupDescribeResponse().errorCounts().get(Errors.NONE)); + assertEquals(2, createShareFetchResponse().errorCounts().get(Errors.NONE)); + assertEquals(2, createShareAcknowledgeResponse().errorCounts().get(Errors.NONE)); } private AbstractRequest getRequest(ApiKeys apikey, short version) { @@ -1081,6 +1094,10 @@ private AbstractRequest getRequest(ApiKeys apikey, 
short version) { case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsRequest(version); case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesRequest(version); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsRequest(version); + case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatRequest(version); + case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeRequest(version); + case SHARE_FETCH: return createShareFetchRequest(version); + case SHARE_ACKNOWLEDGE: return createShareAcknowledgeRequest(version); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1163,6 +1180,10 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsResponse(); case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesResponse(); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsResponse(); + case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatResponse(); + case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeResponse(); + case SHARE_FETCH: return createShareFetchResponse(); + case SHARE_ACKNOWLEDGE: return createShareAcknowledgeResponse(); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1330,6 +1351,114 @@ private ConsumerGroupHeartbeatResponse createConsumerGroupHeartbeatResponse() { return new ConsumerGroupHeartbeatResponse(data); } + private ShareGroupHeartbeatRequest createShareGroupHeartbeatRequest(short version) { + ShareGroupHeartbeatRequestData data = new ShareGroupHeartbeatRequestData() + .setGroupId("group") + .setMemberId("memberid") + .setMemberEpoch(10) + .setRackId("rackid") + .setSubscribedTopicNames(Arrays.asList("foo", "bar")); + return new ShareGroupHeartbeatRequest.Builder(data).build(version); + } + + private ShareGroupHeartbeatResponse createShareGroupHeartbeatResponse() { + ShareGroupHeartbeatResponseData data = new ShareGroupHeartbeatResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(1000) + .setMemberId("memberid") + .setMemberEpoch(11) + .setAssignment(new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(Arrays.asList( + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Arrays.asList(0, 1, 2)), + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Arrays.asList(3, 4, 5)) + )) + ); + return new ShareGroupHeartbeatResponse(data); + } + + private ShareGroupDescribeRequest createShareGroupDescribeRequest(short version) { + ShareGroupDescribeRequestData data = new ShareGroupDescribeRequestData() + .setGroupIds(Collections.singletonList("group")) + .setIncludeAuthorizedOperations(false); + return new ShareGroupDescribeRequest.Builder(data).build(version); + } + + private ShareGroupDescribeResponse createShareGroupDescribeResponse() { + ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData() + .setGroups(Collections.singletonList( + new ShareGroupDescribeResponseData.DescribedGroup() + .setGroupId("group") + .setErrorCode((short) 0) + .setErrorMessage(Errors.forCode((short) 0).message()) + .setGroupState(ShareGroupState.EMPTY.toString()) + .setMembers(new ArrayList<>(0)) + )) + .setThrottleTimeMs(1000); + return new ShareGroupDescribeResponse(data); + } + + private ShareFetchRequest createShareFetchRequest(short version) { + ShareFetchRequestData data = new ShareFetchRequestData() + 
.setGroupId("group") + .setMemberId(Uuid.randomUuid().toString()) + .setTopics(singletonList(new ShareFetchRequestData.FetchTopic() + .setTopicId(Uuid.randomUuid()) + .setPartitions(singletonList(new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(0))))); + return new ShareFetchRequest.Builder(data).build(version); + } + + private ShareFetchResponse createShareFetchResponse() { + ShareFetchResponseData data = new ShareFetchResponseData(); + MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("blah".getBytes())); + ShareFetchResponseData.PartitionData partition = new ShareFetchResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code()) + .setRecords(records) + .setAcquiredRecords(singletonList(new ShareFetchResponseData.AcquiredRecords() + .setFirstOffset(0) + .setLastOffset(0) + .setDeliveryCount((short) 1))); + ShareFetchResponseData.ShareFetchableTopicResponse response = new ShareFetchResponseData.ShareFetchableTopicResponse() + .setTopicId(Uuid.randomUuid()) + .setPartitions(singletonList(partition)); + + data.setResponses(singletonList(response)); + data.setThrottleTimeMs(345); + data.setErrorCode(Errors.NONE.code()); + return new ShareFetchResponse(data); + } + + private ShareAcknowledgeRequest createShareAcknowledgeRequest(short version) { + ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData() + .setMemberId(Uuid.randomUuid().toString()) + .setTopics(singletonList(new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopicId(Uuid.randomUuid()) + .setPartitions(singletonList(new ShareAcknowledgeRequestData.AcknowledgePartition() + .setPartitionIndex(0) + .setAcknowledgementBatches(singletonList(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(0) + .setAcknowledgeTypes(Collections.singletonList((byte) 0)))))))); + return new ShareAcknowledgeRequest.Builder(data).build(version); + } + + private ShareAcknowledgeResponse createShareAcknowledgeResponse() { + ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); + data.setResponses(singletonList(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() + .setTopicId(Uuid.randomUuid()) + .setPartitions(singletonList(new ShareAcknowledgeResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code()))))); + data.setThrottleTimeMs(345); + data.setErrorCode(Errors.NONE.code()); + return new ShareAcknowledgeResponse(data); + } + private ControllerRegistrationRequest createControllerRegistrationRequest(short version) { ControllerRegistrationRequestData data = new ControllerRegistrationRequestData(). setControllerId(3). 
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java index 9a47a0e7530bb..37673ee05577d 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java @@ -190,15 +190,15 @@ private HttpResponse httpRequest(HttpClient client, String url, String me "Unexpected status code when handling forwarded request: " + responseCode); } } catch (IOException | InterruptedException | TimeoutException | ExecutionException e) { - log.error("IO error forwarding REST request: ", e); + log.error("IO error forwarding REST request to {} :", url, e); throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "IO Error trying to forward REST request: " + e.getMessage(), e); } catch (ConnectRestException e) { // catching any explicitly thrown ConnectRestException-s to preserve its status code // and to avoid getting it overridden by the more generic catch (Throwable) clause down below - log.error("Error forwarding REST request", e); + log.error("Error forwarding REST request to {} :", url, e); throw e; } catch (Throwable t) { - log.error("Error forwarding REST request", t); + log.error("Error forwarding REST request to {} :", url, t); throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "Error trying to forward REST request: " + t.getMessage(), t); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java index da8e235e42411..9d338936dbbf0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java @@ -47,7 +47,7 @@ public Map config() { return config; } - @JsonProperty + @JsonProperty("initial_state") public InitialState initialState() { return initialState; } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java index 6ebac341032a3..3ec037734f116 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java @@ -61,6 +61,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -159,7 +160,7 @@ public class KafkaConfigBackingStoreMockitoTest { new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2)) ); - + private static final Struct TARGET_STATE_STARTED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V0).put("state", "STARTED"); private static final Struct TARGET_STATE_PAUSED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) .put("state", "PAUSED") .put("state.v2", "PAUSED"); @@ -1184,6 +1185,147 @@ public void testRestoreRestartRequestInconsistentState() { verify(configLog).stop(); } + @Test 
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+        verify(configLog).start();
+
+        // Records to be read by consumer as it reads to the end of the log
+        doAnswer(expectReadToEnd(new LinkedHashMap<>()))
+            .doAnswer(expectReadToEnd(Collections.singletonMap(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0))))
+            .when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Nothing written yet, so the snapshot offset is still -1
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Writing task configs should block until all the writes have been performed and the root record update
+        // has completed
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        assertEquals(1, configState.offset());
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
+        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
+
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        verify(configUpdateListener).onTaskConfigUpdate(Collections.emptyList());
+
+        configStorage.stop();
+        verify(configLog).stop();
+    }
+
+    @Test
+    public void testBackgroundUpdateTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserializedOnStartup = new LinkedHashMap<>();
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserializedOnStartup);
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+        verify(configLog).start();
+
+        // Should see a single connector with initial state started
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
+
+        LinkedHashMap<String, byte[]> serializedAfterStartup = new LinkedHashMap<>();
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1));
+        doAnswer(expectReadToEnd(serializedAfterStartup)).when(configLog).readToEnd();
+
+        Map<String, Struct> deserializedAfterStartup = new HashMap<>();
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED);
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED);
+        expectRead(serializedAfterStartup, deserializedAfterStartup);
+
+        // Should see two connectors now, one paused and one stopped
+        configStorage.refresh(0, TimeUnit.SECONDS);
+        verify(configUpdateListener).onConnectorTargetStateChange(CONNECTOR_IDS.get(0));
+        configState = configStorage.snapshot();
+
+        assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0)));
+        assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1)));
+
+        configStorage.stop();
+        verify(configLog).stop();
+    }
+
+    @Test
+    public void testSameTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
+        deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserialized);
+
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+        verify(configLog).start();
+
+        // Should see a single connector with initial state started
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
+
+        // Re-reading the same STARTED state must not fire the update listener
+        expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED);
+        configStorage.refresh(0, TimeUnit.SECONDS);
+        verify(configUpdateListener, never()).onConnectorTargetStateChange(anyString());
+
+        configStorage.stop();
+        verify(configLog).stop();
+    }
+
+    @Test
     public void testPutLogLevel() throws Exception {
         final String logger1 = "org.apache.zookeeper";
@@ -1293,6 +1435,12 @@ private void expectRead(LinkedHashMap<String, byte[]> serializedValues,
         }
     }
 
+    private void expectRead(final String key, final byte[] serializedValue, Struct deserializedValue) {
+        LinkedHashMap<String, byte[]> serializedData = new LinkedHashMap<>();
+        serializedData.put(key, serializedValue);
+        expectRead(serializedData, Collections.singletonMap(key, deserializedValue));
+    }
+
     // This map needs to maintain ordering
     private Answer<Future<RecordMetadata>> expectReadToEnd(final Map<String, byte[]> serializedConfigs) {
         return invocation -> {
@@ -1315,4 +1463,11 @@ private Map<String, Object> structToMap(Struct struct) {
         for (Field field : struct.schema().fields())
             result.put(field.name(), struct.get(field));
         return result;
     }
+
+    private void addConnector(String connectorName, Map<String, String> connectorConfig, List<Map<String, String>> taskConfigs) {
+        for (int i = 0; i < taskConfigs.size(); i++)
+            configStorage.taskConfigs.put(new ConnectorTaskId(connectorName, i), taskConfigs.get(i));
+        configStorage.connectorConfigs.put(connectorName, connectorConfig);
+        configStorage.connectorTaskCounts.put(connectorName, taskConfigs.size());
+    }
 }
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
index ae5f82cd3eeb2..2e7b388413c55 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
@@ -28,7 +28,6 @@
 import org.apache.kafka.connect.data.Schema;
 import org.apache.kafka.connect.data.SchemaAndValue;
 import org.apache.kafka.connect.data.Struct;
-import org.apache.kafka.connect.runtime.TargetState;
 import org.apache.kafka.connect.runtime.WorkerConfig;
 import org.apache.kafka.connect.runtime.distributed.DistributedConfig;
 import org.apache.kafka.connect.util.Callback;
@@ -52,13 +51,11 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
 import static org.apache.kafka.connect.storage.KafkaConfigBackingStore.INCLUDE_TASKS_FIELD_NAME;
@@ -430,167 +427,6 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception {
         PowerMock.verifyAll();
     }
 
-    @Test
-    public void testPutTaskConfigsZeroTasks() throws Exception {
-        expectConfigure();
-        expectStart(Collections.emptyList(), Collections.emptyMap());
-
-        // Task configs should read to end, write to the log, read to end, write root.
- expectReadToEnd(new LinkedHashMap<>()); - expectConvertWriteRead( - COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0), - "tasks", 0); // We have 0 tasks - // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - configUpdateListener.onTaskConfigUpdate(Collections.emptyList()); - EasyMock.expectLastCall(); - - // Records to be read by consumer as it reads to the end of the log - LinkedHashMap serializedConfigs = new LinkedHashMap<>(); - serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - expectReadToEnd(serializedConfigs); - - expectPartitionCount(1); - expectStop(); - - PowerMock.replayAll(); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - configStorage.start(); - - // Bootstrap as if we had already added the connector, but no tasks had been added yet - whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - - // Null before writing - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(-1, configState.offset()); - - // Writing task configs should block until all the writes have been performed and the root record update - // has completed - List> taskConfigs = Collections.emptyList(); - configStorage.putTaskConfigs("connector1", taskConfigs); - - // Validate root config by listing all connectors and tasks - configState = configStorage.snapshot(); - assertEquals(1, configState.offset()); - String connectorName = CONNECTOR_IDS.get(0); - assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); - assertEquals(Collections.emptyList(), configState.tasks(connectorName)); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); - - configStorage.stop(); - - PowerMock.verifyAll(); - } - - @Test - public void testBackgroundUpdateTargetState() throws Exception { - // verify that we handle target state changes correctly when they come up through the log - - expectConfigure(); - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserializedOnStartup = new LinkedHashMap<>(); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - logOffset = 5; - - expectStart(existingRecords, deserializedOnStartup); - - LinkedHashMap serializedAfterStartup = new LinkedHashMap<>(); - serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); - serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); - - Map deserializedAfterStartup = 
new HashMap<>(); - deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED); - deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED); - - expectRead(serializedAfterStartup, deserializedAfterStartup); - - configUpdateListener.onConnectorTargetStateChange(CONNECTOR_IDS.get(0)); - EasyMock.expectLastCall(); - - expectPartitionCount(1); - expectStop(); - - PowerMock.replayAll(); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - configStorage.start(); - - // Should see a single connector with initial state started - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet()); - assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); - - // Should see two connectors now, one paused and one stopped - configStorage.refresh(0, TimeUnit.SECONDS); - configState = configStorage.snapshot(); - assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet()); - assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0))); - assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1))); - - configStorage.stop(); - - PowerMock.verifyAll(); - } - - @Test - public void testSameTargetState() throws Exception { - // verify that we handle target state changes correctly when they come up through the log - - expectConfigure(); - List> existingRecords = Arrays.asList( - new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1), - CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0), - CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - logOffset = 5; - - expectStart(existingRecords, deserialized); - - // on resume update listener shouldn't be called - configUpdateListener.onConnectorTargetStateChange(EasyMock.anyString()); - EasyMock.expectLastCall().andStubThrow(new AssertionError("unexpected call to onConnectorTargetStateChange")); - - expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED); - - expectPartitionCount(1); - expectStop(); - - PowerMock.replayAll(); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - configStorage.start(); - - // Should see a single connector with initial state paused - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); - - configStorage.refresh(0, TimeUnit.SECONDS); - - configStorage.stop(); - - PowerMock.verifyAll(); - } - private void expectConfigure() throws Exception { PowerMock.expectPrivate(configStorage, "createKafkaBasedLog", 
EasyMock.capture(capturedTopic), EasyMock.capture(capturedProducerProps), @@ -636,12 +472,6 @@ private void expectRead(LinkedHashMap serializedValues, } } - private void expectRead(final String key, final byte[] serializedValue, Struct deserializedValue) { - LinkedHashMap serializedData = new LinkedHashMap<>(); - serializedData.put(key, serializedValue); - expectRead(serializedData, Collections.singletonMap(key, deserializedValue)); - } - // Expect a conversion & write to the underlying log, followed by a subsequent read when the data is consumed back // from the log. Validate the data that is captured when the conversion is performed matches the specified data // (by checking a single field's value) diff --git a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java index 6ffd741f4fc64..1d422461678f5 100644 --- a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java +++ b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java @@ -179,7 +179,7 @@ public KafkaApis build() { if (metrics == null) throw new RuntimeException("You must set metrics"); if (quotas == null) throw new RuntimeException("You must set quotas"); if (fetchManager == null) throw new RuntimeException("You must set fetchManager"); - if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled()); + if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().enableRemoteStorageSystem()); if (apiVersionManager == null) throw new RuntimeException("You must set apiVersionManager"); return new KafkaApis(requestChannel, diff --git a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java index 82aa75909abba..5e8cf2dcdc64c 100644 --- a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java @@ -185,7 +185,7 @@ public ReplicaManager build() { if (metadataCache == null) throw new RuntimeException("You must set metadataCache"); if (logDirFailureChannel == null) throw new RuntimeException("You must set logDirFailureChannel"); if (alterPartitionManager == null) throw new RuntimeException("You must set alterIsrManager"); - if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled()); + if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().enableRemoteStorageSystem()); // Initialize metrics in the end just before passing it to ReplicaManager to ensure ReplicaManager closes the // metrics correctly. There might be a resource leak if it is initialized and an exception occurs between // its initialization and creation of ReplicaManager. 
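The two builder hunks above are one instance of a rename that recurs in the LogManager, ConfigHandler, KafkaServer, BrokerServer, and AdminZkClient hunks that follow: the remote-storage flag is no longer a java.lang.Boolean cached on KafkaConfig at construction time but is read through RemoteLogManagerConfig on each call. A hedged Java sketch of the resulting shape; the two accessor names come from the diff and the property key is the value behind REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, while the skeleton classes themselves are illustrative:

    import java.util.Map;

    // Stand-in for RemoteLogManagerConfig: the flag is read from the live
    // config rather than snapshotted into a field (illustrative skeleton).
    class RemoteLogManagerConfigSketch {
        private final Map<String, String> props;

        RemoteLogManagerConfigSketch(Map<String, String> props) {
            this.props = props;
        }

        boolean enableRemoteStorageSystem() {
            return Boolean.parseBoolean(
                    props.getOrDefault("remote.log.storage.system.enable", "false"));
        }
    }

    class KafkaConfigSketch {
        private final RemoteLogManagerConfigSketch remoteLogManagerConfig;

        KafkaConfigSketch(Map<String, String> props) {
            this.remoteLogManagerConfig = new RemoteLogManagerConfigSketch(props);
        }

        // Callers now write config.remoteLogManagerConfig().enableRemoteStorageSystem()
        // instead of the removed cached field isRemoteLogStorageSystemEnabled.
        RemoteLogManagerConfigSketch remoteLogManagerConfig() {
            return remoteLogManagerConfig;
        }
    }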
diff --git a/core/src/main/scala/kafka/log/LogCleaner.scala b/core/src/main/scala/kafka/log/LogCleaner.scala index 0b166c62535e9..1265e979373cd 100644 --- a/core/src/main/scala/kafka/log/LogCleaner.scala +++ b/core/src/main/scala/kafka/log/LogCleaner.scala @@ -161,11 +161,18 @@ class LogCleaner(initialConfig: CleanerConfig, /** * Stop the background cleaner threads */ - def shutdown(): Unit = { + private[this] def shutdownCleaners(): Unit = { info("Shutting down the log cleaner.") + cleaners.foreach(_.shutdown()) + cleaners.clear() + } + + /** + * Stop the background cleaner threads + */ + def shutdown(): Unit = { try { - cleaners.foreach(_.shutdown()) - cleaners.clear() + shutdownCleaners() } finally { removeMetrics() } @@ -220,8 +227,8 @@ class LogCleaner(initialConfig: CleanerConfig, info(s"Updating logCleanerIoMaxBytesPerSecond: $maxIoBytesPerSecond") throttler.updateDesiredRatePerSec(maxIoBytesPerSecond) } - - shutdown() + // call shutdownCleaners() instead of shutdown to avoid unnecessary deletion of metrics + shutdownCleaners() startup() } diff --git a/core/src/main/scala/kafka/log/LogManager.scala b/core/src/main/scala/kafka/log/LogManager.scala index 3bc6533117cba..d7599e569ab25 100755 --- a/core/src/main/scala/kafka/log/LogManager.scala +++ b/core/src/main/scala/kafka/log/LogManager.scala @@ -1562,7 +1562,7 @@ object LogManager { keepPartitionMetadataFile: Boolean): LogManager = { val defaultProps = config.extractLogConfigMap - LogConfig.validateBrokerLogConfigValues(defaultProps, config.isRemoteLogStorageSystemEnabled) + LogConfig.validateBrokerLogConfigValues(defaultProps, config.remoteLogManagerConfig.enableRemoteStorageSystem()) val defaultLogConfig = new LogConfig(defaultProps) val cleanerConfig = LogCleaner.cleanerConfig(config) diff --git a/core/src/main/scala/kafka/network/RequestConvertToJson.scala b/core/src/main/scala/kafka/network/RequestConvertToJson.scala index 54986f52c85a3..0900b94ef9f4f 100644 --- a/core/src/main/scala/kafka/network/RequestConvertToJson.scala +++ b/core/src/main/scala/kafka/network/RequestConvertToJson.scala @@ -95,6 +95,10 @@ object RequestConvertToJson { case req: RenewDelegationTokenRequest => RenewDelegationTokenRequestDataJsonConverter.write(req.data, request.version) case req: SaslAuthenticateRequest => SaslAuthenticateRequestDataJsonConverter.write(req.data, request.version) case req: SaslHandshakeRequest => SaslHandshakeRequestDataJsonConverter.write(req.data, request.version) + case req: ShareAcknowledgeRequest => ShareAcknowledgeRequestDataJsonConverter.write(req.data, request.version) + case req: ShareFetchRequest => ShareFetchRequestDataJsonConverter.write(req.data, request.version) + case req: ShareGroupDescribeRequest => ShareGroupDescribeRequestDataJsonConverter.write(req.data, request.version) + case req: ShareGroupHeartbeatRequest => ShareGroupHeartbeatRequestDataJsonConverter.write(req.data, request.version) case req: StopReplicaRequest => StopReplicaRequestDataJsonConverter.write(req.data, request.version) case req: SyncGroupRequest => SyncGroupRequestDataJsonConverter.write(req.data, request.version) case req: TxnOffsetCommitRequest => TxnOffsetCommitRequestDataJsonConverter.write(req.data, request.version) @@ -178,6 +182,10 @@ object RequestConvertToJson { case res: RenewDelegationTokenResponse => RenewDelegationTokenResponseDataJsonConverter.write(res.data, version) case res: SaslAuthenticateResponse => SaslAuthenticateResponseDataJsonConverter.write(res.data, version) case res: SaslHandshakeResponse => 
SaslHandshakeResponseDataJsonConverter.write(res.data, version) + case res: ShareAcknowledgeResponse => ShareAcknowledgeResponseDataJsonConverter.write(res.data, version) + case res: ShareFetchResponse => ShareFetchResponseDataJsonConverter.write(res.data, version) + case res: ShareGroupDescribeResponse => ShareGroupDescribeResponseDataJsonConverter.write(res.data, version) + case res: ShareGroupHeartbeatResponse => ShareGroupHeartbeatResponseDataJsonConverter.write(res.data, version) case res: StopReplicaResponse => StopReplicaResponseDataJsonConverter.write(res.data, version) case res: SyncGroupResponse => SyncGroupResponseDataJsonConverter.write(res.data, version) case res: TxnOffsetCommitResponse => TxnOffsetCommitResponseDataJsonConverter.write(res.data, version) diff --git a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala index 5f3fdc81887ef..51bc16fb09d17 100644 --- a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala +++ b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala @@ -264,11 +264,11 @@ class BrokerLifecycleManager( new OfflineDirBrokerFailureEvent(directory)) } - def handleKraftJBODMetadataVersionUpdate(): Unit = { - eventQueue.append(new KraftJBODMetadataVersionUpdateEvent()) + def resendBrokerRegistrationUnlessZkMode(): Unit = { + eventQueue.append(new ResendBrokerRegistrationUnlessZkModeEvent()) } - private class KraftJBODMetadataVersionUpdateEvent extends EventQueue.Event { + private class ResendBrokerRegistrationUnlessZkModeEvent extends EventQueue.Event { override def run(): Unit = { if (!isZkBroker) { registered = false diff --git a/core/src/main/scala/kafka/server/BrokerServer.scala b/core/src/main/scala/kafka/server/BrokerServer.scala index 112a03c50a9a4..5e299fc0e02a8 100644 --- a/core/src/main/scala/kafka/server/BrokerServer.scala +++ b/core/src/main/scala/kafka/server/BrokerServer.scala @@ -37,7 +37,7 @@ import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{ClusterResource, TopicPartition, Uuid} import org.apache.kafka.coordinator.group.metrics.{GroupCoordinatorMetrics, GroupCoordinatorRuntimeMetrics} import org.apache.kafka.coordinator.group.{CoordinatorRecord, GroupCoordinator, GroupCoordinatorConfig, GroupCoordinatorService, CoordinatorRecordSerde} -import org.apache.kafka.image.publisher.MetadataPublisher +import org.apache.kafka.image.publisher.{BrokerRegistrationTracker, MetadataPublisher} import org.apache.kafka.metadata.{BrokerState, ListenerInfo, VersionRange} import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.{AssignmentsManager, ClientMetricsManager, NodeToControllerChannelManager} @@ -139,6 +139,8 @@ class BrokerServer( var brokerMetadataPublisher: BrokerMetadataPublisher = _ + var brokerRegistrationTracker: BrokerRegistrationTracker = _ + val brokerFeatures: BrokerFeatures = BrokerFeatures.createDefault(config.unstableFeatureVersionsEnabled) def kafkaYammerMetrics: KafkaYammerMetrics = KafkaYammerMetrics.INSTANCE @@ -184,7 +186,7 @@ class BrokerServer( kafkaScheduler.startup() /* register broker metrics */ - brokerTopicStats = new BrokerTopicStats(config.isRemoteLogStorageSystemEnabled) + brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.enableRemoteStorageSystem()) quotaManagers = QuotaFactory.instantiate(config, metrics, time, s"broker-${config.nodeId}-") @@ -482,6 +484,10 @@ class BrokerServer( lifecycleManager ) metadataPublishers.add(brokerMetadataPublisher) + 
brokerRegistrationTracker = new BrokerRegistrationTracker(config.brokerId, + logManager.directoryIdsSet.toList.asJava, + () => lifecycleManager.resendBrokerRegistrationUnlessZkMode()) + metadataPublishers.add(brokerRegistrationTracker) // Register parts of the broker that can be reconfigured via dynamic configs. This needs to // be done before we publish the dynamic configs, so that we don't miss anything. diff --git a/core/src/main/scala/kafka/server/ConfigHandler.scala b/core/src/main/scala/kafka/server/ConfigHandler.scala index 1d5702e76e49d..ed9260b21947b 100644 --- a/core/src/main/scala/kafka/server/ConfigHandler.scala +++ b/core/src/main/scala/kafka/server/ConfigHandler.scala @@ -70,7 +70,7 @@ class TopicConfigHandler(private val replicaManager: ReplicaManager, val logs = logManager.logsByTopic(topic) val wasRemoteLogEnabledBeforeUpdate = logs.exists(_.remoteLogEnabled()) - logManager.updateTopicConfig(topic, props, kafkaConfig.isRemoteLogStorageSystemEnabled) + logManager.updateTopicConfig(topic, props, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) maybeBootstrapRemoteLogComponents(topic, logs, wasRemoteLogEnabledBeforeUpdate) } diff --git a/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala b/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala index 15eb1eff04aa3..f957b65ddd105 100644 --- a/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala +++ b/core/src/main/scala/kafka/server/ControllerConfigurationValidator.scala @@ -107,7 +107,8 @@ class ControllerConfigurationValidator(kafkaConfig: KafkaConfig) extends Configu throw new InvalidConfigurationException("Null value not supported for topic configs: " + nullTopicConfigs.mkString(",")) } - LogConfig.validate(properties, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) + LogConfig.validate(properties, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) case BROKER => validateBrokerName(resource.name()) case CLIENT_METRICS => val properties = new Properties() diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 822310838298c..94a7b349af927 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -17,7 +17,7 @@ package kafka.server -import java.{lang, util} +import java.util import java.util.concurrent.TimeUnit import java.util.{Collections, Properties} import kafka.cluster.EndPoint @@ -1205,8 +1205,6 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami def usesTopicId: Boolean = usesSelfManagedQuorum || interBrokerProtocolVersion.isTopicIdsSupported - - val isRemoteLogStorageSystemEnabled: lang.Boolean = getBoolean(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP) def logLocalRetentionBytes: java.lang.Long = getLong(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_BYTES_PROP) def logLocalRetentionMs: java.lang.Long = getLong(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_MS_PROP) diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 933a5df536a5f..738adab0fb0c1 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -276,7 +276,7 @@ class KafkaServer( createCurrentControllerIdMetric() /* register broker metrics */ - _brokerTopicStats = new 
BrokerTopicStats(config.isRemoteLogStorageSystemEnabled)
+    _brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.enableRemoteStorageSystem())
 
     quotaManagers = QuotaFactory.instantiate(config, metrics, time, threadNamePrefix.getOrElse(""))
     KafkaBroker.notifyClusterListeners(clusterId, kafkaMetricsReporters ++ metrics.reporters.asScala)
diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala
index aa56269a2f40d..a2a070bcd0331 100644
--- a/core/src/main/scala/kafka/server/ReplicaManager.scala
+++ b/core/src/main/scala/kafka/server/ReplicaManager.scala
@@ -33,6 +33,7 @@ import kafka.zk.KafkaZkClient
 import org.apache.kafka.common.errors._
 import org.apache.kafka.common.internals.Topic
 import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult
+import org.apache.kafka.common.message.DescribeLogDirsResponseData.DescribeLogDirsTopic
 import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
 import org.apache.kafka.common.message.LeaderAndIsrResponseData.{LeaderAndIsrPartitionError, LeaderAndIsrTopicError}
 import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic
@@ -67,7 +68,7 @@ import java.util
 import java.util.concurrent.atomic.AtomicBoolean
 import java.util.concurrent.locks.Lock
 import java.util.concurrent.{CompletableFuture, Future, RejectedExecutionException, TimeUnit}
-import java.util.{Optional, OptionalInt, OptionalLong}
+import java.util.{Collections, Optional, OptionalInt, OptionalLong}
 import scala.collection.{Map, Seq, Set, mutable}
 import scala.compat.java8.OptionConverters._
 import scala.jdk.CollectionConverters._
@@ -1249,9 +1250,9 @@ class ReplicaManager(val config: KafkaConfig,
         val fileStore = Files.getFileStore(file)
         val totalBytes = adjustForLargeFileSystems(fileStore.getTotalSpace)
         val usableBytes = adjustForLargeFileSystems(fileStore.getUsableSpace)
-        logsByDir.get(absolutePath) match {
+        val topicInfos = logsByDir.get(absolutePath) match {
           case Some(logs) =>
-            val topicInfos = logs.groupBy(_.topicPartition.topic).map{case (topic, logs) =>
+            logs.groupBy(_.topicPartition.topic).map { case (topic, logs) =>
               new DescribeLogDirsResponseData.DescribeLogDirsTopic().setName(topic).setPartitions(
                 logs.filter { log =>
                   partitions.contains(log.topicPartition)
@@ -1262,17 +1263,19 @@ class ReplicaManager(val config: KafkaConfig,
                   .setOffsetLag(getLogEndOffsetLag(log.topicPartition, log.logEndOffset, log.isFuture))
                   .setIsFutureKey(log.isFuture)
               }.toList.asJava)
-            }.toList.asJava
-
-            new DescribeLogDirsResponseData.DescribeLogDirsResult().setLogDir(absolutePath)
-              .setErrorCode(Errors.NONE.code).setTopics(topicInfos)
-              .setTotalBytes(totalBytes).setUsableBytes(usableBytes)
+            }.filterNot(_.partitions().isEmpty).toList.asJava
           case None =>
-            new DescribeLogDirsResponseData.DescribeLogDirsResult().setLogDir(absolutePath)
-              .setErrorCode(Errors.NONE.code)
-              .setTotalBytes(totalBytes).setUsableBytes(usableBytes)
+            Collections.emptyList[DescribeLogDirsTopic]()
         }
+        val describeLogDirsResult = new DescribeLogDirsResponseData.DescribeLogDirsResult()
+          .setLogDir(absolutePath)
+          .setErrorCode(Errors.NONE.code)
+          .setTotalBytes(totalBytes).setUsableBytes(usableBytes)
+        if (!topicInfos.isEmpty)
+          describeLogDirsResult.setTopics(topicInfos)
+        describeLogDirsResult
       } catch {
         case e: KafkaStorageException =>
           warn("Unable to describe replica dirs for %s".format(absolutePath), e)
diff --git
a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala index 048a665757b74..ee7bfa2157ee7 100644 --- a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala @@ -29,7 +29,6 @@ import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.image.loader.LoaderManifest import org.apache.kafka.image.publisher.MetadataPublisher import org.apache.kafka.image.{MetadataDelta, MetadataImage, TopicDelta} -import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.fault.FaultHandler import java.util.concurrent.CompletableFuture @@ -129,21 +128,6 @@ class BrokerMetadataPublisher( debug(s"Publishing metadata at offset $highestOffsetAndEpoch with $metadataVersionLogMsg.") } - Option(delta.featuresDelta()).foreach { featuresDelta => - featuresDelta.metadataVersionChange().ifPresent{ metadataVersion => - info(s"Updating metadata.version to ${metadataVersion.featureLevel()} at offset $highestOffsetAndEpoch.") - val currentMetadataVersion = delta.image().features().metadataVersion() - if (currentMetadataVersion.isLessThan(MetadataVersion.IBP_3_7_IV2) && metadataVersion.isAtLeast(MetadataVersion.IBP_3_7_IV2)) { - info( - s"""Resending BrokerRegistration with existing incarnation-id to inform the - |controller about log directories in the broker following metadata update: - |previousMetadataVersion: ${delta.image().features().metadataVersion()} - |newMetadataVersion: $metadataVersion""".stripMargin.linesIterator.mkString(" ").trim) - brokerLifecycleManager.handleKraftJBODMetadataVersionUpdate() - } - } - } - // Apply topic deltas. Option(delta.topicsDelta()).foreach { topicsDelta => try { diff --git a/core/src/main/scala/kafka/zk/AdminZkClient.scala b/core/src/main/scala/kafka/zk/AdminZkClient.scala index efecfe854bbf2..604e03c7ed436 100644 --- a/core/src/main/scala/kafka/zk/AdminZkClient.scala +++ b/core/src/main/scala/kafka/zk/AdminZkClient.scala @@ -163,7 +163,7 @@ class AdminZkClient(zkClient: KafkaZkClient, LogConfig.validate(config, kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.isRemoteLogStorageSystemEnabled)) + kafkaConfig.exists(_.remoteLogManagerConfig.enableRemoteStorageSystem())) } private def writeTopicPartitionAssignment(topic: String, replicaAssignment: Map[Int, ReplicaAssignment], @@ -481,7 +481,7 @@ class AdminZkClient(zkClient: KafkaZkClient, // remove the topic overrides LogConfig.validate(configs, kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.isRemoteLogStorageSystemEnabled)) + kafkaConfig.exists(_.remoteLogManagerConfig.enableRemoteStorageSystem())) } /** diff --git a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java index 0ba5d63a8da8a..50b581fdf4ee5 100644 --- a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java +++ b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java @@ -222,7 +222,7 @@ void setUp() throws Exception { props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true"); props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, "100"); remoteLogManagerConfig = createRLMConfig(props); - brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled()); + 
brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig().enableRemoteStorageSystem()); remoteLogManager = new RemoteLogManager(remoteLogManagerConfig, brokerId, logDir, clusterId, time, tp -> Optional.of(mockLog), diff --git a/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java b/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java index 7a1ae920a6f44..c0944080547d6 100644 --- a/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java +++ b/core/src/test/java/kafka/test/junit/ClusterTestExtensionsUnitTest.java @@ -17,29 +17,64 @@ package kafka.test.junit; +import kafka.test.ClusterConfig; import kafka.test.annotation.ClusterTemplate; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.extension.ExtensionContext; + +import java.lang.reflect.Method; +import java.util.Collections; +import java.util.List; + import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class ClusterTestExtensionsUnitTest { + + static List cfgEmpty() { + return Collections.emptyList(); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private ExtensionContext buildExtensionContext(String methodName) throws Exception { + ExtensionContext extensionContext = mock(ExtensionContext.class); + Class clazz = ClusterTestExtensionsUnitTest.class; + Method method = clazz.getDeclaredMethod(methodName); + when(extensionContext.getRequiredTestClass()).thenReturn(clazz); + when(extensionContext.getRequiredTestMethod()).thenReturn(method); + return extensionContext; + } + @Test - void testProcessClusterTemplate() { + void testProcessClusterTemplate() throws Exception { ClusterTestExtensions ext = new ClusterTestExtensions(); - ExtensionContext context = mock(ExtensionContext.class); + ExtensionContext context = buildExtensionContext("cfgEmpty"); ClusterTemplate annot = mock(ClusterTemplate.class); - when(annot.value()).thenReturn("").thenReturn(" "); + when(annot.value()).thenReturn("").thenReturn(" ").thenReturn("cfgEmpty"); + + Assertions.assertEquals( + "ClusterTemplate value can't be empty string.", + Assertions.assertThrows(IllegalStateException.class, () -> + ext.processClusterTemplate(context, annot) + ).getMessage() + ); + - Assertions.assertThrows(IllegalStateException.class, () -> - ext.processClusterTemplate(context, annot) + Assertions.assertEquals( + "ClusterTemplate value can't be empty string.", + Assertions.assertThrows(IllegalStateException.class, () -> + ext.processClusterTemplate(context, annot) + ).getMessage() ); - Assertions.assertThrows(IllegalStateException.class, () -> - ext.processClusterTemplate(context, annot) + Assertions.assertEquals( + "ClusterConfig generator method should provide at least one config", + Assertions.assertThrows(IllegalStateException.class, () -> + ext.processClusterTemplate(context, annot) + ).getMessage() ); } } diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala index 99b1e35e4eed9..b61eb28530ca9 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala @@ -27,7 +27,7 @@ import org.apache.kafka.common.errors.CorruptRecordException import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.transaction.TransactionLogConfigs -import org.apache.kafka.server.metrics.KafkaMetricsGroup +import 
org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, CleanerConfig, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetMap, ProducerStateManager, ProducerStateManagerConfig} import org.apache.kafka.storage.internals.utils.Throttler @@ -80,7 +80,6 @@ class LogCleanerTest extends Logging { logs = new Pool[TopicPartition, UnifiedLog](), logDirFailureChannel = new LogDirFailureChannel(1), time = time) - val metricsToVerify = new java.util.HashMap[String, java.util.List[java.util.Map[String, String]]]() logCleaner.cleanerManager.gaugeMetricNameWithTag.asScala.foreach { metricNameAndTags => val tags = new java.util.ArrayList[java.util.Map[String, String]]() @@ -120,6 +119,27 @@ class LogCleanerTest extends Logging { } } + @Test + def testMetricsActiveAfterReconfiguration(): Unit = { + val logCleaner = new LogCleaner(new CleanerConfig(true), + logDirs = Array(TestUtils.tempDir()), + logs = new Pool[TopicPartition, UnifiedLog](), + logDirFailureChannel = new LogDirFailureChannel(1), + time = time) + + try { + logCleaner.startup() + var nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) + assertEquals(0, nonexistent.size, s"$nonexistent should be existent") + + logCleaner.reconfigure(new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")), + new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))) + + nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) + assertEquals(0, nonexistent.size, s"$nonexistent should be existent") + } finally logCleaner.shutdown() + } + /** * Test simple log cleaning */ diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala index ed91c936edc10..2670d6e6f7736 100644 --- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala @@ -297,7 +297,7 @@ class LogConfigTest { props.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, localRetentionMs.toString) props.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, localRetentionBytes.toString) assertThrows(classOf[ConfigException], - () => LogConfig.validate(props, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) + () => LogConfig.validate(props, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) } @Test @@ -309,17 +309,17 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) 
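These validate calls all exercise one rule: once remote storage is enabled for a topic, any cleanup policy that includes compaction must be rejected, since tiered storage only supports deletion-based retention. A standalone sketch of that rule follows; the method name and exception message are illustrative stand-ins, not the actual LogConfig internals.

import java.util.Arrays;
import java.util.List;
import java.util.Locale;

public class CleanupPolicyGuard {
    // Illustrative re-statement of the check the assertions above expect:
    // remote storage is only compatible with a pure "delete" cleanup policy.
    static void validateCleanupPolicy(String cleanupPolicy, boolean remoteStorageEnabled) {
        List<String> policies = Arrays.asList(cleanupPolicy.toLowerCase(Locale.ROOT).split(","));
        if (remoteStorageEnabled && policies.contains("compact")) {
            throw new IllegalArgumentException(
                "remote.log.storage.enable is not compatible with cleanup.policy=" + cleanupPolicy);
        }
    }

    public static void main(String[] args) {
        validateCleanupPolicy("delete", true);           // accepted
        for (String policy : new String[]{"compact", "delete,compact", "compact,delete"}) {
            try {
                validateCleanupPolicy(policy, true);     // each rejected, as in the tests here
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());
            }
        }
    }
}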
logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "delete,compact") assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact,delete") assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) } @ParameterizedTest(name = "testEnableRemoteLogStorage with sysRemoteStorageEnabled: {0}") @@ -332,10 +332,10 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") if (sysRemoteStorageEnabled) { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) } else { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) assertTrue(message.getMessage.contains("Tiered Storage functionality is disabled in the broker")) } } @@ -355,10 +355,10 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_MS_CONFIG, "500") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) } else { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) } } @@ -377,10 +377,10 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_BYTES_CONFIG, "128") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) + () => LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) + LogConfig.validate(logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) } } @@ -395,10 +395,10 @@ class LogConfigTest { if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled)) + () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, 
kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem())) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.isRemoteLogStorageSystemEnabled) + LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.enableRemoteStorageSystem()) } } diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala index a7415b5d50a2e..3a69669d349e5 100644 --- a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala @@ -95,7 +95,7 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio @ClusterTemplate("testApiVersionsRequestIncludesUnreleasedApisTemplate") @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "false"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true"), new ClusterConfigProperty(key = "unstable.feature.versions.enable", value = "true"), )) def testApiVersionsRequestIncludesUnreleasedApis(): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala index 34f9d139a03cc..b0162dc635842 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala @@ -285,7 +285,7 @@ class BrokerLifecycleManagerTest { assertEquals(1000L, manager.brokerEpoch) // Trigger JBOD MV update - manager.handleKraftJBODMetadataVersionUpdate() + manager.resendBrokerRegistrationUnlessZkMode() // Accept new registration, response sets epoch to 1200 nextRegistrationRequest(1200L) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 6b655ea7837eb..151ffb9e1847d 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -4095,7 +4095,7 @@ class ReplicaManagerTest { val config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props) val remoteLogManagerConfig = new RemoteLogManagerConfig(config) val mockLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled) + val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.enableRemoteStorageSystem()) val remoteLogManager = new RemoteLogManager( remoteLogManagerConfig, 0, @@ -4195,7 +4195,7 @@ class ReplicaManagerTest { val config = new AbstractConfig(RemoteLogManagerConfig.CONFIG_DEF, props) val remoteLogManagerConfig = new RemoteLogManagerConfig(config) val dummyLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).isRemoteLogStorageSystemEnabled) + val brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.enableRemoteStorageSystem()) val remoteLogManager = new RemoteLogManager( remoteLogManagerConfig, 0, @@ -6450,6 +6450,39 @@ class ReplicaManagerTest { assertEquals(Errors.NONE.code, response.errorCode) assertTrue(response.totalBytes > 0) assertTrue(response.usableBytes >= 0) 
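As an aside on what these assertions observe: the same per-log-dir totals surface through the public Admin client. A minimal sketch of reading them, assuming a broker reachable at localhost:9092 with broker id 0 (both placeholders):

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.LogDirDescription;

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class LogDirSizes {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // describeLogDirs returns one future per queried broker id.
            Map<String, LogDirDescription> dirs =
                admin.describeLogDirs(Collections.singleton(0)).descriptions().get(0).get();
            dirs.forEach((path, desc) -> System.out.printf(
                "%s total=%s usable=%s replicas=%d%n",
                path,
                desc.totalBytes().isPresent() ? desc.totalBytes().getAsLong() : "n/a",
                desc.usableBytes().isPresent() ? desc.usableBytes().getAsLong() : "n/a",
                desc.replicaInfos().size()));
        }
    }
}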
+ assertFalse(response.topics().isEmpty) + response.topics().forEach(t => assertFalse(t.partitions().isEmpty)) + } + } finally { + replicaManager.shutdown(checkpointHW = false) + } + } + + @Test + def testDescribeLogDirsWithoutAnyPartitionTopic(): Unit = { + val noneTopic = "none-topic" + val topicPartition = 0 + val topicId = Uuid.randomUuid() + val followerBrokerId = 0 + val leaderBrokerId = 1 + val leaderEpoch = 1 + val leaderEpochIncrement = 2 + val countDownLatch = new CountDownLatch(1) + val offsetFromLeader = 5 + + // Prepare the mocked components for the test + val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time), + topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, + expectTruncation = false, localLogOffset = Some(10), offsetFromLeader = offsetFromLeader, topicId = Some(topicId)) + + try { + val responses = replicaManager.describeLogDirs(Set(new TopicPartition(noneTopic, topicPartition))) + assertEquals(mockLogMgr.liveLogDirs.size, responses.size) + responses.foreach { response => + assertEquals(Errors.NONE.code, response.errorCode) + assertTrue(response.totalBytes > 0) + assertTrue(response.usableBytes >= 0) + assertTrue(response.topics().isEmpty) } } finally { replicaManager.shutdown(checkpointHW = false) diff --git a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala index 456d075f91655..97efd9bcf4cc0 100644 --- a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala @@ -704,10 +704,10 @@ class RequestQuotaTest extends BaseRequestTest { new ConsumerGroupDescribeRequest.Builder(new ConsumerGroupDescribeRequestData(), true) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => - new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData(), true) + new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData()) case ApiKeys.PUSH_TELEMETRY => - new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true) + new PushTelemetryRequest.Builder(new PushTelemetryRequestData()) case ApiKeys.ASSIGN_REPLICAS_TO_DIRS => new AssignReplicasToDirsRequest.Builder(new AssignReplicasToDirsRequestData()) @@ -718,6 +718,18 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => new DescribeTopicPartitionsRequest.Builder(new DescribeTopicPartitionsRequestData()) + case ApiKeys.SHARE_GROUP_HEARTBEAT => + new ShareGroupHeartbeatRequest.Builder(new ShareGroupHeartbeatRequestData(), true) + + case ApiKeys.SHARE_GROUP_DESCRIBE => + new ShareGroupDescribeRequest.Builder(new ShareGroupDescribeRequestData(), true) + + case ApiKeys.SHARE_FETCH => + new ShareFetchRequest.Builder(new ShareFetchRequestData(), true) + + case ApiKeys.SHARE_ACKNOWLEDGE => + new ShareAcknowledgeRequest.Builder(new ShareAcknowledgeRequestData(), true) + case _ => throw new IllegalArgumentException("Unsupported API key " + apiKey) } diff --git a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala index c2926c3b67db9..26f4fb3daee8c 100644 --- a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala +++ b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala @@ -30,7 +30,6 @@ import org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET import 
org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry, NewTopic} import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.config.ConfigResource.Type.BROKER -import org.apache.kafka.common.metadata.FeatureLevelRecord import org.apache.kafka.common.utils.Exit import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataImageTest, MetadataProvenance} @@ -43,7 +42,7 @@ import org.junit.jupiter.api.Assertions.{assertEquals, assertNotNull, assertTrue import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.ArgumentMatchers.any import org.mockito.Mockito -import org.mockito.Mockito.{clearInvocations, doThrow, mock, times, verify, verifyNoInteractions} +import org.mockito.Mockito.{doThrow, mock, verify} import org.mockito.invocation.InvocationOnMock import org.mockito.stubbing.Answer @@ -221,102 +220,4 @@ class BrokerMetadataPublisherTest { verify(groupCoordinator).onNewMetadataImage(image, delta) } - - @Test - def testMetadataVersionUpdateToIBP_3_7_IV2OrAboveTriggersBrokerReRegistration(): Unit = { - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, "")) - val metadataCache = new KRaftMetadataCache(0) - val logManager = mock(classOf[LogManager]) - val replicaManager = mock(classOf[ReplicaManager]) - val groupCoordinator = mock(classOf[GroupCoordinator]) - val faultHandler = mock(classOf[FaultHandler]) - val brokerLifecycleManager = mock(classOf[BrokerLifecycleManager]) - - val metadataPublisher = new BrokerMetadataPublisher( - config, - metadataCache, - logManager, - replicaManager, - groupCoordinator, - mock(classOf[TransactionCoordinator]), - mock(classOf[DynamicConfigPublisher]), - mock(classOf[DynamicClientQuotaPublisher]), - mock(classOf[ScramPublisher]), - mock(classOf[DelegationTokenPublisher]), - mock(classOf[AclPublisher]), - faultHandler, - faultHandler, - brokerLifecycleManager, - ) - - var image = MetadataImage.EMPTY - var delta = new MetadataDelta.Builder() - .setImage(image) - .build() - - // We first upgrade metadata version to 3_6_IV2 - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(MetadataVersion.IBP_3_6_IV2.featureLevel())) - var newImage = delta.apply(new MetadataProvenance(100, 4, 2000)) - - metadataPublisher.onMetadataUpdate(delta, newImage, - LogDeltaManifest.newBuilder() - .provenance(MetadataProvenance.EMPTY) - .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) - .numBatches(1) - .elapsedNs(100) - .numBytes(42) - .build()) - - // This should NOT trigger broker reregistration - verifyNoInteractions(brokerLifecycleManager) - - // We then upgrade to IBP_3_7_IV2 - image = newImage - delta = new MetadataDelta.Builder() - .setImage(image) - .build() - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). 
- setFeatureLevel(MetadataVersion.IBP_3_7_IV2.featureLevel())) - newImage = delta.apply(new MetadataProvenance(100, 4, 2000)) - - metadataPublisher.onMetadataUpdate(delta, newImage, - LogDeltaManifest.newBuilder() - .provenance(MetadataProvenance.EMPTY) - .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) - .numBatches(1) - .elapsedNs(100) - .numBytes(42) - .build()) - - // This SHOULD trigger a broker registration - verify(brokerLifecycleManager, times(1)).handleKraftJBODMetadataVersionUpdate() - clearInvocations(brokerLifecycleManager) - - // Finally upgrade to IBP_3_8_IV0 - image = newImage - delta = new MetadataDelta.Builder() - .setImage(image) - .build() - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(MetadataVersion.IBP_3_8_IV0.featureLevel())) - newImage = delta.apply(new MetadataProvenance(200, 4, 3000)) - - metadataPublisher.onMetadataUpdate(delta, newImage, - LogDeltaManifest.newBuilder() - .provenance(MetadataProvenance.EMPTY) - .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) - .numBatches(1) - .elapsedNs(100) - .numBytes(42) - .build()) - - // This should NOT trigger broker reregistration - verify(brokerLifecycleManager, times(0)).handleKraftJBODMetadataVersionUpdate() - - metadataPublisher.close() - } } diff --git a/docs/security.html b/docs/security.html index 7eb0c2cb346f8..e3495f4b5188b 100644 --- a/docs/security.html +++ b/docs/security.html @@ -2267,6 +2267,42 @@

classicGroupMaxSize) {
             log.info("Cannot downgrade consumer group {} to classic group because its group size is greater than classic group max size.", consumerGroup.groupId());
+            return false;
         }
         return true;
     }
@@ -1904,24 +1906,28 @@ private Assignment updateTargetAssignment(
                 .withInvertedTargetAssignment(group.invertedTargetAssignment())
                 .withTopicsImage(metadataImage.topics())
                 .addOrUpdateMember(updatedMember.memberId(), updatedMember);
-            TargetAssignmentBuilder.TargetAssignmentResult assignmentResult;
-            // A new static member is replacing an older one with the same subscriptions.
-            // We just need to remove the older member and add the newer one. The new member should
-            // reuse the target assignment of the older member.
+
             if (staticMemberReplaced) {
-                assignmentResult = assignmentResultBuilder
-                    .removeMember(member.memberId())
-                    .build();
-            } else {
-                assignmentResult = assignmentResultBuilder
-                    .build();
+                // A new static member is replacing an older one with the same subscriptions.
+                // We just need to remove the older member and add the newer one. The new member should
+                // reuse the target assignment of the older member.
+                assignmentResultBuilder.removeMember(member.memberId());
             }
+            TargetAssignmentBuilder.TargetAssignmentResult assignmentResult =
+                assignmentResultBuilder.build();
+
             log.info("[GroupId {}] Computed a new target assignment for epoch {} with '{}' assignor: {}.",
                 group.groupId(), groupEpoch, preferredServerAssignor, assignmentResult.targetAssignment());
             records.addAll(assignmentResult.records());
-            return assignmentResult.targetAssignment().get(updatedMember.memberId());
+
+            MemberAssignment newMemberAssignment = assignmentResult.targetAssignment().get(updatedMember.memberId());
+            if (newMemberAssignment != null) {
+                return new Assignment(newMemberAssignment.targetPartitions());
+            } else {
+                return Assignment.EMPTY;
+            }
         } catch (PartitionAssignorException ex) {
             String msg = String.format("Failed to compute a new target assignment for epoch %d: %s",
                 groupEpoch, ex.getMessage());
diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java
index 57d6039fa0ba8..daea9938bf45d 100644
--- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java
+++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilder.java
@@ -64,11 +64,11 @@ public static class TargetAssignmentResult {
         /**
          * The new target assignment for the group.
          */
-        private final Map<String, Assignment> targetAssignment;
+        private final Map<String, MemberAssignment> targetAssignment;
 
         TargetAssignmentResult(
             List<Record> records,
-            Map<String, Assignment> targetAssignment
+            Map<String, MemberAssignment> targetAssignment
         ) {
             Objects.requireNonNull(records);
             Objects.requireNonNull(targetAssignment);
@@ -86,7 +86,7 @@ public List<Record> records() {
         /**
          * @return The target assignment.
          */
-        public Map<String, Assignment> targetAssignment() {
+        public Map<String, MemberAssignment> targetAssignment() {
             return targetAssignment;
         }
     }
@@ -347,38 +347,26 @@ public TargetAssignmentResult build() throws PartitionAssignorException {
 
         // Compute delta from previous to new target assignment and create the
         // relevant records.
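The build() loop that continues below only writes a target-assignment record for members whose assignment actually changed, so an unchanged group produces nothing but the epoch bump. A self-contained sketch of that delta rule, with plain collections standing in for the coordinator's Assignment and record types:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

public class TargetAssignmentDelta {
    // Returns the ids of members that need a new target-assignment record,
    // i.e. members that are new or whose partition set changed.
    static List<String> membersNeedingRecords(Map<String, Set<Integer>> oldAssignment,
                                              Map<String, Set<Integer>> newAssignment) {
        List<String> changed = new ArrayList<>();
        for (Map.Entry<String, Set<Integer>> entry : newAssignment.entrySet()) {
            if (!Objects.equals(oldAssignment.get(entry.getKey()), entry.getValue())) {
                changed.add(entry.getKey());
            }
        }
        return changed;
    }

    public static void main(String[] args) {
        Map<String, Set<Integer>> before = Map.of("member-1", Set.of(0, 1), "member-2", Set.of(2));
        Map<String, Set<Integer>> after  = Map.of("member-1", Set.of(0, 1), "member-2", Set.of(2, 3));
        System.out.println(membersNeedingRecords(before, after)); // [member-2]
    }
}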
List records = new ArrayList<>(); - Map newTargetAssignment = new HashMap<>(); - memberSpecs.keySet().forEach(memberId -> { + for (String memberId : memberSpecs.keySet()) { Assignment oldMemberAssignment = targetAssignment.get(memberId); Assignment newMemberAssignment = newMemberAssignment(newGroupAssignment, memberId); - newTargetAssignment.put(memberId, newMemberAssignment); - - if (oldMemberAssignment == null) { - // If the member had no assignment, we always create a record for it. + if (!newMemberAssignment.equals(oldMemberAssignment)) { + // If the member had no assignment or had a different assignment, we + // create a record for the new assignment. records.add(newTargetAssignmentRecord( groupId, memberId, newMemberAssignment.partitions() )); - } else { - // If the member had an assignment, we only create a record if the - // new assignment is different. - if (!newMemberAssignment.equals(oldMemberAssignment)) { - records.add(newTargetAssignmentRecord( - groupId, - memberId, - newMemberAssignment.partitions() - )); - } } - }); + } // Bump the target assignment epoch. records.add(newTargetAssignmentEpochRecord(groupId, groupEpoch)); - return new TargetAssignmentResult(records, newTargetAssignment); + return new TargetAssignmentResult(records, newGroupAssignment.members()); } private Assignment newMemberAssignment( diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java index 3664a7a61d295..abf48fd64158a 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java @@ -13166,6 +13166,49 @@ public void testClassicGroupLeaveToConsumerGroupWithoutValidLeaveGroupMember() { assertEquals(Collections.emptyList(), leaveResult.records()); } + @Test + public void testNoConversionWhenSizeExceedsClassicMaxGroupSize() throws Exception { + String groupId = "group-id"; + String nonClassicMemberId = "1"; + + List protocols = Collections.singletonList( + new ConsumerGroupMemberMetadataValue.ClassicProtocol() + .setName("range") + .setMetadata(new byte[0]) + ); + + ConsumerGroupMember member = new ConsumerGroupMember.Builder(nonClassicMemberId).build(); + ConsumerGroupMember classicMember1 = new ConsumerGroupMember.Builder("2") + .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) + .build(); + ConsumerGroupMember classicMember2 = new ConsumerGroupMember.Builder("3") + .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) + .build(); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withClassicGroupMaxSize(1) + .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) + .withConsumerGroup( + new ConsumerGroupBuilder(groupId, 10) + .withMember(member) + .withMember(classicMember1) + .withMember(classicMember2) + ) + .build(); + + assertEquals(Group.GroupType.CONSUMER, context.groupMetadataManager.group(groupId).type()); + + context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(nonClassicMemberId) + .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) + .setRebalanceTimeoutMs(5000) + ); + + assertEquals(Group.GroupType.CONSUMER, 
context.groupMetadataManager.group(groupId).type()); + } + private static void checkJoinGroupResponse( JoinGroupResponseData expectedResponse, JoinGroupResponseData actualResponse, diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java index d5ba038f31895..e2e572b6bf9f1 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/TargetAssignmentBuilderTest.java @@ -337,12 +337,12 @@ public void testAssignmentHasNotChanged() { 20 )), result.records()); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new Assignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - expectedAssignment.put("member-2", new Assignment(mkAssignment( + expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -400,12 +400,12 @@ public void testAssignmentSwapped() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-2", new Assignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - expectedAssignment.put("member-1", new Assignment(mkAssignment( + expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -474,16 +474,16 @@ public void testNewMember() { 20 ), result.records().get(3)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new Assignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new Assignment(mkAssignment( + expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3", new Assignment(mkAssignment( + expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), mkTopicAssignment(barTopicId, 5, 6) ))); @@ -561,16 +561,16 @@ public void testUpdateMember() { 20 ), result.records().get(3)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new Assignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new Assignment(mkAssignment( + expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3", new Assignment(mkAssignment( + expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), 
mkTopicAssignment(barTopicId, 5, 6) ))); @@ -639,16 +639,16 @@ public void testPartialAssignmentUpdate() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new Assignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new Assignment(mkAssignment( + expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 3, 4, 5) ))); - expectedAssignment.put("member-3", new Assignment(mkAssignment( + expectedAssignment.put("member-3", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 6), mkTopicAssignment(barTopicId, 6) ))); @@ -713,12 +713,12 @@ public void testDeleteMember() { 20 ), result.records().get(2)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new Assignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2, 3), mkTopicAssignment(barTopicId, 1, 2, 3) ))); - expectedAssignment.put("member-2", new Assignment(mkAssignment( + expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 4, 5, 6), mkTopicAssignment(barTopicId, 4, 5, 6) ))); @@ -788,17 +788,17 @@ public void testReplaceStaticMember() { 20 ), result.records().get(1)); - Map expectedAssignment = new HashMap<>(); - expectedAssignment.put("member-1", new Assignment(mkAssignment( + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 1, 2), mkTopicAssignment(barTopicId, 1, 2) ))); - expectedAssignment.put("member-2", new Assignment(mkAssignment( + expectedAssignment.put("member-2", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4), mkTopicAssignment(barTopicId, 3, 4) ))); - expectedAssignment.put("member-3-a", new Assignment(mkAssignment( + expectedAssignment.put("member-3-a", new MemberAssignment(mkAssignment( mkTopicAssignment(fooTopicId, 5, 6), mkTopicAssignment(barTopicId, 5, 6) ))); diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java index 0974c31d1b263..8b9c5b19eae4f 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java @@ -408,6 +408,13 @@ public ControllerResult registerBroker( setBrokerEpoch(brokerEpoch). setRack(request.rack()). 
setEndPoints(listenerInfo.toBrokerRegistrationRecord()); + + if (existing != null && request.incarnationId().equals(existing.incarnationId())) { + log.info("Amending registration of broker {}", request.brokerId()); + record.setFenced(existing.fenced()); + record.setInControlledShutdown(existing.inControlledShutdown()); + } + for (BrokerRegistrationRequestData.Feature feature : request.features()) { record.features().add(processRegistrationFeature(brokerId, finalizedFeatures, feature)); } diff --git a/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java b/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java new file mode 100644 index 0000000000000..51ac2bdfa4bd3 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/publisher/BrokerRegistrationTracker.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.image.publisher; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.image.MetadataDelta; +import org.apache.kafka.image.MetadataImage; +import org.apache.kafka.image.loader.LoaderManifest; +import org.apache.kafka.metadata.BrokerRegistration; +import org.apache.kafka.server.common.MetadataVersion; +import org.slf4j.Logger; + +import java.util.List; + +/** + * Tracks the registration of a specific broker, and executes a callback if it should be refreshed. + * + * This tracker handles cases where we might want to re-register the broker. The only such case + * right now is during the transition from non-JBOD mode, to JBOD mode. In other words, the + * transition from a MetadataVersion less than 3.7-IV2, to one greater than or equal to 3.7-IV2. + * In this case, the broker registration will start out containing no directories, and we need to + * resend the BrokerRegistrationRequest to fix that. + * + * As much as possible, the goal here is to keep things simple. We just compare the desired state + * with the actual state, and try to make changes only if necessary. + */ +public class BrokerRegistrationTracker implements MetadataPublisher { + private final Logger log; + private final int id; + private final Runnable refreshRegistrationCallback; + + /** + * Create the tracker. + * + * @param id The ID of this broker. + * @param targetDirectories The directories managed by this broker. + * @param refreshRegistrationCallback Callback to run if we need to refresh the registration. + */ + public BrokerRegistrationTracker( + int id, + List targetDirectories, + Runnable refreshRegistrationCallback + ) { + this.log = new LogContext("[BrokerRegistrationTracker id=" + id + "] "). 
+ logger(BrokerRegistrationTracker.class); + this.id = id; + this.refreshRegistrationCallback = refreshRegistrationCallback; + } + + @Override + public String name() { + return "BrokerRegistrationTracker(id=" + id + ")"; + } + + @Override + public void onMetadataUpdate( + MetadataDelta delta, + MetadataImage newImage, + LoaderManifest manifest + ) { + boolean checkBrokerRegistration = false; + if (delta.featuresDelta() != null) { + if (delta.metadataVersionChanged().isPresent()) { + if (log.isTraceEnabled()) { + log.trace("Metadata version change is present: {}", + delta.metadataVersionChanged()); + } + checkBrokerRegistration = true; + } + } + if (delta.clusterDelta() != null) { + if (delta.clusterDelta().changedBrokers().get(id) != null) { + if (log.isTraceEnabled()) { + log.trace("Broker change is present: {}", + delta.clusterDelta().changedBrokers().get(id)); + } + checkBrokerRegistration = true; + } + } + if (checkBrokerRegistration) { + if (brokerRegistrationNeedsRefresh(newImage.features().metadataVersion(), + delta.clusterDelta().broker(id))) { + refreshRegistrationCallback.run(); + } + } + } + + /** + * Check if the current broker registration needs to be refreshed. + * + * @param metadataVersion The current metadata version. + * @param registration The current broker registration, or null if there is none. + * @return True only if we should refresh. + */ + boolean brokerRegistrationNeedsRefresh( + MetadataVersion metadataVersion, + BrokerRegistration registration + ) { + // If there is no existing registration, the BrokerLifecycleManager must still be sending it. + // So we don't need to do anything yet. + if (registration == null) { + log.debug("No current broker registration to check."); + return false; + } + // Check to see if the directory list has changed. Note that this check could certainly be + // triggered spuriously. For example, if the broker's directory list has been changed in the + // past, and we are in the process of replaying that change log, we will end up here. + // That's fine because resending the broker registration does not cause any problems. And, + // of course, as soon as a snapshot is made, we will no longer need to worry about those + // old metadata log entries being replayed on startup. + if (metadataVersion.isAtLeast(MetadataVersion.IBP_3_7_IV2) && + registration.directories().isEmpty()) { + log.info("Current directory set is empty, but MV supports JBOD. Resending " + + "broker registration."); + return true; + } + log.debug("Broker registration does not need to be resent."); + return false; + } +} diff --git a/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java new file mode 100644 index 0000000000000..855a96cd8aaf3 --- /dev/null +++ b/metadata/src/test/java/org/apache/kafka/image/publisher/BrokerRegistrationTrackerTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.image.publisher; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.metadata.FeatureLevelRecord; +import org.apache.kafka.common.metadata.RegisterBrokerRecord; +import org.apache.kafka.image.MetadataDelta; +import org.apache.kafka.image.MetadataImage; +import org.apache.kafka.image.MetadataProvenance; +import org.apache.kafka.image.loader.LogDeltaManifest; +import org.apache.kafka.raft.LeaderAndEpoch; +import org.apache.kafka.server.common.MetadataVersion; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +@Timeout(value = 40) +public class BrokerRegistrationTrackerTest { + static final Uuid INCARNATION_ID = Uuid.fromString("jyjLbk31Tpa53pFrU9Y-Ng"); + + static final Uuid A = Uuid.fromString("Ahw3vXfnThqeZbb7HD1w6Q"); + + static final Uuid B = Uuid.fromString("BjOacT0OTNqIvUWIlKhahg"); + + static final Uuid C = Uuid.fromString("CVHi_iv2Rvy5_1rtPdasfg"); + + static class BrokerRegistrationTrackerTestContext { + AtomicInteger numCalls = new AtomicInteger(0); + BrokerRegistrationTracker tracker = new BrokerRegistrationTracker(1, + Arrays.asList(B, A), () -> numCalls.incrementAndGet()); + + MetadataImage image = MetadataImage.EMPTY; + + void onMetadataUpdate(MetadataDelta delta) { + MetadataProvenance provenance = new MetadataProvenance(0, 0, 0); + image = delta.apply(provenance); + LogDeltaManifest manifest = new LogDeltaManifest.Builder(). + provenance(provenance). + leaderAndEpoch(LeaderAndEpoch.UNKNOWN). + numBatches(1). + elapsedNs(1). + numBytes(1). + build(); + tracker.onMetadataUpdate(delta, image, manifest); + } + + MetadataDelta newDelta() { + return new MetadataDelta.Builder(). + setImage(image). + build(); + } + } + + @Test + public void testTrackerName() { + BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); + assertEquals("BrokerRegistrationTracker(id=1)", ctx.tracker.name()); + } + + @Test + public void testMetadataVersionUpdateWithoutRegistrationDoesNothing() { + BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); + MetadataDelta delta = ctx.newDelta(); + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(MetadataVersion.IBP_3_7_IV2.featureLevel())); + ctx.onMetadataUpdate(delta); + assertEquals(0, ctx.numCalls.get()); + } + + @Test + public void testBrokerUpdateWithoutNewMvDoesNothing() { + BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); + MetadataDelta delta = ctx.newDelta(); + delta.replay(new RegisterBrokerRecord(). + setBrokerId(1). + setIncarnationId(INCARNATION_ID). 
+ setLogDirs(Arrays.asList(A, B, C))); + ctx.onMetadataUpdate(delta); + assertEquals(0, ctx.numCalls.get()); + } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testBrokerUpdateWithNewMv(boolean jbodMv) { + BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); + MetadataDelta delta = ctx.newDelta(); + delta.replay(new RegisterBrokerRecord(). + setBrokerId(1). + setIncarnationId(INCARNATION_ID). + setLogDirs(Arrays.asList())); + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(jbodMv ? MetadataVersion.IBP_3_7_IV2.featureLevel() : + MetadataVersion.IBP_3_7_IV1.featureLevel())); + ctx.onMetadataUpdate(delta); + if (jbodMv) { + assertEquals(1, ctx.numCalls.get()); + } else { + assertEquals(0, ctx.numCalls.get()); + } + } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testBrokerUpdateWithNewMvWithTwoDeltas(boolean jbodMv) { + BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext(); + MetadataDelta delta = ctx.newDelta(); + delta.replay(new RegisterBrokerRecord(). + setBrokerId(1). + setIncarnationId(INCARNATION_ID). + setLogDirs(Arrays.asList())); + ctx.onMetadataUpdate(delta); + // No calls are made because MetadataVersion is 3.0-IV1 initially + assertEquals(0, ctx.numCalls.get()); + + delta = ctx.newDelta(); + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(jbodMv ? MetadataVersion.IBP_3_7_IV2.featureLevel() : + MetadataVersion.IBP_3_7_IV1.featureLevel())); + ctx.onMetadataUpdate(delta); + if (jbodMv) { + assertEquals(1, ctx.numCalls.get()); + } else { + assertEquals(0, ctx.numCalls.get()); + } + } +} diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java index 6ea752886a992..d6cf615c781b3 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java @@ -100,6 +100,16 @@ public final class RemoteLogManagerConfig { "segments, fetch remote log indexes and clean up remote log segments."; public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10; + public static final String REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP = "remote.log.manager.copier.thread.pool.size"; + public static final String REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_DOC = "Size of the thread pool used in " + + "scheduling tasks to copy segments."; + public static final int DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE = 10; + + public static final String REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP = "remote.log.manager.expiration.thread.pool.size"; + public static final String REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_DOC = "Size of the thread pool used in" + + " scheduling tasks to clean up remote log segments."; + public static final int DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE = 10; + public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = "remote.log.manager.task.interval.ms"; public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = "Interval at which remote log manager runs the scheduled tasks like copy " + "segments, and clean up remote log segments."; @@ -241,6 +251,18 @@ public final class RemoteLogManagerConfig { atLeast(1), MEDIUM, 
REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC) + .defineInternal(REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, + INT, + DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE, + atLeast(1), + MEDIUM, + REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_DOC) + .defineInternal(REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, + INT, + DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE, + atLeast(1), + MEDIUM, + REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_DOC) .define(REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, LONG, DEFAULT_REMOTE_LOG_MANAGER_TASK_INTERVAL_MS, @@ -333,6 +355,8 @@ public final class RemoteLogManagerConfig { private final String remoteLogMetadataManagerClassPath; private final long remoteLogIndexFileCacheTotalSizeBytes; private final int remoteLogManagerThreadPoolSize; + private final int remoteLogManagerCopierThreadPoolSize; + private final int remoteLogManagerExpirationThreadPoolSize; private final long remoteLogManagerTaskIntervalMs; private final long remoteLogManagerTaskRetryBackoffMs; private final long remoteLogManagerTaskRetryBackoffMaxMs; @@ -361,6 +385,8 @@ public RemoteLogManagerConfig(AbstractConfig config) { config.getString(REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP), config.getLong(REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP), config.getInt(REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP), + config.getInt(REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP), + config.getInt(REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP), config.getLong(REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_PROP), @@ -393,6 +419,8 @@ public RemoteLogManagerConfig(boolean enableRemoteStorageSystem, String remoteLogMetadataManagerListenerName, long remoteLogIndexFileCacheTotalSizeBytes, int remoteLogManagerThreadPoolSize, + int remoteLogManagerCopierThreadPoolSize, + int remoteLogManagerExpirationThreadPoolSize, long remoteLogManagerTaskIntervalMs, long remoteLogManagerTaskRetryBackoffMs, long remoteLogManagerTaskRetryBackoffMaxMs, @@ -418,6 +446,8 @@ public RemoteLogManagerConfig(boolean enableRemoteStorageSystem, this.remoteLogMetadataManagerClassPath = remoteLogMetadataManagerClassPath; this.remoteLogIndexFileCacheTotalSizeBytes = remoteLogIndexFileCacheTotalSizeBytes; this.remoteLogManagerThreadPoolSize = remoteLogManagerThreadPoolSize; + this.remoteLogManagerCopierThreadPoolSize = remoteLogManagerCopierThreadPoolSize; + this.remoteLogManagerExpirationThreadPoolSize = remoteLogManagerExpirationThreadPoolSize; this.remoteLogManagerTaskIntervalMs = remoteLogManagerTaskIntervalMs; this.remoteLogManagerTaskRetryBackoffMs = remoteLogManagerTaskRetryBackoffMs; this.remoteLogManagerTaskRetryBackoffMaxMs = remoteLogManagerTaskRetryBackoffMaxMs; @@ -466,6 +496,14 @@ public int remoteLogManagerThreadPoolSize() { return remoteLogManagerThreadPoolSize; } + public int remoteLogManagerCopierThreadPoolSize() { + return remoteLogManagerCopierThreadPoolSize; + } + + public int remoteLogManagerExpirationThreadPoolSize() { + return remoteLogManagerExpirationThreadPoolSize; + } + public long remoteLogManagerTaskIntervalMs() { return remoteLogManagerTaskIntervalMs; } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java index a063fa8820a82..7af78e750a84f 100644 --- 
a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerHarness.java @@ -53,12 +53,6 @@ public void initialize(Set topicIdPartitions, initializeRemoteLogMetadataManager(topicIdPartitions, startConsumerThread, RemoteLogMetadataTopicPartitioner::new, remotePartitionMetadataStoreSupplier); } - public void initializeRemoteLogMetadataManager(Set topicIdPartitions, - boolean startConsumerThread, - Function remoteLogMetadataTopicPartitioner) { - initializeRemoteLogMetadataManager(topicIdPartitions, startConsumerThread, remoteLogMetadataTopicPartitioner, RemotePartitionMetadataStore::new); - } - public void initializeRemoteLogMetadataManager(Set topicIdPartitions, boolean startConsumerThread, Function remoteLogMetadataTopicPartitioner, @@ -70,6 +64,7 @@ public void initializeRemoteLogMetadataManager(Set topicIdPart .startConsumerThread(startConsumerThread) .remoteLogMetadataTopicPartitioner(remoteLogMetadataTopicPartitioner) .remotePartitionMetadataStore(remotePartitionMetadataStoreSupplier) + .overrideRemoteLogMetadataManagerProps(overrideRemoteLogMetadataManagerProps()) .build(); } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java index c599259ed9416..84b98dcb5be1d 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java @@ -16,6 +16,11 @@ */ package org.apache.kafka.server.log.remote.metadata.storage; +import kafka.test.ClusterInstance; +import kafka.test.annotation.ClusterTest; +import kafka.test.junit.ClusterTestExtensions; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; @@ -24,139 +29,99 @@ import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId; import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata; import org.apache.kafka.test.TestUtils; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import scala.collection.JavaConverters; -import scala.collection.Seq; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.extension.ExtendWith; -import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.LOG_DIR; -@SuppressWarnings("deprecation") // Added for Scala 2.12 compatibility for usages of JavaConverters +@ExtendWith(value = ClusterTestExtensions.class) +@Tag("integration") public class TopicBasedRemoteLogMetadataManagerRestartTest { private static final int SEG_SIZE = 1024 * 1024; private final Time time = new MockTime(1); private final String logDir = TestUtils.tempDirectory("_rlmm_segs_").getAbsolutePath(); + private final ClusterInstance 
clusterInstance; - private TopicBasedRemoteLogMetadataManagerHarness remoteLogMetadataManagerHarness; - - @BeforeEach - public void setup() { - // Start the cluster and initialize TopicBasedRemoteLogMetadataManager. - remoteLogMetadataManagerHarness = new TopicBasedRemoteLogMetadataManagerHarness() { - protected Map overrideRemoteLogMetadataManagerProps() { - Map props = new HashMap<>(); - props.put(LOG_DIR, logDir); - return props; - } - }; - remoteLogMetadataManagerHarness.initialize(Collections.emptySet(), true); + TopicBasedRemoteLogMetadataManagerRestartTest(ClusterInstance clusterInstance) { // Constructor injections + this.clusterInstance = clusterInstance; } - private void startTopicBasedRemoteLogMetadataManagerHarness(boolean startConsumerThread) { - remoteLogMetadataManagerHarness.initializeRemoteLogMetadataManager(Collections.emptySet(), startConsumerThread, RemoteLogMetadataTopicPartitioner::new); + private TopicBasedRemoteLogMetadataManager createTopicBasedRemoteLogMetadataManager() { + return RemoteLogMetadataManagerTestUtils.builder() + .topicIdPartitions(Collections.emptySet()) + .bootstrapServers(clusterInstance.bootstrapServers()) + .startConsumerThread(true) + .remoteLogMetadataTopicPartitioner(RemoteLogMetadataTopicPartitioner::new) + .overrideRemoteLogMetadataManagerProps(Collections.singletonMap(LOG_DIR, logDir)) + .build(); } - @AfterEach - public void teardown() throws IOException { - if (remoteLogMetadataManagerHarness != null) { - remoteLogMetadataManagerHarness.close(); - } - } - - private void stopTopicBasedRemoteLogMetadataManagerHarness() { - remoteLogMetadataManagerHarness.closeRemoteLogMetadataManager(); - } - - private TopicBasedRemoteLogMetadataManager topicBasedRlmm() { - return remoteLogMetadataManagerHarness.remoteLogMetadataManager(); - } - - @Test + @ClusterTest(brokers = 3) public void testRLMMAPIsAfterRestart() throws Exception { // Create topics. String leaderTopic = "new-leader"; - HashMap> assignedLeaderTopicReplicas = new HashMap<>(); - List leaderTopicReplicas = new ArrayList<>(); - // Set broker id 0 as the first entry which is taken as the leader. - leaderTopicReplicas.add(0); - leaderTopicReplicas.add(1); - leaderTopicReplicas.add(2); - assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas)); - remoteLogMetadataManagerHarness.createTopicWithAssignment( - leaderTopic, JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas), - remoteLogMetadataManagerHarness.listenerName()); - String followerTopic = "new-follower"; - HashMap> assignedFollowerTopicReplicas = new HashMap<>(); - List followerTopicReplicas = new ArrayList<>(); - // Set broker id 1 as the first entry which is taken as the leader. - followerTopicReplicas.add(1); - followerTopicReplicas.add(2); - followerTopicReplicas.add(0); - assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas)); - remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic, - JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas), - remoteLogMetadataManagerHarness.listenerName()); + try (Admin admin = clusterInstance.createAdminClient()) { + // Set broker id 0 as the first entry which is taken as the leader. + NewTopic newLeaderTopic = new NewTopic(leaderTopic, Collections.singletonMap(0, Arrays.asList(0, 1, 2))); + // Set broker id 1 as the first entry which is taken as the leader. 
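+            // (With an explicit replica assignment, the first broker in each list becomes the
+            // preferred leader, so this topic is led by broker 1 rather than broker 0.)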
+ NewTopic newFollowerTopic = new NewTopic(followerTopic, Collections.singletonMap(0, Arrays.asList(1, 2, 0))); + admin.createTopics(Arrays.asList(newLeaderTopic, newFollowerTopic)).all().get(); + } + clusterInstance.waitForTopic(leaderTopic, 1); + clusterInstance.waitForTopic(followerTopic, 1); final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0)); final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0)); - - // Register these partitions to RLMM. - topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); - - // Add segments for these partitions, but they are not available as they have not yet been subscribed. RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata( new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L)); - topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get(); - RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata( new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L)); - topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get(); - - // Stop TopicBasedRemoteLogMetadataManager only. - stopTopicBasedRemoteLogMetadataManagerHarness(); - // Start TopicBasedRemoteLogMetadataManager - startTopicBasedRemoteLogMetadataManagerHarness(true); + try (TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = createTopicBasedRemoteLogMetadataManager()) { + // Register these partitions to RemoteLogMetadataManager. + topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges( + Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); - // Register these partitions to RLMM, which loads the respective metadata snapshots. - topicBasedRlmm().onPartitionLeadershipChanges( - Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); - - // Check for the stored entries from the earlier run. - TestUtils.waitForCondition(() -> - TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), - topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)), - "Remote log segment metadata not available"); - TestUtils.waitForCondition(() -> - TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), - topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)), - "Remote log segment metadata not available"); - // Add one more segment - RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata( - new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), - 101, 200, -1L, 0, - time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L)); - topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get(); + // Add segments for these partitions, but they are not available as they have not yet been subscribed. + topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(leaderSegmentMetadata).get(); + topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(followerSegmentMetadata).get(); + } - // Check that both the stored segment and recently added segment are available. 
- Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), - topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition))); + try (TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = createTopicBasedRemoteLogMetadataManager()) { + // Register these partitions to RemoteLogMetadataManager, which loads the respective metadata snapshots. + topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges( + Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); + + // Check for the stored entries from the earlier run. + TestUtils.waitForCondition(() -> + TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), + topicBasedRemoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)), + "Remote log segment metadata not available"); + TestUtils.waitForCondition(() -> + TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), + topicBasedRemoteLogMetadataManager.listRemoteLogSegments(followerTopicIdPartition)), + "Remote log segment metadata not available"); + // Add one more segment + RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata( + new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 101, 200, -1L, 0, + time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L)); + topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get(); + + // Check that both the stored segment and recently added segment are available. + Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), + topicBasedRemoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition))); + } } -} \ No newline at end of file +} diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java index 45fd6669e7d4f..4e3c2fc26cb66 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java @@ -47,7 +47,7 @@ public void testValidConfigs(boolean useDefaultRemoteLogMetadataManagerClass) { RemoteLogManagerConfig expectedRemoteLogManagerConfig = new RemoteLogManagerConfig(true, "dummy.remote.storage.class", "dummy.remote.storage.class.path", remoteLogMetadataManagerClass, "dummy.remote.log.metadata.class.path", - "listener.name", 1024 * 1024L, 1, 60000L, 100L, 60000L, 0.3, 10, 100, 100, + "listener.name", 1024 * 1024L, 1, 1, 1, 60000L, 100L, 60000L, 0.3, 10, 100, 100, rsmPrefix, rsmProps, rlmmPrefix, rlmmProps, Long.MAX_VALUE, 11, 1, Long.MAX_VALUE, 11, 1); @@ -81,6 +81,10 @@ private Map extractProps(RemoteLogManagerConfig remoteLogManager remoteLogManagerConfig.remoteLogIndexFileCacheTotalSizeBytes()); props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP, remoteLogManagerConfig.remoteLogManagerThreadPoolSize()); + props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, + remoteLogManagerConfig.remoteLogManagerCopierThreadPoolSize()); + props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, + remoteLogManagerConfig.remoteLogManagerExpirationThreadPoolSize()); 
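+            // Note: extractProps() must echo every configured property so that
+            // testValidConfigs() can rebuild an identical RemoteLogManagerConfig;
+            // the copier and expiration thread pools are sized independently of the
+            // main remote log manager thread pool, hence the two entries above.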
props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, remoteLogManagerConfig.remoteLogManagerTaskIntervalMs()); props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP, diff --git a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java index 5ea53a1c38269..e5177ddaead2c 100644 --- a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java +++ b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTestUtils.java @@ -43,11 +43,37 @@ import static kafka.test.annotation.Type.CO_KRAFT; import static kafka.test.annotation.Type.KRAFT; +import static kafka.test.annotation.Type.ZK; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG; +/** + * The old test framework {@link kafka.api.BaseConsumerTest#getTestQuorumAndGroupProtocolParametersAll} test for the following cases: + *
+ * <ul>
+ *   <li>(ZK / KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 2 cases</li>
+ *   <li>(KRAFT server) with (group.coordinator.new.enable=true) with (classic group protocol) = 1 case</li>
+ *   <li>(KRAFT server) with (group.coordinator.new.enable=true) with (consumer group protocol) = 1 case</li>
+ * </ul>
+ *
+ * The new test framework runs the following seven cases:
+ * <ul>
+ *   <li>(ZK / KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 3 cases</li>
+ *   <li>(KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=true) with (classic group protocol) = 2 cases</li>
+ *   <li>(KRAFT / CO_KRAFT servers) with (group.coordinator.new.enable=true) with (consumer group protocol) = 2 cases</li>
+ * </ul>
+ *
+ * We can reduce the number of cases to the same as in the old test framework by using the following methods:
+ * <ul>
+ *   <li>{@link #forConsumerGroupCoordinator} for the case of (consumer group protocol)</li>
+ *   <li>(CO_KRAFT servers) with (group.coordinator.new.enable=true) with (classic / consumer group protocols) = 2 cases</li>
+ * </ul>
+ * <ul>
+ *   <li>{@link #forClassicGroupCoordinator} for the case of (classic group protocol)</li>
+ *   <li>(ZK / KRAFT servers) with (group.coordinator.new.enable=false) with (classic group protocol) = 2 cases</li>
+ * </ul>
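+ * <p>
+ * As a sketch of how these generators are consumed (the annotation wiring is the
+ * expected usage; the test method below is illustrative only, not part of this utility):
+ * <pre>{@code
+ * @ClusterTemplate("forConsumerGroupCoordinator")
+ * public void testListGroups(ClusterInstance cluster) {
+ *     // executed once per ClusterConfig returned by the generator
+ * }
+ * }</pre>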
+ */ class ConsumerGroupCommandTestUtils { private ConsumerGroupCommandTestUtils() { @@ -66,8 +92,8 @@ static List forConsumerGroupCoordinator() { serverProperties.put(GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer"); return Collections.singletonList(ClusterConfig.defaultBuilder() - .setTypes(Stream.of(KRAFT, CO_KRAFT).collect(Collectors.toSet())) .setFeatures(Collections.singletonMap(Features.GROUP_VERSION, GroupVersion.GV_1.featureLevel())) + .setTypes(Collections.singleton(CO_KRAFT)) .setServerProperties(serverProperties) .setTags(Collections.singletonList("consumerGroupCoordinator")) .build()); @@ -80,6 +106,7 @@ static List forClassicGroupCoordinator() { serverProperties.put(NEW_GROUP_COORDINATOR_ENABLE_CONFIG, "false"); return Collections.singletonList(ClusterConfig.defaultBuilder() + .setTypes(Stream.of(ZK, KRAFT).collect(Collectors.toSet())) .setServerProperties(serverProperties) .setTags(Collections.singletonList("classicGroupCoordinator")) .build()); From ee2cd4437c483de67f042b6544963a58c927e21c Mon Sep 17 00:00:00 2001 From: brenden20 Date: Tue, 4 Jun 2024 14:00:32 -0500 Subject: [PATCH 40/61] Add comment --- .../kafka/clients/consumer/internals/ConsumerNetworkThread.java | 1 + 1 file changed, 1 insertion(+) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index 876171f49c823..dd849712ab526 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -292,6 +292,7 @@ private void closeInternal(final Duration timeout) { /** * Check the unsent queue one last time and poll until all requests are sent or the timer runs out. */ + // Visible for testing protected void sendUnsentRequests(final Timer timer) { if (networkClientDelegate.unsentRequests().isEmpty()) return; From fabd7bded80b858262069bddd791bf2e39afc8d4 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 5 Jun 2024 10:41:20 -0500 Subject: [PATCH 41/61] Implementing suggestions --- .../internals/ConsumerNetworkThread.java | 3 +-- .../internals/ConsumerNetworkThreadTest.java | 19 +++---------------- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index dd849712ab526..adee6594603bb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -292,8 +292,7 @@ private void closeInternal(final Duration timeout) { /** * Check the unsent queue one last time and poll until all requests are sent or the timer runs out. 
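* Invoked from {@code cleanup()} during shutdown, so the timer bounds how long {@code close()} can block on pending requests.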
*/ - // Visible for testing - protected void sendUnsentRequests(final Timer timer) { + private void sendUnsentRequests(final Timer timer) { if (networkClientDelegate.unsentRequests().isEmpty()) return; do { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index d90f4c0b812d5..19a249b8ca8f8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -45,7 +45,6 @@ import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.common.utils.Timer; import org.apache.kafka.test.TestCondition; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; @@ -64,7 +63,6 @@ import java.util.Optional; import java.util.List; import java.util.LinkedList; -import java.util.Queue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; @@ -77,7 +75,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -113,7 +110,7 @@ public class ConsumerNetworkThreadTest { this.metadata = mock(ConsumerMetadata.class); this.applicationEventProcessor = mock(ApplicationEventProcessor.class); this.applicationEventReaper = mock(CompletableEventReaper.class); - this.client = mock(MockClient.class); + this.client = new MockClient(time); this.applicationEventsQueue = new LinkedBlockingQueue<>(); LogContext logContext = new LogContext(); @@ -156,19 +153,9 @@ public void testEnsureCloseStopsRunningThread() { assertFalse(consumerNetworkThread.isRunning()); } - @Test - public void testEnsureSendUnsentRequestPollWIthZeroRunsOnce() { - Timer timer = time.timer(0); - Queue queue = new LinkedList<>(); - queue.add(mock(NetworkClientDelegate.UnsentRequest.class)); - when(networkClientDelegate.unsentRequests()).thenReturn(queue); - consumerNetworkThread.sendUnsentRequests(timer); - verify(networkClientDelegate).poll(eq(0L), anyLong()); - } - @ParameterizedTest @ValueSource(longs = {100, 4999, 5001}) - public void testConsumerNetworkThreadWaitTimeComputations(long exampleTime) { + public void testConsumerNetworkThreadPollTimeComputations(long exampleTime) { List> list = new ArrayList<>(); list.add(Optional.of(coordinatorRequestManager)); list.add(Optional.of(heartbeatRequestManager)); @@ -209,7 +196,7 @@ public void testStartupAndTearDown() throws InterruptedException { } @Test - void testRequestManagersArePolledOnce() { + public void testRequestManagersArePolledOnce() { List> list = new ArrayList<>(); list.add(Optional.of(coordinatorRequestManager)); list.add(Optional.of(heartbeatRequestManager)); From 574fc1366453ca95337a5363edcadf5ac200af5a Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 5 Jun 2024 11:20:57 -0500 Subject: [PATCH 42/61] Updated test name --- .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 19a249b8ca8f8..8c77e7df1d95c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -212,7 +212,7 @@ public void testRequestManagersArePolledOnce() { } @Test - public void testApplicationEvent() { + public void testPollEvent() { ApplicationEvent e = new PollEvent(100); applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); From a6959588e64d379b298e1e36bcc55473b6efacba Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 5 Jun 2024 13:07:11 -0500 Subject: [PATCH 43/61] Condensed multiple tests into 1 Condensed multiple tests into 1 that is parameterized and performs the same functionality --- .../internals/ConsumerNetworkThreadTest.java | 81 ++++++------------- 1 file changed, 24 insertions(+), 57 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 8c77e7df1d95c..5827486cfef57 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -27,6 +27,7 @@ import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent; +import org.apache.kafka.clients.consumer.internals.events.NewTopicsMetadataUpdateRequestEvent; import org.apache.kafka.clients.consumer.internals.events.PollEvent; import org.apache.kafka.clients.consumer.internals.events.ResetPositionsEvent; import org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent; @@ -51,6 +52,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import java.time.Duration; @@ -66,6 +69,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; +import java.util.stream.Stream; import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; @@ -211,28 +215,29 @@ public void testRequestManagersArePolledOnce() { verify(networkClientDelegate).poll(anyLong(), anyLong()); } - @Test - public void testPollEvent() { - ApplicationEvent e = new PollEvent(100); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(e); - } - - @Test - public void testAsyncCommitEvent() { - ApplicationEvent e = new AsyncCommitEvent(new HashMap<>()); + @ParameterizedTest + @MethodSource("appEvents") + public void testEventIsProcessed(ApplicationEvent e) { applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(AsyncCommitEvent.class)); + 
verify(applicationEventProcessor).process(any(e.getClass())); + assertTrue(applicationEventsQueue.isEmpty()); } - @Test - public void testSyncCommitEvent() { - ApplicationEvent e = new SyncCommitEvent(new HashMap<>(), calculateDeadlineMs(time, 100)); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(SyncCommitEvent.class)); + private static Stream appEvents() { + Time time1 = new MockTime(); + Map offset = mockTopicPartitionOffset(); + final long currentTimeMs = time1.milliseconds(); + + return Stream.of( + Arguments.of(new PollEvent(100)), + Arguments.of(new NewTopicsMetadataUpdateRequestEvent()), + Arguments.of(new AsyncCommitEvent(new HashMap<>())), + Arguments.of(new SyncCommitEvent(new HashMap<>(), calculateDeadlineMs(time1, 100))), + Arguments.of(new ResetPositionsEvent(calculateDeadlineMs(time1, 100))), + Arguments.of(new ValidatePositionsEvent(calculateDeadlineMs(time1, 100))), + Arguments.of(new TopicMetadataEvent("topic", Long.MAX_VALUE)), + Arguments.of(new AssignmentChangeEvent(offset, currentTimeMs))); } @ParameterizedTest @@ -246,15 +251,6 @@ public void testListOffsetsEventIsProcessed(boolean requireTimestamp) { assertTrue(applicationEventsQueue.isEmpty()); } - @Test - public void testResetPositionsEventIsProcessed() { - ResetPositionsEvent e = new ResetPositionsEvent(calculateDeadlineMs(time, 100)); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); - assertTrue(applicationEventsQueue.isEmpty()); - } - @Test public void testResetPositionsProcessFailureIsIgnored() { doThrow(new NullPointerException()).when(offsetsRequestManager).resetPositionsIfNeeded(); @@ -266,35 +262,6 @@ public void testResetPositionsProcessFailureIsIgnored() { verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); } - @Test - public void testValidatePositionsEventIsProcessed() { - ValidatePositionsEvent e = new ValidatePositionsEvent(calculateDeadlineMs(time, 100)); - applicationEventsQueue.add(e); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(ValidatePositionsEvent.class)); - assertTrue(applicationEventsQueue.isEmpty()); - } - - @Test - public void testAssignmentChangeEvent() { - Map offset = mockTopicPartitionOffset(); - - final long currentTimeMs = time.milliseconds(); - ApplicationEvent e = new AssignmentChangeEvent(offset, currentTimeMs); - applicationEventsQueue.add(e); - - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); - verify(networkClientDelegate).poll(anyLong(), anyLong()); - } - - @Test - void testFetchTopicMetadata() { - applicationEventsQueue.add(new TopicMetadataEvent("topic", Long.MAX_VALUE)); - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(TopicMetadataEvent.class)); - } - @Test void testPollResultTimer() { NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( @@ -387,7 +354,7 @@ void testRunOnceInvokesReaper() { verify(applicationEventReaper).reap(any(Long.class)); } - private Map mockTopicPartitionOffset() { + static private Map mockTopicPartitionOffset() { final TopicPartition t0 = new TopicPartition("t0", 2); final TopicPartition t1 = new TopicPartition("t0", 3); final Map topicPartitionOffsets = new HashMap<>(); From d443311ec4c980b24eaaad9a5ba80df64fb7de85 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 5 Jun 2024 13:33:46 -0500 Subject: 
[PATCH 44/61] Fixing conflict --- .../internals/ConsumerNetworkThreadTest.java | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 5827486cfef57..3d51b8296c104 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -226,8 +226,6 @@ public void testEventIsProcessed(ApplicationEvent e) { private static Stream appEvents() { Time time1 = new MockTime(); - Map offset = mockTopicPartitionOffset(); - final long currentTimeMs = time1.milliseconds(); return Stream.of( Arguments.of(new PollEvent(100)), @@ -236,8 +234,7 @@ private static Stream appEvents() { Arguments.of(new SyncCommitEvent(new HashMap<>(), calculateDeadlineMs(time1, 100))), Arguments.of(new ResetPositionsEvent(calculateDeadlineMs(time1, 100))), Arguments.of(new ValidatePositionsEvent(calculateDeadlineMs(time1, 100))), - Arguments.of(new TopicMetadataEvent("topic", Long.MAX_VALUE)), - Arguments.of(new AssignmentChangeEvent(offset, currentTimeMs))); + Arguments.of(new TopicMetadataEvent("topic", Long.MAX_VALUE))); } @ParameterizedTest @@ -262,6 +259,19 @@ public void testResetPositionsProcessFailureIsIgnored() { verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); } + @Test + public void testAssignmentChangeEvent() { + Map offset = mockTopicPartitionOffset(); + + final long currentTimeMs = time.milliseconds(); + ApplicationEvent e = new AssignmentChangeEvent(offset, currentTimeMs); + applicationEventsQueue.add(e); + + consumerNetworkThread.runOnce(); + verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); + verify(networkClientDelegate).poll(anyLong(), anyLong()); + } + @Test void testPollResultTimer() { NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( @@ -354,7 +364,7 @@ void testRunOnceInvokesReaper() { verify(applicationEventReaper).reap(any(Long.class)); } - static private Map mockTopicPartitionOffset() { + private Map mockTopicPartitionOffset() { final TopicPartition t0 = new TopicPartition("t0", 2); final TopicPartition t1 = new TopicPartition("t0", 3); final Map topicPartitionOffsets = new HashMap<>(); From 907676a89cf12297d979a6b98bce11005ec109a1 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 5 Jun 2024 13:38:57 -0500 Subject: [PATCH 45/61] Revert "Fixing conflict" This reverts commit d443311ec4c980b24eaaad9a5ba80df64fb7de85. 
--- .../internals/ConsumerNetworkThreadTest.java | 20 +++++-------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 3d51b8296c104..5827486cfef57 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -226,6 +226,8 @@ public void testEventIsProcessed(ApplicationEvent e) { private static Stream appEvents() { Time time1 = new MockTime(); + Map offset = mockTopicPartitionOffset(); + final long currentTimeMs = time1.milliseconds(); return Stream.of( Arguments.of(new PollEvent(100)), @@ -234,7 +236,8 @@ private static Stream appEvents() { Arguments.of(new SyncCommitEvent(new HashMap<>(), calculateDeadlineMs(time1, 100))), Arguments.of(new ResetPositionsEvent(calculateDeadlineMs(time1, 100))), Arguments.of(new ValidatePositionsEvent(calculateDeadlineMs(time1, 100))), - Arguments.of(new TopicMetadataEvent("topic", Long.MAX_VALUE))); + Arguments.of(new TopicMetadataEvent("topic", Long.MAX_VALUE)), + Arguments.of(new AssignmentChangeEvent(offset, currentTimeMs))); } @ParameterizedTest @@ -259,19 +262,6 @@ public void testResetPositionsProcessFailureIsIgnored() { verify(applicationEventProcessor).process(any(ResetPositionsEvent.class)); } - @Test - public void testAssignmentChangeEvent() { - Map offset = mockTopicPartitionOffset(); - - final long currentTimeMs = time.milliseconds(); - ApplicationEvent e = new AssignmentChangeEvent(offset, currentTimeMs); - applicationEventsQueue.add(e); - - consumerNetworkThread.runOnce(); - verify(applicationEventProcessor).process(any(AssignmentChangeEvent.class)); - verify(networkClientDelegate).poll(anyLong(), anyLong()); - } - @Test void testPollResultTimer() { NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( @@ -364,7 +354,7 @@ void testRunOnceInvokesReaper() { verify(applicationEventReaper).reap(any(Long.class)); } - private Map mockTopicPartitionOffset() { + static private Map mockTopicPartitionOffset() { final TopicPartition t0 = new TopicPartition("t0", 2); final TopicPartition t1 = new TopicPartition("t0", 3); final Map topicPartitionOffsets = new HashMap<>(); From d25fcb33e3cbb87c9b142a5e2d86736a3330c2b1 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Wed, 5 Jun 2024 14:52:16 -0500 Subject: [PATCH 46/61] Updated new test to work with current file --- .../internals/ConsumerNetworkThreadTest.java | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 49e978cb77759..ab62d166c86c6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -356,6 +356,17 @@ void testRunOnceInvokesReaper() { @Test void testSendUnsentRequest() { + ConsumerNetworkThread consumerNetworkThread1 = new ConsumerNetworkThread( + new LogContext(), + time, + applicationEventsQueue, + applicationEventReaper, + () -> applicationEventProcessor, + () -> networkClient, + () -> requestManagers + ); + 
consumerNetworkThread1.initializeResources(); + String groupId = "group-id"; NetworkClientDelegate.UnsentRequest request = new NetworkClientDelegate.UnsentRequest( new FindCoordinatorRequest.Builder( @@ -368,7 +379,7 @@ void testSendUnsentRequest() { assertTrue(networkClient.hasAnyPendingRequests()); assertFalse(networkClient.unsentRequests().isEmpty()); assertFalse(client.hasInFlightRequests()); - consumerNetworkThread.cleanup(); + consumerNetworkThread1.cleanup(); assertTrue(networkClient.unsentRequests().isEmpty()); assertFalse(client.hasInFlightRequests()); From 0fe18c6f792380f86a6cebfd15f9b98a6587b953 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 6 Jun 2024 12:28:48 -0500 Subject: [PATCH 47/61] Implementing PR suggestions Removed test that was suggested to be removed, changed some test and method names, and updated a few tests --- .../internals/ConsumerNetworkThreadTest.java | 126 ++++++------------ 1 file changed, 41 insertions(+), 85 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index ab62d166c86c6..576de2c0980af 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -23,8 +23,6 @@ import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; import org.apache.kafka.clients.consumer.internals.events.AssignmentChangeEvent; import org.apache.kafka.clients.consumer.internals.events.AsyncCommitEvent; -import org.apache.kafka.clients.consumer.internals.events.CompletableApplicationEvent; -import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent; import org.apache.kafka.clients.consumer.internals.events.NewTopicsMetadataUpdateRequestEvent; @@ -33,14 +31,10 @@ import org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent; import org.apache.kafka.clients.consumer.internals.events.TopicMetadataEvent; import org.apache.kafka.clients.consumer.internals.events.ValidatePositionsEvent; -import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.message.FindCoordinatorRequestData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.FindCoordinatorRequest; -import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.utils.LogContext; @@ -61,13 +55,11 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.Map; import java.util.Optional; import java.util.List; import java.util.LinkedList; import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; import java.util.stream.Stream; @@ -79,7 +71,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; -import 
static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -90,7 +81,6 @@ public class ConsumerNetworkThreadTest { static final int DEFAULT_REQUEST_TIMEOUT_MS = 500; private final Time time; - private final ConsumerMetadata metadata; private final BlockingQueue applicationEventsQueue; private final ApplicationEventProcessor applicationEventProcessor; private final OffsetsRequestManager offsetsRequestManager; @@ -99,31 +89,25 @@ public class ConsumerNetworkThreadTest { private final ConsumerNetworkThread consumerNetworkThread; private final MockClient client; private final NetworkClientDelegate networkClientDelegate; - private final NetworkClientDelegate networkClient; private final RequestManagers requestManagers; private final CompletableEventReaper applicationEventReaper; + private final LogContext logContext; + private final ConsumerConfig config; ConsumerNetworkThreadTest() { - ConsumerConfig config = mock(ConsumerConfig.class); this.time = new MockTime(); + this.client = new MockClient(time); + this.applicationEventsQueue = new LinkedBlockingQueue<>(); + this.logContext = new LogContext(); + + this.config = mock(ConsumerConfig.class); this.networkClientDelegate = mock(NetworkClientDelegate.class); this.requestManagers = mock(RequestManagers.class); this.offsetsRequestManager = mock(OffsetsRequestManager.class); this.heartbeatRequestManager = mock(HeartbeatRequestManager.class); this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); - this.metadata = mock(ConsumerMetadata.class); this.applicationEventProcessor = mock(ApplicationEventProcessor.class); this.applicationEventReaper = mock(CompletableEventReaper.class); - this.client = new MockClient(time); - this.applicationEventsQueue = new LinkedBlockingQueue<>(); - LogContext logContext = new LogContext(); - - this.networkClient = new NetworkClientDelegate( - time, - config, - logContext, - client - ); this.consumerNetworkThread = new ConsumerNetworkThread( logContext, @@ -149,12 +133,12 @@ public void tearDown() { @Test public void testEnsureCloseStopsRunningThread() { - // consumerNetworkThread.running is set to true in its constructor - assertTrue(consumerNetworkThread.isRunning()); + assertTrue(consumerNetworkThread.isRunning(), + "ConsumerNetworkThread should start running when created"); - // close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout) consumerNetworkThread.close(); - assertFalse(consumerNetworkThread.isRunning()); + assertFalse(consumerNetworkThread.isRunning(), + "close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout)"); } @ParameterizedTest @@ -200,7 +184,7 @@ public void testStartupAndTearDown() throws InterruptedException { } @Test - public void testRequestManagersArePolledOnce() { + public void testRequestsTransferFromManagersToClientOnThreadRun() { List> list = new ArrayList<>(); list.add(Optional.of(coordinatorRequestManager)); list.add(Optional.of(heartbeatRequestManager)); @@ -216,19 +200,20 @@ public void testRequestManagersArePolledOnce() { } @ParameterizedTest - @MethodSource("appEvents") - public void testEventIsProcessed(ApplicationEvent e) { + @MethodSource("applicationEvents") + public void testApplicationEventIsProcessed(ApplicationEvent e) { applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); verify(applicationEventProcessor).process(any(e.getClass())); 
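        // The event reaper is mocked so tests can verify its add() and reap()
        // interactions without depending on real event-expiration timing.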
assertTrue(applicationEventsQueue.isEmpty()); } - private static Stream appEvents() { + private static Stream applicationEvents() { Time time1 = new MockTime(); Map offset = mockTopicPartitionOffset(); final long currentTimeMs = time1.milliseconds(); + // use 500 for deadlineMs return Stream.of( Arguments.of(new PollEvent(100)), Arguments.of(new NewTopicsMetadataUpdateRequestEvent()), @@ -264,6 +249,13 @@ public void testResetPositionsProcessFailureIsIgnored() { @Test void testPollResultTimer() { + NetworkClientDelegate networkClientDelegate = new NetworkClientDelegate( + time, + config, + logContext, + client + ); + NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() @@ -276,12 +268,12 @@ void testPollResultTimer() { NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( 10, Collections.singletonList(req)); - assertEquals(10, networkClient.addAll(success)); + assertEquals(10, networkClientDelegate.addAll(success)); NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( 10, new ArrayList<>()); - assertEquals(10, networkClient.addAll(failure)); + assertEquals(10, networkClientDelegate.addAll(failure)); } @Test @@ -297,49 +289,6 @@ void testMaximumTimeToWait() { assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait()); } - @Test - void testEnsureEventsAreCompleted() { - Cluster cluster = mock(Cluster.class); - when(metadata.fetch()).thenReturn(cluster); - - when(cluster.nodes()).thenReturn(Collections.singletonList(new Node(0, "host", 0))); - - LinkedList queue = new LinkedList<>(); - when(networkClientDelegate.unsentRequests()).thenReturn(queue); - - // Mimic the logic of CompletableEventReaper.reap(Collection): - doAnswer(__ -> { - Iterator i = applicationEventsQueue.iterator(); - - while (i.hasNext()) { - ApplicationEvent event = i.next(); - - if (event instanceof CompletableEvent) - ((CompletableEvent) event).future().completeExceptionally(new TimeoutException()); - - i.remove(); - } - - return null; - }).when(applicationEventReaper).reap(any(Collection.class)); - - Node node = metadata.fetch().nodes().get(0); - coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); - client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node)); - prepareOffsetCommitRequest(new HashMap<>(), Errors.NONE, false); - CompletableApplicationEvent event1 = mock(AsyncCommitEvent.class); - ApplicationEvent event2 = new AsyncCommitEvent(Collections.emptyMap()); - CompletableFuture future = new CompletableFuture<>(); - when(event1.future()).thenReturn(future); - applicationEventsQueue.add(event1); - applicationEventsQueue.add(event2); - assertFalse(future.isDone()); - assertFalse(applicationEventsQueue.isEmpty()); - consumerNetworkThread.cleanup(); - assertTrue(future.isCompletedExceptionally()); - assertTrue(applicationEventsQueue.isEmpty()); - } - @Test void testCleanupInvokesReaper() { LinkedList queue = new LinkedList<>(); @@ -356,16 +305,23 @@ void testRunOnceInvokesReaper() { @Test void testSendUnsentRequest() { - ConsumerNetworkThread consumerNetworkThread1 = new ConsumerNetworkThread( + NetworkClientDelegate networkClientDelegate = new NetworkClientDelegate( + time, + config, + logContext, + client + ); + + ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread( new LogContext(), time, applicationEventsQueue, applicationEventReaper, () -> 
applicationEventProcessor, - () -> networkClient, + () -> networkClientDelegate, () -> requestManagers ); - consumerNetworkThread1.initializeResources(); + consumerNetworkThread.initializeResources(); String groupId = "group-id"; NetworkClientDelegate.UnsentRequest request = new NetworkClientDelegate.UnsentRequest( @@ -375,15 +331,15 @@ void testSendUnsentRequest() { .setKey(groupId)), Optional.empty()); - networkClient.add(request); - assertTrue(networkClient.hasAnyPendingRequests()); - assertFalse(networkClient.unsentRequests().isEmpty()); + networkClientDelegate.add(request); + assertTrue(networkClientDelegate.hasAnyPendingRequests()); + assertFalse(networkClientDelegate.unsentRequests().isEmpty()); assertFalse(client.hasInFlightRequests()); - consumerNetworkThread1.cleanup(); + consumerNetworkThread.cleanup(); - assertTrue(networkClient.unsentRequests().isEmpty()); + assertTrue(networkClientDelegate.unsentRequests().isEmpty()); assertFalse(client.hasInFlightRequests()); - assertFalse(networkClient.hasAnyPendingRequests()); + assertFalse(networkClientDelegate.hasAnyPendingRequests()); } static private Map mockTopicPartitionOffset() { From 51c7658c163440e52fa07e61bb24c38c243eee5c Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 6 Jun 2024 12:29:42 -0500 Subject: [PATCH 48/61] Updated constructor to look a little better --- .../consumer/internals/ConsumerNetworkThreadTest.java | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 576de2c0980af..f124dbef983a9 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -95,11 +95,6 @@ public class ConsumerNetworkThreadTest { private final ConsumerConfig config; ConsumerNetworkThreadTest() { - this.time = new MockTime(); - this.client = new MockClient(time); - this.applicationEventsQueue = new LinkedBlockingQueue<>(); - this.logContext = new LogContext(); - this.config = mock(ConsumerConfig.class); this.networkClientDelegate = mock(NetworkClientDelegate.class); this.requestManagers = mock(RequestManagers.class); @@ -108,6 +103,10 @@ public class ConsumerNetworkThreadTest { this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); this.applicationEventProcessor = mock(ApplicationEventProcessor.class); this.applicationEventReaper = mock(CompletableEventReaper.class); + this.time = new MockTime(); + this.client = new MockClient(time); + this.applicationEventsQueue = new LinkedBlockingQueue<>(); + this.logContext = new LogContext(); this.consumerNetworkThread = new ConsumerNetworkThread( logContext, From a87f461e66f8f4a18b714e8b4c7457c528814363 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 6 Jun 2024 12:38:11 -0500 Subject: [PATCH 49/61] Updated testApplicationEventIsProcessed() Updated testApplicationEventIsProcessed() to ensure that for completable events, the applicationEventReaper adds those --- .../consumer/internals/ConsumerNetworkThreadTest.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index f124dbef983a9..c17c1fa901498 100644 --- 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -23,6 +23,7 @@ import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; import org.apache.kafka.clients.consumer.internals.events.AssignmentChangeEvent; import org.apache.kafka.clients.consumer.internals.events.AsyncCommitEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent; import org.apache.kafka.clients.consumer.internals.events.NewTopicsMetadataUpdateRequestEvent; @@ -203,6 +204,10 @@ public void testRequestsTransferFromManagersToClientOnThreadRun() { public void testApplicationEventIsProcessed(ApplicationEvent e) { applicationEventsQueue.add(e); consumerNetworkThread.runOnce(); + + if (e instanceof CompletableEvent) + verify(applicationEventReaper).add((CompletableEvent) e); + verify(applicationEventProcessor).process(any(e.getClass())); assertTrue(applicationEventsQueue.isEmpty()); } From 7605928e3fc500a68095ef49cac4d69a55d637d0 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 6 Jun 2024 12:43:21 -0500 Subject: [PATCH 50/61] Fixed checkstyle violation --- .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index c17c1fa901498..a0478fff965c4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -134,11 +134,11 @@ public void tearDown() { @Test public void testEnsureCloseStopsRunningThread() { assertTrue(consumerNetworkThread.isRunning(), - "ConsumerNetworkThread should start running when created"); + "ConsumerNetworkThread should start running when created"); consumerNetworkThread.close(); assertFalse(consumerNetworkThread.isRunning(), - "close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout)"); + "close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout)"); } @ParameterizedTest From 65aa03b2ccc206abd7d56092ab3abd36cfe84b4a Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 6 Jun 2024 14:10:39 -0500 Subject: [PATCH 51/61] Fixed reminder comment --- .../consumer/internals/ConsumerNetworkThreadTest.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index a0478fff965c4..472faba7e0596 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -217,14 +217,13 @@ private static Stream applicationEvents() { Map offset = mockTopicPartitionOffset(); final long currentTimeMs = time1.milliseconds(); - // use 500 for deadlineMs return Stream.of( Arguments.of(new PollEvent(100)), 
Arguments.of(new NewTopicsMetadataUpdateRequestEvent()), Arguments.of(new AsyncCommitEvent(new HashMap<>())), - Arguments.of(new SyncCommitEvent(new HashMap<>(), calculateDeadlineMs(time1, 100))), - Arguments.of(new ResetPositionsEvent(calculateDeadlineMs(time1, 100))), - Arguments.of(new ValidatePositionsEvent(calculateDeadlineMs(time1, 100))), + Arguments.of(new SyncCommitEvent(new HashMap<>(), 500)), + Arguments.of(new ResetPositionsEvent(500)), + Arguments.of(new ValidatePositionsEvent(500)), Arguments.of(new TopicMetadataEvent("topic", Long.MAX_VALUE)), Arguments.of(new AssignmentChangeEvent(offset, currentTimeMs))); } From b001ead3558e68e2fc646eefd83ca68ea4989085 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 6 Jun 2024 15:14:26 -0500 Subject: [PATCH 52/61] Update test scope and testSendUnsentRequests() --- .../internals/ConsumerNetworkThreadTest.java | 43 ++++--------------- 1 file changed, 9 insertions(+), 34 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 472faba7e0596..62fc25d39e365 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -74,6 +74,7 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -251,7 +252,7 @@ public void testResetPositionsProcessFailureIsIgnored() { } @Test - void testPollResultTimer() { + public void testPollResultTimer() { NetworkClientDelegate networkClientDelegate = new NetworkClientDelegate( time, config, @@ -280,7 +281,7 @@ void testPollResultTimer() { } @Test - void testMaximumTimeToWait() { + public void testMaximumTimeToWait() { // Initial value before runOnce has been called assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); @@ -293,7 +294,7 @@ void testMaximumTimeToWait() { } @Test - void testCleanupInvokesReaper() { + public void testCleanupInvokesReaper() { LinkedList queue = new LinkedList<>(); when(networkClientDelegate.unsentRequests()).thenReturn(queue); consumerNetworkThread.cleanup(); @@ -301,45 +302,19 @@ void testCleanupInvokesReaper() { } @Test - void testRunOnceInvokesReaper() { + public void testRunOnceInvokesReaper() { consumerNetworkThread.runOnce(); verify(applicationEventReaper).reap(any(Long.class)); } @Test - void testSendUnsentRequest() { - NetworkClientDelegate networkClientDelegate = new NetworkClientDelegate( - time, - config, - logContext, - client - ); - - ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread( - new LogContext(), - time, - applicationEventsQueue, - applicationEventReaper, - () -> applicationEventProcessor, - () -> networkClientDelegate, - () -> requestManagers - ); - consumerNetworkThread.initializeResources(); + public void testSendUnsentRequests() { + when(networkClientDelegate.unsentRequests()).thenReturn(new LinkedList<>()); + when(networkClientDelegate.hasAnyPendingRequests()).thenReturn(true).thenReturn(true).thenReturn(false); - String groupId = "group-id"; - NetworkClientDelegate.UnsentRequest request = new NetworkClientDelegate.UnsentRequest( - new FindCoordinatorRequest.Builder( - 
new FindCoordinatorRequestData() - .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) - .setKey(groupId)), - Optional.empty()); - - networkClientDelegate.add(request); - assertTrue(networkClientDelegate.hasAnyPendingRequests()); - assertFalse(networkClientDelegate.unsentRequests().isEmpty()); - assertFalse(client.hasInFlightRequests()); consumerNetworkThread.cleanup(); + verify(networkClientDelegate, times(2)).poll(anyLong(), anyLong()); assertTrue(networkClientDelegate.unsentRequests().isEmpty()); assertFalse(client.hasInFlightRequests()); assertFalse(networkClientDelegate.hasAnyPendingRequests()); From 23caab830a05a19bb7c80f3dfd4aa99e72d8c990 Mon Sep 17 00:00:00 2001 From: brenden20 Date: Thu, 6 Jun 2024 16:08:01 -0500 Subject: [PATCH 53/61] Cleaned up unused imports and unused methods Cleaned up unused imports and unused methods, --- .../internals/ConsumerNetworkThreadTest.java | 81 +++---------------- 1 file changed, 13 insertions(+), 68 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 62fc25d39e365..153fd8ddea744 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -34,10 +34,7 @@ import org.apache.kafka.clients.consumer.internals.events.ValidatePositionsEvent; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.FindCoordinatorRequestData; -import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.FindCoordinatorRequest; -import org.apache.kafka.common.requests.OffsetCommitRequest; -import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -53,7 +50,6 @@ import java.time.Duration; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -213,22 +209,6 @@ public void testApplicationEventIsProcessed(ApplicationEvent e) { assertTrue(applicationEventsQueue.isEmpty()); } - private static Stream applicationEvents() { - Time time1 = new MockTime(); - Map offset = mockTopicPartitionOffset(); - final long currentTimeMs = time1.milliseconds(); - - return Stream.of( - Arguments.of(new PollEvent(100)), - Arguments.of(new NewTopicsMetadataUpdateRequestEvent()), - Arguments.of(new AsyncCommitEvent(new HashMap<>())), - Arguments.of(new SyncCommitEvent(new HashMap<>(), 500)), - Arguments.of(new ResetPositionsEvent(500)), - Arguments.of(new ValidatePositionsEvent(500)), - Arguments.of(new TopicMetadataEvent("topic", Long.MAX_VALUE)), - Arguments.of(new AssignmentChangeEvent(offset, currentTimeMs))); - } - @ParameterizedTest @ValueSource(booleans = {true, false}) public void testListOffsetsEventIsProcessed(boolean requireTimestamp) { @@ -320,53 +300,18 @@ public void testSendUnsentRequests() { assertFalse(networkClientDelegate.hasAnyPendingRequests()); } - static private Map mockTopicPartitionOffset() { - final TopicPartition t0 = new TopicPartition("t0", 2); - final TopicPartition t1 = new TopicPartition("t0", 3); - final Map topicPartitionOffsets = new HashMap<>(); - topicPartitionOffsets.put(t0, new OffsetAndMetadata(10L)); - topicPartitionOffsets.put(t1, new 
OffsetAndMetadata(20L)); - return topicPartitionOffsets; - } - - private void prepareOffsetCommitRequest(final Map expectedOffsets, - final Errors error, - final boolean disconnected) { - Map errors = partitionErrors(expectedOffsets.keySet(), error); - client.prepareResponse(offsetCommitRequestMatcher(expectedOffsets), offsetCommitResponse(errors), disconnected); - } - - private Map partitionErrors(final Collection partitions, - final Errors error) { - final Map errors = new HashMap<>(); - for (TopicPartition partition : partitions) { - errors.put(partition, error); - } - return errors; - } - - private OffsetCommitResponse offsetCommitResponse(final Map responseData) { - return new OffsetCommitResponse(responseData); - } - - private MockClient.RequestMatcher offsetCommitRequestMatcher(final Map expectedOffsets) { - return body -> { - OffsetCommitRequest req = (OffsetCommitRequest) body; - Map offsets = req.offsets(); - if (offsets.size() != expectedOffsets.size()) - return false; - - for (Map.Entry expectedOffset : expectedOffsets.entrySet()) { - if (!offsets.containsKey(expectedOffset.getKey())) { - return false; - } else { - Long actualOffset = offsets.get(expectedOffset.getKey()); - if (!actualOffset.equals(expectedOffset.getValue())) { - return false; - } - } - } - return true; - }; + private static Stream applicationEvents() { + Time time1 = new MockTime(); + Map offset = new HashMap<>(); + final long currentTimeMs = time1.milliseconds(); + return Stream.of( + Arguments.of(new PollEvent(100)), + Arguments.of(new NewTopicsMetadataUpdateRequestEvent()), + Arguments.of(new AsyncCommitEvent(new HashMap<>())), + Arguments.of(new SyncCommitEvent(new HashMap<>(), 500)), + Arguments.of(new ResetPositionsEvent(500)), + Arguments.of(new ValidatePositionsEvent(500)), + Arguments.of(new TopicMetadataEvent("topic", Long.MAX_VALUE)), + Arguments.of(new AssignmentChangeEvent(offset, currentTimeMs))); } } From 1fe289fda7a79748ea920e17eef03439440634de Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 7 Jun 2024 11:39:19 -0500 Subject: [PATCH 54/61] Update testSendUnsentRequests() --- .../consumer/internals/ConsumerNetworkThreadTest.java | 6 ------ 1 file changed, 6 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 153fd8ddea744..e606fbec37d02 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -289,14 +289,8 @@ public void testRunOnceInvokesReaper() { @Test public void testSendUnsentRequests() { - when(networkClientDelegate.unsentRequests()).thenReturn(new LinkedList<>()); when(networkClientDelegate.hasAnyPendingRequests()).thenReturn(true).thenReturn(true).thenReturn(false); - consumerNetworkThread.cleanup(); - - verify(networkClientDelegate, times(2)).poll(anyLong(), anyLong()); - assertTrue(networkClientDelegate.unsentRequests().isEmpty()); - assertFalse(client.hasInFlightRequests()); assertFalse(networkClientDelegate.hasAnyPendingRequests()); } From 4718fc9cd092c51fb8362edc6d2a8f425975cf0e Mon Sep 17 00:00:00 2001 From: brenden20 Date: Fri, 7 Jun 2024 12:33:31 -0500 Subject: [PATCH 55/61] Removed unused import --- .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 1 - 1 file changed, 1 deletion(-) diff --git 
From 4718fc9cd092c51fb8362edc6d2a8f425975cf0e Mon Sep 17 00:00:00 2001
From: brenden20
Date: Fri, 7 Jun 2024 12:33:31 -0500
Subject: [PATCH 55/61] Removed unused import

---
 .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
index e606fbec37d02..3d08afa6a5ce4 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
@@ -70,7 +70,6 @@
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
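Patch 56 below finishes the move to an all-mock fixture by deleting the MockClient and ConsumerConfig collaborators. A common way such a background thread is wired is through Supplier arguments for lazily created resources, in which case a mock slots in as a constant supplier. A generic sketch of that pattern; Worker and Resource are invented names, not Kafka classes:

import java.util.function.Supplier;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class SupplierInjectionSketch {

    // Invented resource interface.
    interface Resource {
        void use();
    }

    // Invented worker that defers resource construction until start().
    static class Worker {
        private final Supplier<Resource> resourceSupplier;

        Worker(Supplier<Resource> resourceSupplier) {
            this.resourceSupplier = resourceSupplier;
        }

        void start() {
            Resource resource = resourceSupplier.get(); // resolved lazily, not at construction
            resource.use();
        }
    }

    public static void main(String[] args) {
        Resource mocked = mock(Resource.class);
        new Worker(() -> mocked).start(); // a constant supplier hands the mock through
        verify(mocked).use();
    }
}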
From 6dd9de613139159ee9acd2e83f7e4b9855425e97 Mon Sep 17 00:00:00 2001
From: brenden20
Date: Fri, 7 Jun 2024 12:54:06 -0500
Subject: [PATCH 56/61] Fixed some merge issues

---
 .../internals/ConsumerNetworkThreadTest.java | 41 +------------------
 1 file changed, 1 insertion(+), 40 deletions(-)

diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
index 3d08afa6a5ce4..954abddab9285 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
@@ -16,8 +16,6 @@
  */
 package org.apache.kafka.clients.consumer.internals;

-import org.apache.kafka.clients.MockClient;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent;
 import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor;
@@ -33,8 +31,6 @@
 import org.apache.kafka.clients.consumer.internals.events.TopicMetadataEvent;
 import org.apache.kafka.clients.consumer.internals.events.ValidatePositionsEvent;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.message.FindCoordinatorRequestData;
-import org.apache.kafka.common.requests.FindCoordinatorRequest;
 import org.apache.kafka.common.utils.LogContext;
 import org.apache.kafka.common.utils.MockTime;
 import org.apache.kafka.common.utils.Time;
@@ -75,7 +71,6 @@ public class ConsumerNetworkThreadTest {
     static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000;
-    static final int DEFAULT_REQUEST_TIMEOUT_MS = 500;

     private final Time time;
     private final BlockingQueue<ApplicationEvent> applicationEventsQueue;
@@ -84,15 +79,11 @@ public class ConsumerNetworkThreadTest {
     private final HeartbeatRequestManager heartbeatRequestManager;
     private final CoordinatorRequestManager coordinatorRequestManager;
     private final ConsumerNetworkThread consumerNetworkThread;
-    private final MockClient client;
     private final NetworkClientDelegate networkClientDelegate;
     private final RequestManagers requestManagers;
     private final CompletableEventReaper applicationEventReaper;
-    private final LogContext logContext;
-    private final ConsumerConfig config;

     ConsumerNetworkThreadTest() {
-        this.config = mock(ConsumerConfig.class);
         this.networkClientDelegate = mock(NetworkClientDelegate.class);
         this.requestManagers = mock(RequestManagers.class);
         this.offsetsRequestManager = mock(OffsetsRequestManager.class);
         this.heartbeatRequestManager = mock(HeartbeatRequestManager.class);
         this.coordinatorRequestManager = mock(CoordinatorRequestManager.class);
         this.applicationEventProcessor = mock(ApplicationEventProcessor.class);
         this.applicationEventReaper = mock(CompletableEventReaper.class);
         this.time = new MockTime();
-        this.client = new MockClient(time);
         this.applicationEventsQueue = new LinkedBlockingQueue<>();
-        this.logContext = new LogContext();
+        LogContext logContext = new LogContext();

         this.consumerNetworkThread = new ConsumerNetworkThread(
             logContext,
@@ -230,35 +220,6 @@ public void testResetPositionsProcessFailureIsIgnored() {
         verify(applicationEventProcessor).process(any(ResetPositionsEvent.class));
     }

-    @Test
-    public void testPollResultTimer() {
-        NetworkClientDelegate networkClientDelegate = new NetworkClientDelegate(
-            time,
-            config,
-            logContext,
-            client
-        );
-
-        NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest(
-            new FindCoordinatorRequest.Builder(
-                new FindCoordinatorRequestData()
-                    .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id())
-                    .setKey("foobar")),
-            Optional.empty());
-        req.setTimer(time, DEFAULT_REQUEST_TIMEOUT_MS);
-
-        // purposely setting a non-MAX time to ensure it is returning Long.MAX_VALUE upon success
-        NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult(
-            10,
-            Collections.singletonList(req));
-        assertEquals(10, networkClientDelegate.addAll(success));
-
-        NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult(
-            10,
-            new ArrayList<>());
-        assertEquals(10, networkClientDelegate.addAll(failure));
-    }
-
     @Test
     public void testMaximumTimeToWait() {
         // Initial value before runOnce has been called
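Patches 57 and 58 below refactor the expected poll timeout in two steps: first the magic numbers 5001/5000 become a named constant, then the ternary collapses into Math.min. The two forms agree because, for longs, exampleTime < MAX + 1 is the same predicate as exampleTime <= MAX. A tiny demonstration with arbitrarily chosen values:

public class MinClampSketch {

    static final long MAX_POLL_TIMEOUT_MS = 5000;

    public static void main(String[] args) {
        for (long exampleTime : new long[] {4999L, 5000L, 5001L}) {
            // Patch 57's form: pass exampleTime through until it exceeds the cap.
            long viaTernary = exampleTime < MAX_POLL_TIMEOUT_MS + 1 ? exampleTime : MAX_POLL_TIMEOUT_MS;
            // Patch 58's form: identical result, stated directly.
            long viaMin = Math.min(exampleTime, MAX_POLL_TIMEOUT_MS);
            System.out.println(exampleTime + " -> ternary=" + viaTernary + ", min=" + viaMin);
        }
        // Prints 4999/4999, 5000/5000, 5000/5000: the two forms never diverge.
    }
}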
From 41750b716230f32e6f5dfc2c665375cfa5e0d48e Mon Sep 17 00:00:00 2001
From: brenden20
Date: Fri, 7 Jun 2024 14:09:44 -0500
Subject: [PATCH 57/61] Implementing suggestions

---
 .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
index 954abddab9285..88f271042e3f7 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
@@ -71,6 +71,7 @@ public class ConsumerNetworkThreadTest {
     static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000;
+    static final long MAX_POLL_TIMEOUT_MS = 5000;

     private final Time time;
     private final BlockingQueue<ApplicationEvent> applicationEventsQueue;
@@ -148,7 +149,7 @@ public void testConsumerNetworkThreadPollTimeComputations(long exampleTime) {
         when(networkClientDelegate.addAll(pollResult1)).thenReturn(pollResult1.timeUntilNextPollMs);

         consumerNetworkThread.runOnce();
-        verify(networkClientDelegate).poll(exampleTime < 5001 ? exampleTime : 5000, time.milliseconds());
+        verify(networkClientDelegate).poll(exampleTime < MAX_POLL_TIMEOUT_MS + 1 ? exampleTime : MAX_POLL_TIMEOUT_MS, time.milliseconds());
         assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime);
     }

From 7c0aa57bb52867ee351fb4071a6cb97bbd798c7b Mon Sep 17 00:00:00 2001
From: brenden20
Date: Fri, 7 Jun 2024 14:13:38 -0500
Subject: [PATCH 58/61] Implement suggestion

---
 .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
index 88f271042e3f7..0420447732979 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
@@ -149,7 +149,7 @@ public void testConsumerNetworkThreadPollTimeComputations(long exampleTime) {
         when(networkClientDelegate.addAll(pollResult1)).thenReturn(pollResult1.timeUntilNextPollMs);

         consumerNetworkThread.runOnce();
-        verify(networkClientDelegate).poll(exampleTime < MAX_POLL_TIMEOUT_MS + 1 ? exampleTime : MAX_POLL_TIMEOUT_MS, time.milliseconds());
+        verify(networkClientDelegate).poll(Math.min(exampleTime, MAX_POLL_TIMEOUT_MS), time.milliseconds());
         assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime);
     }
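Patch 59 below derives the @ValueSource cases from ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS rather than hard-coding 100, 4999, and 5001, so the test keeps probing both sides of the clamp even if the production constant changes. This only compiles because annotation attributes must be compile-time constants, meaning the referenced field has to be a static final primitive with a constant initializer. A minimal sketch using an invented LIMIT_MS constant:

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

import static org.junit.jupiter.api.Assertions.assertEquals;

class BoundaryValueSketchTest {

    // A constant initializer makes this legal inside the annotation below.
    static final long LIMIT_MS = 5000L;

    @ParameterizedTest
    @ValueSource(longs = {LIMIT_MS - 1, LIMIT_MS, LIMIT_MS + 1}) // both sides of the boundary
    void testClampTracksLimit(long exampleTime) {
        long clamped = Math.min(exampleTime, LIMIT_MS);
        assertEquals(exampleTime <= LIMIT_MS ? exampleTime : LIMIT_MS, clamped);
    }
}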
From 0236a187e77f622fd7802472a0dda3c2186b6918 Mon Sep 17 00:00:00 2001
From: brenden20
Date: Fri, 7 Jun 2024 15:58:47 -0500
Subject: [PATCH 59/61] Updated testConsumerNetworkThreadPollTimeComputations()

Updates testConsumerNetworkThreadPollTimeComputations() and fixes a checkstyle error.
---
 .../internals/ConsumerNetworkThreadTest.java | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
index 0420447732979..dbac981a4b064 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
@@ -70,9 +70,6 @@
 import static org.mockito.Mockito.when;

 public class ConsumerNetworkThreadTest {
-    static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 1000;
-    static final long MAX_POLL_TIMEOUT_MS = 5000;

     private final Time time;
     private final BlockingQueue<ApplicationEvent> applicationEventsQueue;
     private final ApplicationEventProcessor applicationEventProcessor;
@@ -129,7 +126,7 @@ public void testEnsureCloseStopsRunningThread() {
     }

     @ParameterizedTest
-    @ValueSource(longs = {100, 4999, 5001})
+    @ValueSource(longs = {ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS - 1, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS + 1})
     public void testConsumerNetworkThreadPollTimeComputations(long exampleTime) {
         List<Optional<? extends RequestManager>> list = new ArrayList<>();
         list.add(Optional.of(coordinatorRequestManager));
@@ -149,7 +146,7 @@ public void testConsumerNetworkThreadPollTimeComputations(long exampleTime) {
         when(networkClientDelegate.addAll(pollResult1)).thenReturn(pollResult1.timeUntilNextPollMs);

         consumerNetworkThread.runOnce();
-        verify(networkClientDelegate).poll(Math.min(exampleTime, MAX_POLL_TIMEOUT_MS), time.milliseconds());
+        verify(networkClientDelegate).poll(Math.min(exampleTime, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS), time.milliseconds());
         assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime);
     }

@@ -223,15 +220,16 @@ public void testResetPositionsProcessFailureIsIgnored() {
     @Test
     public void testMaximumTimeToWait() {
+        final int defaultHeartbeatIntervalMs = 1000;
         // Initial value before runOnce has been called
         assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait());
         when(requestManagers.entries()).thenReturn(Collections.singletonList(Optional.of(heartbeatRequestManager)));
-        when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) DEFAULT_HEARTBEAT_INTERVAL_MS);
+        when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) defaultHeartbeatIntervalMs);

         consumerNetworkThread.runOnce();
         // After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager
-        assertEquals(DEFAULT_HEARTBEAT_INTERVAL_MS, consumerNetworkThread.maximumTimeToWait());
+        assertEquals(defaultHeartbeatIntervalMs, consumerNetworkThread.maximumTimeToWait());
     }

     @Test

From 41948b7521c81e7fa50c5568835141203f38ac7e Mon Sep 17 00:00:00 2001
From: brenden20
Date: Fri, 7 Jun 2024 16:14:03 -0500
Subject: [PATCH 60/61] Fixed error

---
 .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
index dbac981a4b064..3f12aef000999 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
@@ -66,6 +66,7 @@
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;

@@ -250,7 +251,7 @@ public void testSendUnsentRequests() {
         when(networkClientDelegate.hasAnyPendingRequests()).thenReturn(true).thenReturn(true).thenReturn(false);
         consumerNetworkThread.cleanup();
-        assertFalse(networkClientDelegate.hasAnyPendingRequests());
+        verify(networkClientDelegate, times(2)).poll(anyLong(), anyLong());
     }
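Patch 61 below replaces the MockTime instance inside the static applicationEvents() factory with a literal timestamp. The factory runs during test discovery, before per-test fixtures exist, so a fixed literal keeps the supplied events deterministic and drops the stray clock dependency. A sketch of the resulting shape; TimedEvent is an invented stand-in for the event type, not a Kafka class:

import java.util.stream.Stream;

import org.junit.jupiter.params.provider.Arguments;

class DeterministicSupplierSketch {

    // Invented event that records a creation timestamp.
    static class TimedEvent {
        final long timestampMs;
        TimedEvent(long timestampMs) {
            this.timestampMs = timestampMs;
        }
    }

    static Stream<Arguments> events() {
        // A literal instead of a clock: the factory produces identical
        // events on every run and needs no time abstraction at all.
        final long currentTimeMs = 12345;
        return Stream.of(Arguments.of(new TimedEvent(currentTimeMs)));
    }
}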
From 61f673f9f9c9192a877a97f2e0a571a96e0ae36a Mon Sep 17 00:00:00 2001
From: brenden20
Date: Mon, 10 Jun 2024 12:32:58 -0500
Subject: [PATCH 61/61] Implemented suggestion

---
 .../clients/consumer/internals/ConsumerNetworkThreadTest.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
index 3f12aef000999..e02c983ed374e 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java
@@ -255,9 +255,8 @@ public void testSendUnsentRequests() {
     }

     private static Stream<Arguments> applicationEvents() {
-        Time time1 = new MockTime();
         Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
-        final long currentTimeMs = time1.milliseconds();
+        final long currentTimeMs = 12345;
         return Stream.of(
             Arguments.of(new PollEvent(100)),
             Arguments.of(new NewTopicsMetadataUpdateRequestEvent()),