-
Notifications
You must be signed in to change notification settings - Fork 15.1k
KAFKA-9176: Retry on getting local stores from KafkaStreams #8568
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -43,7 +43,6 @@ | |
| import org.apache.kafka.streams.StreamsBuilder; | ||
| import org.apache.kafka.streams.StreamsConfig; | ||
| import org.apache.kafka.streams.KeyQueryMetadata; | ||
| import org.apache.kafka.streams.StoreQueryParameters; | ||
| import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster; | ||
| import org.apache.kafka.streams.integration.utils.IntegrationTestUtils; | ||
| import org.apache.kafka.streams.kstream.Consumed; | ||
|
|
@@ -119,11 +118,8 @@ public void shouldApplyUpdatesToStandbyStore() throws Exception { | |
| // Assert that all messages in the first batch were processed in a timely manner | ||
| assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true))); | ||
|
|
||
| final ReadOnlyKeyValueStore<Integer, Integer> store1 = kafkaStreams1 | ||
| .store(StoreQueryParameters.fromNameAndType(TABLE_NAME, QueryableStoreTypes.keyValueStore())); | ||
|
|
||
| final ReadOnlyKeyValueStore<Integer, Integer> store2 = kafkaStreams2 | ||
| .store(StoreQueryParameters.fromNameAndType(TABLE_NAME, QueryableStoreTypes.keyValueStore())); | ||
| final ReadOnlyKeyValueStore<Integer, Integer> store1 = IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams1, QueryableStoreTypes.keyValueStore()); | ||
| final ReadOnlyKeyValueStore<Integer, Integer> store2 = IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams2, QueryableStoreTypes.keyValueStore()); | ||
|
|
||
| final boolean kafkaStreams1WasFirstActive; | ||
| final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0); | ||
|
|
@@ -163,8 +159,10 @@ public void shouldApplyUpdatesToStandbyStore() throws Exception { | |
| // Assert that all messages in the second batch were processed in a timely manner | ||
| assertThat(semaphore.tryAcquire(batch2NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true))); | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
|
||
|
|
||
| // Assert that the current value in store reflects all messages being processed | ||
| assertThat(newActiveStore.get(key), is(equalTo(totalNumMessages - 1))); | ||
| TestUtils.retryOnExceptionWithTimeout(100, 60 * 1000, () -> { | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This is a minor fix: we should retry this condition. |
||
| // Assert that the current value in store reflects all messages being processed | ||
| assertThat(newActiveStore.get(key), is(equalTo(totalNumMessages - 1))); | ||
| }); | ||
| } | ||
|
|
||
| private void produceValueRange(final int key, final int start, final int endExclusive) throws Exception { | ||
|
|
@@ -227,10 +225,11 @@ private Properties streamsConfiguration() { | |
| config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass()); | ||
| config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass()); | ||
| config.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1); | ||
| config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100); | ||
| config.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0); | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This is a fix to the test itself: with caching enabled, sending the records to the sink topics is delayed. |
||
| config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100); | ||
| config.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 200); | ||
| config.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000); | ||
| config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100); | ||
| return config; | ||
| } | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why do we have to check for null now?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Since previously we would just throw the exception from the un-wrapped call, asserting here that the result is not null is equivalent to making sure that the store is indeed returned.