From 55c2265e296a784416e11e46b09db835a876739f Mon Sep 17 00:00:00 2001 From: Sanskar Jhajharia Date: Fri, 24 May 2024 11:15:09 +0530 Subject: [PATCH 1/5] Code Cleanup - Metadata module --- .../controller/ClusterControlManager.java | 2 +- .../controller/PartitionChangeBuilder.java | 5 +- .../kafka/controller/QuorumController.java | 3 +- .../controller/ReplicationControlManager.java | 2 +- .../errors/ControllerExceptions.java | 6 +- .../errors/EventHandlerExceptionInfo.java | 3 +- .../kafka/metadata/BrokerRegistration.java | 38 +- .../metadata/ControllerRegistration.java | 28 +- .../metadata/FinalizedControllerFeatures.java | 10 +- .../kafka/metadata/PartitionRegistration.java | 26 +- .../controller/AclControlManagerTest.java | 6 +- .../ClientQuotaControlManagerTest.java | 36 +- .../controller/ClusterControlManagerTest.java | 28 +- .../ConfigurationControlManagerTest.java | 27 +- .../controller/FeatureControlManagerTest.java | 14 +- .../controller/OffsetControlManagerTest.java | 16 +- .../PartitionChangeBuilderTest.java | 40 +- .../PartitionReassignmentReplicasTest.java | 2 +- .../PartitionReassignmentRevertTest.java | 7 +- .../QuorumControllerIntegrationTestUtils.java | 11 +- .../controller/QuorumControllerTest.java | 134 ++--- .../controller/QuorumControllerTestEnv.java | 2 +- .../ReplicationControlManagerTest.java | 548 +++++++++--------- .../kafka/image/ClientQuotasImageTest.java | 8 +- .../apache/kafka/image/ClusterImageTest.java | 21 +- .../kafka/image/ImageDowngradeTest.java | 76 +-- .../apache/kafka/image/TopicsImageTest.java | 8 +- .../image/loader/MetadataLoaderTest.java | 128 ++-- .../node/ClusterImageBrokersNodeTest.java | 2 +- .../node/ClusterImageControllersNodeTest.java | 3 +- .../publisher/SnapshotGeneratorTest.java | 5 +- .../image/writer/RaftSnapshotWriterTest.java | 3 +- .../metadata/BrokerRegistrationTest.java | 14 +- .../metadata/DelegationTokenDataTest.java | 3 +- .../kafka/metadata/KafkaConfigSchemaTest.java | 7 +- 
.../kafka/metadata/ListenerInfoTest.java | 25 +- .../metadata/PartitionRegistrationTest.java | 4 +- .../kafka/metadata/RecordTestUtils.java | 12 +- .../apache/kafka/metadata/ReplicasTest.java | 8 +- .../StandardAclRecordIteratorTest.java | 5 +- .../bootstrap/BootstrapMetadataTest.java | 6 +- .../migration/KRaftMigrationDriverTest.java | 6 +- .../placement/StripedReplicaPlacerTest.java | 6 +- .../placement/TopicAssignmentTest.java | 21 +- .../MetaPropertiesEnsembleTest.java | 4 +- .../apache/kafka/metalog/LocalLogManager.java | 3 +- 46 files changed, 680 insertions(+), 692 deletions(-) diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java index f0bd98776bc38..6263094f2b7ff 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java @@ -257,7 +257,7 @@ boolean check() { */ private final boolean zkMigrationEnabled; - private BrokerUncleanShutdownHandler brokerUncleanShutdownHandler; + private final BrokerUncleanShutdownHandler brokerUncleanShutdownHandler; /** * Maps controller IDs to controller registrations. 
diff --git a/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java b/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java index 0d2c1bd6a9d58..86d7a0f1494f6 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java +++ b/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java @@ -58,8 +58,7 @@ public static boolean changeRecordIsNoOp(PartitionChangeRecord record) { if (record.removingReplicas() != null) return false; if (record.addingReplicas() != null) return false; if (record.leaderRecoveryState() != LeaderRecoveryState.NO_CHANGE) return false; - if (record.directories() != null) return false; - return true; + return record.directories() == null; } /** @@ -515,7 +514,7 @@ private void maybeUpdateLastKnownLeader(PartitionChangeRecord record) { if (record.isr() != null && record.isr().isEmpty() && (partition.lastKnownElr.length != 1 || partition.lastKnownElr[0] != partition.leader)) { // Only update the last known leader when the first time the partition becomes leaderless. - record.setLastKnownElr(Arrays.asList(partition.leader)); + record.setLastKnownElr(Collections.singletonList(partition.leader)); } else if ((record.leader() >= 0 || (partition.leader != NO_LEADER && record.leader() != NO_LEADER)) && partition.lastKnownElr.length > 0) { // Clear the LastKnownElr field if the partition will have or continues to have a valid leader. 
diff --git a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java index 9d186d83d3ff4..cb2db1858b48c 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java +++ b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java @@ -130,7 +130,6 @@ import org.slf4j.Logger; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -1405,7 +1404,7 @@ private void maybeScheduleNextWriteNoOpRecord() { maybeScheduleNextWriteNoOpRecord(); return ControllerResult.of( - Arrays.asList(new ApiMessageAndVersion(new NoOpRecord(), (short) 0)), + Collections.singletonList(new ApiMessageAndVersion(new NoOpRecord(), (short) 0)), null ); }, diff --git a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java index 9b412ad105da5..3c946954f7fb4 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java @@ -1904,7 +1904,7 @@ void generateLeaderAndIsrUpdates(String context, builder.setElection(PartitionChangeBuilder.Election.UNCLEAN); } if (brokerWithUncleanShutdown != NO_LEADER) { - builder.setUncleanShutdownReplicas(Arrays.asList(brokerWithUncleanShutdown)); + builder.setUncleanShutdownReplicas(Collections.singletonList(brokerWithUncleanShutdown)); } // Note: if brokerToRemove and brokerWithUncleanShutdown were passed as NO_LEADER, this is a no-op (the new diff --git a/metadata/src/main/java/org/apache/kafka/controller/errors/ControllerExceptions.java b/metadata/src/main/java/org/apache/kafka/controller/errors/ControllerExceptions.java index b7e74446a4b6b..3c7427493833e 100644 --- 
a/metadata/src/main/java/org/apache/kafka/controller/errors/ControllerExceptions.java +++ b/metadata/src/main/java/org/apache/kafka/controller/errors/ControllerExceptions.java @@ -37,8 +37,7 @@ public static boolean isTimeoutException(Throwable exception) { exception = exception.getCause(); if (exception == null) return false; } - if (!(exception instanceof TimeoutException)) return false; - return true; + return exception instanceof TimeoutException; } /** @@ -53,8 +52,7 @@ public static boolean isNotControllerException(Throwable exception) { exception = exception.getCause(); if (exception == null) return false; } - if (!(exception instanceof NotControllerException)) return false; - return true; + return exception instanceof NotControllerException; } /** diff --git a/metadata/src/main/java/org/apache/kafka/controller/errors/EventHandlerExceptionInfo.java b/metadata/src/main/java/org/apache/kafka/controller/errors/EventHandlerExceptionInfo.java index 4c95a553b109c..09848d0c2eef4 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/errors/EventHandlerExceptionInfo.java +++ b/metadata/src/main/java/org/apache/kafka/controller/errors/EventHandlerExceptionInfo.java @@ -116,8 +116,7 @@ static boolean exceptionClassesAndMessagesMatch(Throwable a, Throwable b) { if (a == null) return b == null; if (b == null) return false; if (!a.getClass().equals(b.getClass())) return false; - if (!Objects.equals(a.getMessage(), b.getMessage())) return false; - return true; + return Objects.equals(a.getMessage(), b.getMessage()); } EventHandlerExceptionInfo( diff --git a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java index bb9152022920e..896ba5b4e6b84 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java @@ -366,26 +366,24 @@ public boolean equals(Object o) { @Override 
public String toString() { - StringBuilder bld = new StringBuilder(); - bld.append("BrokerRegistration(id=").append(id); - bld.append(", epoch=").append(epoch); - bld.append(", incarnationId=").append(incarnationId); - bld.append(", listeners=[").append( - listeners.keySet().stream().sorted(). - map(n -> listeners.get(n).toString()). - collect(Collectors.joining(", "))); - bld.append("], supportedFeatures={").append( - supportedFeatures.keySet().stream().sorted(). - map(k -> k + ": " + supportedFeatures.get(k)). - collect(Collectors.joining(", "))); - bld.append("}"); - bld.append(", rack=").append(rack); - bld.append(", fenced=").append(fenced); - bld.append(", inControlledShutdown=").append(inControlledShutdown); - bld.append(", isMigratingZkBroker=").append(isMigratingZkBroker); - bld.append(", directories=").append(directories); - bld.append(")"); - return bld.toString(); + return "BrokerRegistration(id=" + id + + ", epoch=" + epoch + + ", incarnationId=" + incarnationId + + ", listeners=[" + + listeners.keySet().stream().sorted(). + map(n -> listeners.get(n).toString()). + collect(Collectors.joining(", ")) + + "], supportedFeatures={" + + supportedFeatures.keySet().stream().sorted(). + map(k -> k + ": " + supportedFeatures.get(k)). 
+ collect(Collectors.joining(", ")) + + "}" + + ", rack=" + rack + + ", fenced=" + fenced + + ", inControlledShutdown=" + inControlledShutdown + + ", isMigratingZkBroker=" + isMigratingZkBroker + + ", directories=" + directories + + ")"; } public BrokerRegistration cloneWith( diff --git a/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java index c26880bfd15bb..a6b3d13bea267 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java @@ -214,20 +214,18 @@ public boolean equals(Object o) { @Override public String toString() { - StringBuilder bld = new StringBuilder(); - bld.append("ControllerRegistration(id=").append(id); - bld.append(", incarnationId=").append(incarnationId); - bld.append(", zkMigrationReady=").append(zkMigrationReady); - bld.append(", listeners=[").append( - listeners.keySet().stream().sorted(). - map(n -> listeners.get(n).toString()). - collect(Collectors.joining(", "))); - bld.append("], supportedFeatures={").append( - supportedFeatures.keySet().stream().sorted(). - map(k -> k + ": " + supportedFeatures.get(k)). - collect(Collectors.joining(", "))); - bld.append("}"); - bld.append(")"); - return bld.toString(); + return "ControllerRegistration(id=" + id + + ", incarnationId=" + incarnationId + + ", zkMigrationReady=" + zkMigrationReady + + ", listeners=[" + + listeners.keySet().stream().sorted(). + map(n -> listeners.get(n).toString()). + collect(Collectors.joining(", ")) + + "], supportedFeatures={" + + supportedFeatures.keySet().stream().sorted(). + map(k -> k + ": " + supportedFeatures.get(k)). 
+ collect(Collectors.joining(", ")) + + "}" + + ")"; } } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java index 05ef45d1e9667..88bb688f6b61f 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java @@ -66,11 +66,9 @@ public boolean equals(Object o) { @Override public String toString() { - StringBuilder bld = new StringBuilder(); - bld.append("{"); - bld.append("featureMap=").append(featureMap.toString()); - bld.append(", epoch=").append(epoch); - bld.append("}"); - return bld.toString(); + return "{" + + "featureMap=" + featureMap.toString() + + ", epoch=" + epoch + + "}"; } } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/PartitionRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/PartitionRegistration.java index 72476cf206cda..3a6aa51e7e266 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/PartitionRegistration.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/PartitionRegistration.java @@ -439,20 +439,18 @@ public boolean equals(Object o) { @Override public String toString() { - StringBuilder builder = new StringBuilder("PartitionRegistration("); - builder.append("replicas=").append(Arrays.toString(replicas)); - builder.append(", directories=").append(Arrays.toString(directories)); - builder.append(", isr=").append(Arrays.toString(isr)); - builder.append(", removingReplicas=").append(Arrays.toString(removingReplicas)); - builder.append(", addingReplicas=").append(Arrays.toString(addingReplicas)); - builder.append(", elr=").append(Arrays.toString(elr)); - builder.append(", lastKnownElr=").append(Arrays.toString(lastKnownElr)); - builder.append(", leader=").append(leader); - builder.append(", leaderRecoveryState=").append(leaderRecoveryState); - 
builder.append(", leaderEpoch=").append(leaderEpoch); - builder.append(", partitionEpoch=").append(partitionEpoch); - builder.append(")"); - return builder.toString(); + return "PartitionRegistration(" + "replicas=" + Arrays.toString(replicas) + + ", directories=" + Arrays.toString(directories) + + ", isr=" + Arrays.toString(isr) + + ", removingReplicas=" + Arrays.toString(removingReplicas) + + ", addingReplicas=" + Arrays.toString(addingReplicas) + + ", elr=" + Arrays.toString(elr) + + ", lastKnownElr=" + Arrays.toString(lastKnownElr) + + ", leader=" + leader + + ", leaderRecoveryState=" + leaderRecoveryState + + ", leaderEpoch=" + leaderEpoch + + ", partitionEpoch=" + partitionEpoch + + ")"; } public boolean hasSameAssignment(PartitionRegistration registration) { diff --git a/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java index dd6c2d1518524..f9e9bd54f3367 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java @@ -318,16 +318,16 @@ public void testDeleteDedupe() { AclBinding aclBinding = new AclBinding(new ResourcePattern(TOPIC, "topic-1", LITERAL), new AccessControlEntry("User:user", "10.0.0.1", AclOperation.ALL, ALLOW)); - ControllerResult> createResult = manager.createAcls(Arrays.asList(aclBinding)); + ControllerResult> createResult = manager.createAcls(Collections.singletonList(aclBinding)); Uuid id = ((AccessControlEntryRecord) createResult.records().get(0).message()).id(); assertEquals(1, createResult.records().size()); - ControllerResult> deleteAclResultsAnyFilter = manager.deleteAcls(Arrays.asList(AclBindingFilter.ANY)); + ControllerResult> deleteAclResultsAnyFilter = manager.deleteAcls(Collections.singletonList(AclBindingFilter.ANY)); assertEquals(1, deleteAclResultsAnyFilter.records().size()); assertEquals(id, 
((RemoveAccessControlEntryRecord) deleteAclResultsAnyFilter.records().get(0).message()).id()); assertEquals(1, deleteAclResultsAnyFilter.response().size()); - ControllerResult> deleteAclResultsSpecificFilter = manager.deleteAcls(Arrays.asList(aclBinding.toFilter())); + ControllerResult> deleteAclResultsSpecificFilter = manager.deleteAcls(Collections.singletonList(aclBinding.toFilter())); assertEquals(1, deleteAclResultsSpecificFilter.records().size()); assertEquals(id, ((RemoveAccessControlEntryRecord) deleteAclResultsSpecificFilter.records().get(0).message()).id()); assertEquals(1, deleteAclResultsSpecificFilter.response().size()); diff --git a/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java index a9d4cbfc6f60f..e647cd597d9d6 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java @@ -228,20 +228,20 @@ public void testEntityTypes() throws Exception { new EntityData().setEntityType("user").setEntityName("user-3"), new EntityData().setEntityType("client-id").setEntityName(null))). setKey("request_percentage").setValue(55.55).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("user").setEntityName("user-1"))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("user").setEntityName("user-1"))). setKey("request_percentage").setValue(56.56).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("user").setEntityName("user-2"))). 
+ new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("user").setEntityName("user-2"))). setKey("request_percentage").setValue(57.57).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("user").setEntityName("user-3"))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("user").setEntityName("user-3"))). setKey("request_percentage").setValue(58.58).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("user").setEntityName(null))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("user").setEntityName(null))). setKey("request_percentage").setValue(59.59).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("client-id").setEntityName("client-id-2"))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("client-id").setEntityName("client-id-2"))). 
setKey("request_percentage").setValue(60.60).setRemove(false), (short) 0)); records = new ArrayList<>(records); RecordTestUtils.deepSortRecords(records); @@ -323,7 +323,7 @@ public void testIsValidIpEntityWithLocalhost() { @Test public void testConfigKeysForEntityTypeWithUser() { - testConfigKeysForEntityType(Arrays.asList(ClientQuotaEntity.USER), + testConfigKeysForEntityType(Collections.singletonList(ClientQuotaEntity.USER), Arrays.asList( "producer_byte_rate", "consumer_byte_rate", @@ -334,7 +334,7 @@ public void testConfigKeysForEntityTypeWithUser() { @Test public void testConfigKeysForEntityTypeWithClientId() { - testConfigKeysForEntityType(Arrays.asList(ClientQuotaEntity.CLIENT_ID), + testConfigKeysForEntityType(Collections.singletonList(ClientQuotaEntity.CLIENT_ID), Arrays.asList( "producer_byte_rate", "consumer_byte_rate", @@ -356,10 +356,10 @@ public void testConfigKeysForEntityTypeWithUserAndClientId() { @Test public void testConfigKeysForEntityTypeWithIp() { - testConfigKeysForEntityType(Arrays.asList(ClientQuotaEntity.IP), - Arrays.asList( - "connection_creation_rate" - )); + testConfigKeysForEntityType(Collections.singletonList(ClientQuotaEntity.IP), + Collections.singletonList( + "connection_creation_rate" + )); } private static Map keysToEntity(List entityKeys) { @@ -386,7 +386,7 @@ private static void testConfigKeysForEntityType( @Test public void testConfigKeysForEmptyEntity() { - testConfigKeysError(Arrays.asList(), + testConfigKeysError(Collections.emptyList(), new ApiError(Errors.INVALID_REQUEST, "Invalid empty client quota entity")); } @@ -427,7 +427,7 @@ private static void testConfigKeysError( static { VALID_CLIENT_ID_QUOTA_KEYS = new HashMap<>(); assertEquals(ApiError.NONE, ClientQuotaControlManager.configKeysForEntityType( - keysToEntity(Arrays.asList(ClientQuotaEntity.CLIENT_ID)), VALID_CLIENT_ID_QUOTA_KEYS)); + keysToEntity(Collections.singletonList(ClientQuotaEntity.CLIENT_ID)), VALID_CLIENT_ID_QUOTA_KEYS)); } @Test diff --git 
a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java index 20c2b7c690997..e2bfad53bc101 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java @@ -327,19 +327,19 @@ public void testRegisterBrokerRecordVersion(MetadataVersion metadataVersion) { short expectedVersion = metadataVersion.registerBrokerRecordVersion(); assertEquals( - asList(new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerEpoch(123L). - setBrokerId(0). - setRack(null). - setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")). - setFenced(true). - setLogDirs(logDirs). - setFeatures(new RegisterBrokerRecord.BrokerFeatureCollection(asList( - new RegisterBrokerRecord.BrokerFeature(). - setName(MetadataVersion.FEATURE_NAME). - setMinSupportedVersion((short) 1). - setMaxSupportedVersion((short) 1)).iterator())). - setInControlledShutdown(false), expectedVersion)), + Collections.singletonList(new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerEpoch(123L). + setBrokerId(0). + setRack(null). + setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")). + setFenced(true). + setLogDirs(logDirs). + setFeatures(new RegisterBrokerRecord.BrokerFeatureCollection(Collections.singletonList( + new RegisterBrokerRecord.BrokerFeature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion((short) 1). + setMaxSupportedVersion((short) 1)).iterator())). 
+ setInControlledShutdown(false), expectedVersion)), result.records()); } @@ -673,7 +673,7 @@ public void testDefaultDir() { RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().setBrokerEpoch(100).setBrokerId(1).setLogDirs(Collections.emptyList()); brokerRecord.endPoints().add(new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).setName("PLAINTEXT").setHost("127.0.0.1")); clusterControl.replay(brokerRecord, 100L); - registerNewBrokerWithDirs(clusterControl, 2, asList(Uuid.fromString("singleOnlineDirectoryA"))); + registerNewBrokerWithDirs(clusterControl, 2, Collections.singletonList(Uuid.fromString("singleOnlineDirectoryA"))); registerNewBrokerWithDirs(clusterControl, 3, asList(Uuid.fromString("s4fRmyNFSH6J0vI8AVA5ew"), Uuid.fromString("UbtxBcqYSnKUEMcnTyZFWw"))); assertEquals(DirectoryId.MIGRATING, clusterControl.defaultDir(1)); assertEquals(Uuid.fromString("singleOnlineDirectoryA"), clusterControl.defaultDir(2)); diff --git a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java index 5e76114d2e975..b24848147878d 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java @@ -33,7 +33,6 @@ import org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata; import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -57,7 +56,7 @@ import static org.apache.kafka.common.metadata.MetadataRecordType.CONFIG_RECORD; import static org.apache.kafka.server.config.ConfigSynonym.HOURS_TO_MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static 
org.junit.jupiter.api.Assertions.assertNull; @Timeout(value = 40) @@ -80,9 +79,9 @@ public class ConfigurationControlManagerTest { public static final Map> SYNONYMS = new HashMap<>(); static { - SYNONYMS.put("abc", Arrays.asList(new ConfigSynonym("foo.bar"))); - SYNONYMS.put("def", Arrays.asList(new ConfigSynonym("baz"))); - SYNONYMS.put("quuux", Arrays.asList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); + SYNONYMS.put("abc", Collections.singletonList(new ConfigSynonym("foo.bar"))); + SYNONYMS.put("def", Collections.singletonList(new ConfigSynonym("baz"))); + SYNONYMS.put("quuux", Collections.singletonList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); } static final KafkaConfigSchema SCHEMA = new KafkaConfigSchema(CONFIGS, SYNONYMS); @@ -138,7 +137,7 @@ public void testReplay() throws Exception { assertEquals(toMap(entry("abc", "x,y,z"), entry("def", "blah")), manager.getConfigs(MYTOPIC)); assertEquals("x,y,z", manager.getTopicConfig(MYTOPIC.name(), "abc")); - assertTrue(manager.getTopicConfig(MYTOPIC.name(), "none-exists") == null); + assertNull(manager.getTopicConfig(MYTOPIC.name(), "none-exists")); } @Test @@ -382,14 +381,14 @@ expectedRecords1, toMap(entry(MYTOPIC, ApiError.NONE))), for (ApiMessageAndVersion message : expectedRecords1) { manager.replay((ConfigRecord) message.message()); } - assertEquals(ControllerResult.atomicOf(asList( - new ApiMessageAndVersion( - new ConfigRecord() - .setResourceType(TOPIC.id()) - .setResourceName("mytopic") - .setName("abc") - .setValue(null), - CONFIG_RECORD.highestSupportedVersion())), + assertEquals(ControllerResult.atomicOf(Collections.singletonList( + new ApiMessageAndVersion( + new ConfigRecord() + .setResourceType(TOPIC.id()) + .setResourceName("mytopic") + .setName("abc") + .setValue(null), + CONFIG_RECORD.highestSupportedVersion())), toMap(entry(MYTOPIC, ApiError.NONE))), manager.legacyAlterConfigs(toMap(entry(MYTOPIC, toMap(entry("def", "901")))), true)); diff --git 
a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java index b5f2239cd5a78..16f2809792687 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java @@ -169,8 +169,8 @@ public void testUpdateFeaturesErrorCases() { setQuorumFeatures(features("foo", 1, 5, "bar", 0, 3)). setSnapshotRegistry(snapshotRegistry). setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber( - Arrays.asList(new SimpleImmutableEntry<>(5, Collections.singletonMap("bar", VersionRange.of(0, 3)))), - Arrays.asList())). + Collections.singletonList(new SimpleImmutableEntry<>(5, singletonMap("bar", VersionRange.of(0, 3)))), + emptyList())). build(); assertEquals(ControllerResult.atomicOf(emptyList(), @@ -389,15 +389,15 @@ public void testCreateFeatureLevelRecords() { FeatureControlManager manager = new FeatureControlManager.Builder(). setQuorumFeatures(new QuorumFeatures(0, localSupportedFeatures, emptyList())). setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber( - Arrays.asList(new SimpleImmutableEntry<>(1, Collections.singletonMap("foo", VersionRange.of(0, 3)))), - Arrays.asList())). + Collections.singletonList(new SimpleImmutableEntry<>(1, singletonMap("foo", VersionRange.of(0, 3)))), + emptyList())). 
build(); ControllerResult> result = manager.updateFeatures( Collections.singletonMap("foo", (short) 1), Collections.singletonMap("foo", FeatureUpdate.UpgradeType.UPGRADE), false); - assertEquals(ControllerResult.atomicOf(Arrays.asList(new ApiMessageAndVersion( - new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 1), (short) 0)), + assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( + new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 1), (short) 0)), Collections.singletonMap("foo", ApiError.NONE)), result); RecordTestUtils.replayAll(manager, result.records()); assertEquals(Optional.of((short) 1), manager.finalizedFeatures(Long.MAX_VALUE).get("foo")); @@ -406,7 +406,7 @@ public void testCreateFeatureLevelRecords() { Collections.singletonMap("foo", (short) 0), Collections.singletonMap("foo", FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), false); - assertEquals(ControllerResult.atomicOf(Arrays.asList(new ApiMessageAndVersion( + assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 0), (short) 0)), Collections.singletonMap("foo", ApiError.NONE)), result2); RecordTestUtils.replayAll(manager, result2.records()); diff --git a/metadata/src/test/java/org/apache/kafka/controller/OffsetControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/OffsetControlManagerTest.java index 2b5133f55ba0a..f2f774517e797 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/OffsetControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/OffsetControlManagerTest.java @@ -54,7 +54,7 @@ public void testInitialValues() { assertEquals(-1L, offsetControl.transactionStartOffset()); assertEquals(-1L, offsetControl.nextWriteOffset()); assertFalse(offsetControl.active()); - assertEquals(Arrays.asList(-1L), offsetControl.snapshotRegistry().epochsList()); + 
assertEquals(Collections.singletonList(-1L), offsetControl.snapshotRegistry().epochsList()); } @Test @@ -64,7 +64,7 @@ public void testActivate() { assertEquals(1000L, offsetControl.nextWriteOffset()); assertTrue(offsetControl.active()); assertTrue(offsetControl.metrics().active()); - assertEquals(Arrays.asList(-1L), offsetControl.snapshotRegistry().epochsList()); + assertEquals(Collections.singletonList(-1L), offsetControl.snapshotRegistry().epochsList()); } @Test @@ -122,7 +122,7 @@ public void testHandleCommitBatch() { OffsetControlManager offsetControl = new OffsetControlManager.Builder().build(); offsetControl.handleCommitBatch(newFakeBatch(1000L, 200, 3000L)); - assertEquals(Arrays.asList(1000L), offsetControl.snapshotRegistry().epochsList()); + assertEquals(Collections.singletonList(1000L), offsetControl.snapshotRegistry().epochsList()); assertEquals(1000L, offsetControl.lastCommittedOffset()); assertEquals(200, offsetControl.lastCommittedEpoch()); assertEquals(1000L, offsetControl.lastStableOffset()); @@ -149,7 +149,7 @@ public void testHandleScheduleAtomicAppend() { offsetControl.handleCommitBatch(newFakeBatch(2000L, 200, 3000L)); assertEquals(2000L, offsetControl.lastStableOffset()); assertEquals(2000L, offsetControl.lastCommittedOffset()); - assertEquals(Arrays.asList(2000L), offsetControl.snapshotRegistry().epochsList()); + assertEquals(Collections.singletonList(2000L), offsetControl.snapshotRegistry().epochsList()); } @Test @@ -163,14 +163,14 @@ public void testHandleLoadSnapshot() { assertEquals(Arrays.asList("snapshot[-1]", "reset"), snapshotRegistry.operations()); assertEquals(new OffsetAndEpoch(4000L, 300), offsetControl.currentSnapshotId()); assertEquals("00000000000000004000-0000000300", offsetControl.currentSnapshotName()); - assertEquals(Arrays.asList(), offsetControl.snapshotRegistry().epochsList()); + assertEquals(Collections.emptyList(), offsetControl.snapshotRegistry().epochsList()); offsetControl.endLoadSnapshot(3456L); 
assertEquals(Arrays.asList("snapshot[-1]", "reset", "snapshot[4000]"), snapshotRegistry.operations()); assertNull(offsetControl.currentSnapshotId()); assertNull(offsetControl.currentSnapshotName()); - assertEquals(Arrays.asList(4000L), offsetControl.snapshotRegistry().epochsList()); + assertEquals(Collections.singletonList(4000L), offsetControl.snapshotRegistry().epochsList()); assertEquals(4000L, offsetControl.lastCommittedOffset()); assertEquals(300, offsetControl.lastCommittedEpoch()); assertEquals(4000L, offsetControl.lastStableOffset()); @@ -236,7 +236,7 @@ public void testReplayTransaction(boolean aborted) { assertEquals(1550L, offsetControl.lastCommittedOffset()); assertEquals(100, offsetControl.lastCommittedEpoch()); assertEquals(1499L, offsetControl.lastStableOffset()); - assertEquals(Arrays.asList(1499L), offsetControl.snapshotRegistry().epochsList()); + assertEquals(Collections.singletonList(1499L), offsetControl.snapshotRegistry().epochsList()); if (aborted) { offsetControl.replay(new AbortTransactionRecord(), 1600L); @@ -252,7 +252,7 @@ public void testReplayTransaction(boolean aborted) { offsetControl.handleCommitBatch(newFakeBatch(1650, 100, 2100L)); assertEquals(1650, offsetControl.lastStableOffset()); - assertEquals(Arrays.asList(1650L), offsetControl.snapshotRegistry().epochsList()); + assertEquals(Collections.singletonList(1650L), offsetControl.snapshotRegistry().epochsList()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java index 044402d50872e..2bbcf01611bbf 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java @@ -82,13 +82,13 @@ public void testChangeRecordIsNoOp() { assertFalse(changeRecordIsNoOp(new PartitionChangeRecord(). 
setIsr(Arrays.asList(1, 2, 3)))); assertFalse(changeRecordIsNoOp(new PartitionChangeRecord(). - setRemovingReplicas(Arrays.asList(1)))); + setRemovingReplicas(Collections.singletonList(1)))); assertFalse(changeRecordIsNoOp(new PartitionChangeRecord(). - setAddingReplicas(Arrays.asList(4)))); + setAddingReplicas(Collections.singletonList(4)))); assertFalse(changeRecordIsNoOp(new PartitionChangeRecord(). - setEligibleLeaderReplicas(Arrays.asList(5)))); + setEligibleLeaderReplicas(Collections.singletonList(5)))); assertFalse(changeRecordIsNoOp(new PartitionChangeRecord(). - setLastKnownElr(Arrays.asList(6)))); + setLastKnownElr(Collections.singletonList(6)))); assertFalse( changeRecordIsNoOp( new PartitionChangeRecord() @@ -274,12 +274,12 @@ public void testElectLeader(short version) { assertElectLeaderEquals(createFooBuilder(version).setElection(Election.UNCLEAN) .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(1, 3))), 1, false); assertElectLeaderEquals(createFooBuilder(version) - .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(3))), NO_LEADER, false); + .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(3))), NO_LEADER, false); assertElectLeaderEquals(createFooBuilder(version).setElection(Election.UNCLEAN). 
- setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(3))), 2, true); + setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(3))), 2, true); assertElectLeaderEquals( createFooBuilder(version).setElection(Election.UNCLEAN) - .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(4))).setTargetReplicas(Arrays.asList(2, 1, 3, 4)), + .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(4))).setTargetReplicas(Arrays.asList(2, 1, 3, 4)), 4, false ); @@ -424,7 +424,7 @@ public void testNoLeaderEpochBumpOnEmptyTargetIsr(String metadataVersionString) 2). setEligibleLeaderReplicasEnabled(metadataVersion.isElrSupported()). setDefaultDirProvider(DEFAULT_DIR_PROVIDER). - setTargetReplicas(Arrays.asList()); + setTargetReplicas(Collections.emptyList()); PartitionChangeRecord record = new PartitionChangeRecord(); builder.triggerLeaderEpochBumpForIsrShrinkIfNeeded(record); assertEquals(NO_LEADER_CHANGE, record.leader()); @@ -593,7 +593,7 @@ public void testUncleanLeaderElection(short version) { new PartitionChangeRecord() .setTopicId(FOO_ID) .setPartitionId(0) - .setIsr(Arrays.asList(2)) + .setIsr(Collections.singletonList(2)) .setLeader(2) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERING.value()), version @@ -601,13 +601,13 @@ public void testUncleanLeaderElection(short version) { assertEquals( Optional.of(expectedRecord), createFooBuilder(version).setElection(Election.UNCLEAN) - .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(3))).build() + .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(3))).build() ); PartitionChangeRecord record = new PartitionChangeRecord() .setTopicId(OFFLINE_ID) .setPartitionId(0) - .setIsr(Arrays.asList(1)) + 
.setIsr(Collections.singletonList(1)) .setLeader(1) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERING.value()); @@ -626,7 +626,7 @@ public void testUncleanLeaderElection(short version) { assertEquals( Optional.of(expectedRecord), createOfflineBuilder(version).setElection(Election.UNCLEAN) - .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(2))).build() + .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(2))).build() ); } @@ -1017,7 +1017,7 @@ public void testEligibleLeaderReplicas_RemoveUncleanShutdownReplicasFromElr(shor .setDefaultDirProvider(DEFAULT_DIR_PROVIDER) .setUseLastKnownLeaderInBalancedRecovery(false); - builder.setUncleanShutdownReplicas(Arrays.asList(3)); + builder.setUncleanShutdownReplicas(Collections.singletonList(3)); PartitionChangeRecord record = new PartitionChangeRecord() .setTopicId(topicId) @@ -1025,8 +1025,8 @@ public void testEligibleLeaderReplicas_RemoveUncleanShutdownReplicasFromElr(shor .setLeader(-2) .setLeaderRecoveryState(LeaderRecoveryState.NO_CHANGE); if (version >= 2) { - record.setEligibleLeaderReplicas(Arrays.asList(2)) - .setLastKnownElr(Arrays.asList(3)); + record.setEligibleLeaderReplicas(Collections.singletonList(2)) + .setLastKnownElr(Collections.singletonList(3)); } else { record.setEligibleLeaderReplicas(Collections.emptyList()); } @@ -1146,8 +1146,8 @@ public void testEligibleLeaderReplicas_ElrCanBeElected(boolean lastKnownLeaderEn new PartitionChangeRecord() .setTopicId(topicId) .setPartitionId(0) - .setIsr(Arrays.asList(3)) - .setEligibleLeaderReplicas(Arrays.asList(1)) + .setIsr(Collections.singletonList(3)) + .setEligibleLeaderReplicas(Collections.singletonList(1)) .setLeader(3) .setLeaderRecoveryState(LeaderRecoveryState.NO_CHANGE), version @@ -1200,7 +1200,7 @@ public void testEligibleLeaderReplicas_IsrCanShrinkToZero(boolean lastKnownLeade .setEligibleLeaderReplicas(Arrays.asList(1, 2, 3, 4)); 
if (lastKnownLeaderEnabled) { - record.setLastKnownElr(Arrays.asList(1)); + record.setLastKnownElr(Collections.singletonList(1)); } ApiMessageAndVersion expectedRecord = new ApiMessageAndVersion(record, version); @@ -1213,7 +1213,7 @@ public void testEligibleLeaderReplicas_IsrCanShrinkToZero(boolean lastKnownLeade metadataVersionForPartitionChangeRecordVersion(version), 3) .setElection(Election.PREFERRED) .setEligibleLeaderReplicasEnabled(true) - .setUncleanShutdownReplicas(Arrays.asList(2)) + .setUncleanShutdownReplicas(Collections.singletonList(2)) .setDefaultDirProvider(DEFAULT_DIR_PROVIDER) .setUseLastKnownLeaderInBalancedRecovery(lastKnownLeaderEnabled); PartitionChangeRecord changeRecord = (PartitionChangeRecord) builder.build().get().message(); @@ -1253,7 +1253,7 @@ public void testEligibleLeaderReplicas_ElectLastKnownLeader() { new PartitionChangeRecord() .setTopicId(topicId) .setPartitionId(0) - .setIsr(Arrays.asList(1)) + .setIsr(Collections.singletonList(1)) .setLeader(1) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERING.value()) .setLastKnownElr(Collections.emptyList()), diff --git a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java index 17be98d47f0b1..e35f468132983 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java @@ -202,7 +202,7 @@ public void testDoesNotCompleteReassignmentIfIsrDoesNotHaveAllTargetReplicas() { partitionAssignment(Arrays.asList(0, 1, 2)), partitionAssignment(Arrays.asList(0, 1, 3))); assertTrue(replicas.isReassignmentInProgress()); Optional reassignmentOptional = - replicas.maybeCompleteReassignment(Arrays.asList(3)); + replicas.maybeCompleteReassignment(Collections.singletonList(3)); assertFalse(reassignmentOptional.isPresent()); } diff --git 
a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java index 05148813e8108..ae1251e1c2afa 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java @@ -18,6 +18,7 @@ package org.apache.kafka.controller; import java.util.Arrays; +import java.util.Collections; import org.apache.kafka.common.Uuid; import org.apache.kafka.metadata.LeaderRecoveryState; @@ -78,7 +79,7 @@ public void testSomeAdding() { setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build(); PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration); assertEquals(Arrays.asList(3, 2, 1), revert.replicas()); - assertEquals(Arrays.asList(2), revert.isr()); + assertEquals(Collections.singletonList(2), revert.isr()); assertFalse(revert.unclean()); } @@ -96,7 +97,7 @@ public void testSomeRemovingAndAdding() { setRemovingReplicas(new int[]{2}).setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build(); PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration); assertEquals(Arrays.asList(3, 2, 1), revert.replicas()); - assertEquals(Arrays.asList(2), revert.isr()); + assertEquals(Collections.singletonList(2), revert.isr()); assertFalse(revert.unclean()); } @@ -114,7 +115,7 @@ public void testIsrSpecialCase() { setRemovingReplicas(new int[]{2}).setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build(); PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration); assertEquals(Arrays.asList(3, 2, 1), 
revert.replicas()); - assertEquals(Arrays.asList(3), revert.isr()); + assertEquals(Collections.singletonList(3), revert.isr()); assertTrue(revert.unclean()); } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java index 9fbb8ee855c36..bb3f0bbd57b8b 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java @@ -17,7 +17,6 @@ package org.apache.kafka.controller; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -100,11 +99,11 @@ static Map registerBrokersAndUnfence( Uuid.fromString("TESTBROKER" + Integer.toString(100000 + brokerId).substring(1) + "DIRAAAA") )) .setListeners(new ListenerCollection( - Arrays.asList( - new Listener() - .setName("PLAINTEXT") - .setHost("localhost") - .setPort(9092 + brokerId) + Collections.singletonList( + new Listener() + .setName("PLAINTEXT") + .setHost("localhost") + .setPort(9092 + brokerId) ).iterator() ) ) diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java index 1b18c9648de4f..96843f63207e9 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java @@ -270,7 +270,7 @@ private void testDelayedConfigurationOperations( @Test public void testFenceMultipleBrokers() throws Throwable { List allBrokers = Arrays.asList(1, 2, 3, 4, 5); - List brokersToKeepUnfenced = Arrays.asList(1); + List brokersToKeepUnfenced = singletonList(1); List brokersToFence = Arrays.asList(2, 3, 4, 5); short replicationFactor = (short) allBrokers.size(); short numberOfPartitions = 
(short) allBrokers.size(); @@ -484,7 +484,7 @@ public void testUncleanShutdownBroker() throws Throwable { assertArrayEquals(lastKnownElr, partition.lastKnownElr, partition.toString()); // Unfence the last one in the ELR, it should be elected. - sendBrokerHeartbeatToUnfenceBrokers(active, Arrays.asList(brokerToBeTheLeader), brokerEpochs); + sendBrokerHeartbeatToUnfenceBrokers(active, singletonList(brokerToBeTheLeader), brokerEpochs); TestUtils.waitForCondition(() -> { return active.clusterControl().isUnfenced(brokerToBeTheLeader); }, sessionTimeoutMillis * 3, @@ -798,21 +798,21 @@ public void testSnapshotSaveAndLoad() throws Throwable { setIncarnationId(new Uuid(3465346L, i)). setZkMigrationReady(false). setListeners(new ControllerRegistrationRequestData.ListenerCollection( - Arrays.asList( - new ControllerRegistrationRequestData.Listener(). - setName("CONTROLLER"). - setHost("localhost"). - setPort(8000 + i). - setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + singletonList( + new ControllerRegistrationRequestData.Listener(). + setName("CONTROLLER"). + setHost("localhost"). + setPort(8000 + i). + setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) ).iterator() )). setFeatures(new ControllerRegistrationRequestData.FeatureCollection( - Arrays.asList( - new ControllerRegistrationRequestData.Feature(). - setName(MetadataVersion.FEATURE_NAME). - setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). - setMaxSupportedVersion(MetadataVersion.IBP_3_7_IV0.featureLevel()) - ).iterator() + singletonList( + new ControllerRegistrationRequestData.Feature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). + setMaxSupportedVersion(MetadataVersion.IBP_3_7_IV0.featureLevel()) + ).iterator() ))).get(); } for (int i = 0; i < numBrokers; i++) { @@ -823,9 +823,9 @@ public void testSnapshotSaveAndLoad() throws Throwable { setClusterId(active.clusterId()). 
setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_7_IV0)). setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + i)). - setListeners(new ListenerCollection(Arrays.asList(new Listener(). - setName("PLAINTEXT").setHost("localhost"). - setPort(9092 + i)).iterator()))).get(); + setListeners(new ListenerCollection(singletonList(new Listener(). + setName("PLAINTEXT").setHost("localhost"). + setPort(9092 + i)).iterator()))).get(); brokerEpochs.put(i, reply.epoch()); } for (int i = 0; i < numBrokers - 1; i++) { @@ -872,68 +872,68 @@ private List generateTestRecords(Uuid fooId, Map generateTestRecords(Uuid fooId, Map controllerBuilderInitializer = __ -> { }; private OptionalLong sessionTimeoutMillis = OptionalLong.empty(); private OptionalLong leaderImbalanceCheckIntervalNs = OptionalLong.empty(); - private boolean eligibleLeaderReplicasEnabled = false; + private final boolean eligibleLeaderReplicasEnabled = false; private BootstrapMetadata bootstrapMetadata = BootstrapMetadata. 
fromVersion(MetadataVersion.latestTesting(), "test-provided version"); diff --git a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java index a5cfcce07b00f..ec4a383f0096d 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java @@ -893,7 +893,7 @@ public void testTopicNameCollision() { topics.add(new CreatableTopic().setName("foo.bar")); topics.add(new CreatableTopic().setName("woo.bar_foo")); Map> collisionMap = new HashMap<>(); - collisionMap.put("foo_bar", new TreeSet<>(Arrays.asList("foo_bar"))); + collisionMap.put("foo_bar", new TreeSet<>(singletonList("foo_bar"))); collisionMap.put("woo_bar_foo", new TreeSet<>(Arrays.asList("woo.bar.foo", "woo_bar.foo"))); ReplicationControlManager.validateNewTopicNames(topicErrors, topics, collisionMap); Map expectedTopicErrors = new HashMap<>(); @@ -1157,11 +1157,11 @@ public void testAlterPartitionHandleUnknownTopicIdOrName(short version) { AlterPartitionRequestData request = new AlterPartitionRequestData() .setBrokerId(0) .setBrokerEpoch(100) - .setTopics(asList(new AlterPartitionRequestData.TopicData() - .setTopicName(version <= 1 ? topicName : "") - .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID) - .setPartitions(asList(new PartitionData() - .setPartitionIndex(0))))); + .setTopics(singletonList(new TopicData() + .setTopicName(version <= 1 ? topicName : "") + .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new PartitionData() + .setPartitionIndex(0))))); ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.ALTER_PARTITION, version); @@ -1171,12 +1171,12 @@ public void testAlterPartitionHandleUnknownTopicIdOrName(short version) { Errors expectedError = version > 1 ? 
UNKNOWN_TOPIC_ID : UNKNOWN_TOPIC_OR_PARTITION; AlterPartitionResponseData expectedResponse = new AlterPartitionResponseData() - .setTopics(asList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? topicName : "") - .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID) - .setPartitions(asList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(expectedError.code()))))); + .setTopics(singletonList(new AlterPartitionResponseData.TopicData() + .setTopicName(version <= 1 ? topicName : "") + .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(expectedError.code()))))); assertEquals(expectedResponse, result.response()); } @@ -1509,17 +1509,17 @@ public void testCreatePartitions() { ctx.replay(createPartitionsResult.records()); List topics2 = new ArrayList<>(); topics2.add(new CreatePartitionsTopic(). - setName("foo").setCount(6).setAssignments(asList( - new CreatePartitionsAssignment().setBrokerIds(asList(1, 3))))); + setName("foo").setCount(6).setAssignments(singletonList( + new CreatePartitionsAssignment().setBrokerIds(asList(1, 3))))); topics2.add(new CreatePartitionsTopic(). - setName("bar").setCount(5).setAssignments(asList( - new CreatePartitionsAssignment().setBrokerIds(asList(1))))); + setName("bar").setCount(5).setAssignments(singletonList( + new CreatePartitionsAssignment().setBrokerIds(singletonList(1))))); topics2.add(new CreatePartitionsTopic(). - setName("quux").setCount(4).setAssignments(asList( - new CreatePartitionsAssignment().setBrokerIds(asList(1, 0))))); + setName("quux").setCount(4).setAssignments(singletonList( + new CreatePartitionsAssignment().setBrokerIds(asList(1, 0))))); topics2.add(new CreatePartitionsTopic(). 
- setName("foo2").setCount(3).setAssignments(asList( - new CreatePartitionsAssignment().setBrokerIds(asList(2, 0))))); + setName("foo2").setCount(3).setAssignments(singletonList( + new CreatePartitionsAssignment().setBrokerIds(asList(2, 0))))); ControllerResult> createPartitionsResult2 = replicationControl.createPartitions(requestContext, topics2); assertEquals(asList(new CreatePartitionsTopicResult(). @@ -1579,8 +1579,8 @@ public void testCreatePartitionsWithMutationQuotaExceeded() { // now test the explicit assignment case List topics2 = new ArrayList<>(); topics2.add(new CreatePartitionsTopic(). - setName("foo").setCount(4).setAssignments(asList( - new CreatePartitionsAssignment().setBrokerIds(asList(1, 0))))); + setName("foo").setCount(4).setAssignments(singletonList( + new CreatePartitionsAssignment().setBrokerIds(asList(1, 0))))); ControllerResult> createPartitionsResult2 = replicationControl.createPartitions(createPartitionsRequestContext, topics2); assertEquals(expectedThrottled, createPartitionsResult2.response()); @@ -1600,7 +1600,7 @@ public void testCreatePartitionsFailsWhenAllBrokersAreFencedOrInControlledShutdo ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.CREATE_TOPICS); ControllerResult createTopicResult = replicationControl. - createTopics(requestContext, request, new HashSet<>(Arrays.asList("foo"))); + createTopics(requestContext, request, new HashSet<>(singletonList("foo"))); ctx.replay(createTopicResult.records()); ctx.registerBrokers(0, 1); @@ -1614,11 +1614,11 @@ public void testCreatePartitionsFailsWhenAllBrokersAreFencedOrInControlledShutdo replicationControl.createPartitions(requestContext, topics); assertEquals( - asList(new CreatePartitionsTopicResult(). - setName("foo"). - setErrorCode(INVALID_REPLICATION_FACTOR.code()). - setErrorMessage("Unable to replicate the partition 2 time(s): All " + - "brokers are currently fenced or in controlled shutdown.")), + singletonList(new CreatePartitionsTopicResult(). 
+ setName("foo"). + setErrorCode(INVALID_REPLICATION_FACTOR.code()). + setErrorMessage("Unable to replicate the partition 2 time(s): All " + + "brokers are currently fenced or in controlled shutdown.")), createPartitionsResult.response()); } @@ -1640,8 +1640,8 @@ public void testCreatePartitionsISRInvariants() { replicationControl.createTopics(requestContext, request, Collections.singleton("foo")); ctx.replay(result.records()); - List topics = asList(new CreatePartitionsTopic(). - setName("foo").setCount(2).setAssignments(null)); + List topics = singletonList(new CreatePartitionsTopic(). + setName("foo").setCount(2).setAssignments(null)); ControllerResult> createPartitionsResult = replicationControl.createPartitions(requestContext, topics); @@ -1670,9 +1670,9 @@ public void testCreatePartitionsISRInvariants() { public void testValidateGoodManualPartitionAssignments() { ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build(); ctx.registerBrokers(1, 2, 3); - ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1)), + ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(singletonList(1)), OptionalInt.of(1)); - ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1)), + ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(singletonList(1)), OptionalInt.empty()); ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1, 2, 3)), OptionalInt.of(3)); @@ -1686,7 +1686,7 @@ public void testValidateBadManualPartitionAssignments() { ctx.registerBrokers(1, 2); assertEquals("The manual partition assignment includes an empty replica list.", assertThrows(InvalidReplicaAssignmentException.class, () -> - ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList()), + ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(Collections.emptyList()), 
OptionalInt.empty())).getMessage()); assertEquals("The manual partition assignment includes broker 3, but no such " + "broker is registered.", assertThrows(InvalidReplicaAssignmentException.class, () -> @@ -1748,19 +1748,19 @@ public void testReassignPartitions(short version) { ctx.replay(alterResult.records()); ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). - setTopics(asList(new OngoingTopicReassignment(). - setName("foo").setPartitions(asList( - new OngoingPartitionReassignment().setPartitionIndex(1). - setRemovingReplicas(asList(3)). - setAddingReplicas(asList(0)). - setReplicas(asList(0, 2, 1, 3)))))); + setTopics(singletonList(new OngoingTopicReassignment(). + setName("foo").setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(1). + setRemovingReplicas(singletonList(3)). + setAddingReplicas(singletonList(0)). + setReplicas(asList(0, 2, 1, 3)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); - assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(asList( + assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(singletonList( new ListPartitionReassignmentsTopics().setName("bar"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); - assertEquals(currentReassigning, replication.listPartitionReassignments(asList( - new ListPartitionReassignmentsTopics().setName("foo"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + assertEquals(currentReassigning, replication.listPartitionReassignments(singletonList( + new ListPartitionReassignmentsTopics().setName("foo"). 
+ setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); ControllerResult cancelResult = replication.alterPartitionReassignments( new AlterPartitionReassignmentsRequestData().setTopics(asList( @@ -1771,9 +1771,9 @@ public void testReassignPartitions(short version) { setReplicas(null), new ReassignablePartition().setPartitionIndex(2). setReplicas(null))), - new ReassignableTopic().setName("bar").setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null)))))); + new ReassignableTopic().setName("bar").setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(null)))))); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(fooId). setPartitionId(1). @@ -1795,10 +1795,10 @@ public void testReassignPartitions(short version) { new ReassignablePartitionResponse().setPartitionIndex(2). setErrorCode(UNKNOWN_TOPIC_OR_PARTITION.code()). setErrorMessage("Unable to find partition foo:2."))), - new ReassignableTopicResponse().setName("bar").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()). - setErrorMessage(null)))))), + new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()). + setErrorMessage(null)))))), cancelResult); log.info("running final alterPartition..."); ControllerRequestContext requestContext = @@ -1806,26 +1806,26 @@ public void testReassignPartitions(short version) { AlterPartitionRequestData alterPartitionRequestData = new AlterPartitionRequestData(). setBrokerId(3). setBrokerEpoch(103). - setTopics(asList(new TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(asList(new PartitionData(). - setPartitionIndex(1). - setPartitionEpoch(1). 
- setLeaderEpoch(0). - setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1)))))); + setTopics(singletonList(new TopicData(). + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(1). + setPartitionEpoch(1). + setLeaderEpoch(0). + setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1)))))); ControllerResult alterPartitionResult = replication.alterPartition( requestContext, new AlterPartitionRequest.Builder(alterPartitionRequestData, version > 1).build(version).data()); Errors expectedError = version > 1 ? NEW_LEADER_ELECTED : FENCED_LEADER_EPOCH; - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(1). - setErrorCode(expectedError.code()))))), + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData(). + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(1). + setErrorCode(expectedError.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -1867,14 +1867,14 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { AlterPartitionRequestData alterIsrRequest = new AlterPartitionRequestData() .setBrokerId(1) .setBrokerEpoch(101) - .setTopics(asList(new TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? 
fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new PartitionData() - .setPartitionIndex(0) - .setPartitionEpoch(1) - .setLeaderEpoch(0) - .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + .setTopics(singletonList(new TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new PartitionData() + .setPartitionIndex(0) + .setPartitionEpoch(1) + .setLeaderEpoch(0) + .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.ALTER_PARTITION, version); @@ -1885,12 +1885,12 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA; assertEquals( new AlterPartitionResponseData() - .setTopics(asList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(expectedError.code()))))), + .setTopics(singletonList(new AlterPartitionResponseData.TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(expectedError.code()))))), alterPartitionResult.response()); fenceRecords = new ArrayList<>(); @@ -1901,16 +1901,16 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { assertEquals( new AlterPartitionResponseData() - .setTopics(asList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? 
fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setLeaderId(1) - .setLeaderEpoch(0) - .setIsr(asList(1, 2, 3, 4)) - .setPartitionEpoch(2) - .setErrorCode(NONE.code()))))), + .setTopics(singletonList(new AlterPartitionResponseData.TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setLeaderId(1) + .setLeaderEpoch(0) + .setIsr(asList(1, 2, 3, 4)) + .setPartitionEpoch(2) + .setErrorCode(NONE.code()))))), alterPartitionResult.response()); } @@ -1931,14 +1931,14 @@ public void testAlterPartitionShouldRejectBrokersWithStaleEpoch(short version) { AlterPartitionRequestData alterIsrRequest = new AlterPartitionRequestData(). setBrokerId(1). setBrokerEpoch(101). - setTopics(asList(new TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(asList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(1). - setLeaderEpoch(0). - setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + setTopics(singletonList(new TopicData(). + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(1). + setLeaderEpoch(0). + setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); // The broker 4 has failed silently and now registers again. long newEpoch = defaultBrokerEpoch(4) + 1000; @@ -1971,12 +1971,12 @@ public void testAlterPartitionShouldRejectBrokersWithStaleEpoch(short version) { if (version >= 3) { assertEquals( new AlterPartitionResponseData(). - setTopics(asList(new AlterPartitionResponseData.TopicData(). - setTopicName(""). - setTopicId(fooId). - setPartitions(asList(new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). 
- setErrorCode(INELIGIBLE_REPLICA.code()))))), + setTopics(singletonList(new AlterPartitionResponseData.TopicData(). + setTopicName(""). + setTopicId(fooId). + setPartitions(singletonList(new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setErrorCode(INELIGIBLE_REPLICA.code()))))), alterPartitionResult.response()); } else { assertEquals(NONE.code(), alterPartitionResult.response().errorCode()); @@ -2017,14 +2017,14 @@ public void testAlterPartitionShouldRejectShuttingDownBrokers(short version) { AlterPartitionRequestData alterIsrRequest = new AlterPartitionRequestData() .setBrokerId(1) .setBrokerEpoch(101) - .setTopics(asList(new TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new PartitionData() - .setPartitionIndex(0) - .setPartitionEpoch(0) - .setLeaderEpoch(0) - .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + .setTopics(singletonList(new TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new PartitionData() + .setPartitionIndex(0) + .setPartitionEpoch(0) + .setLeaderEpoch(0) + .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.ALTER_PARTITION, version); @@ -2035,12 +2035,12 @@ public void testAlterPartitionShouldRejectShuttingDownBrokers(short version) { Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA; assertEquals( new AlterPartitionResponseData() - .setTopics(asList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(expectedError.code()))))), + .setTopics(singletonList(new AlterPartitionResponseData.TopicData() + .setTopicName(version <= 1 ? 
"foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(expectedError.code()))))), alterPartitionResult.response()); } @@ -2081,10 +2081,10 @@ public void testCancelReassignPartitions() { new ReassignablePartition().setPartitionIndex(2). setReplicas(asList(5, 6, 7)), new ReassignablePartition().setPartitionIndex(3). - setReplicas(asList()))), - new ReassignableTopic().setName("bar").setPartitions(asList( + setReplicas(Collections.emptyList()))), + new ReassignableTopic().setName("bar").setPartitions(singletonList( new ReassignablePartition().setPartitionIndex(0). - setReplicas(asList(1, 2, 3, 4, 0))))))); + setReplicas(asList(1, 2, 3, 4, 0))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). setErrorMessage(null).setResponses(asList( new ReassignableTopicResponse().setName("foo").setPartitions(asList( @@ -2100,9 +2100,9 @@ public void testCancelReassignPartitions() { setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()). setErrorMessage("The manual partition assignment includes an empty " + "replica list."))), - new ReassignableTopicResponse().setName("bar").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), + new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null))))), alterResult.response()); ctx.replay(alterResult.records()); assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 4}).setIsr(new int[] {1, 2, 4}). 
@@ -2131,44 +2131,44 @@ public void testCancelReassignPartitions() { setAddingReplicas(new int[] {0, 1}).setLeader(4).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(0).setPartitionEpoch(2).build(), replication.getPartition(barId, 0)); ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). - setTopics(asList(new OngoingTopicReassignment(). - setName("bar").setPartitions(asList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(Collections.emptyList()). - setAddingReplicas(asList(0, 1)). - setReplicas(asList(1, 2, 3, 4, 0)))))); + setTopics(singletonList(new OngoingTopicReassignment(). + setName("bar").setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(Collections.emptyList()). + setAddingReplicas(asList(0, 1)). + setReplicas(asList(1, 2, 3, 4, 0)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); - assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(asList( - new ListPartitionReassignmentsTopics().setName("foo"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); - assertEquals(currentReassigning, replication.listPartitionReassignments(asList( - new ListPartitionReassignmentsTopics().setName("bar"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(singletonList( + new ListPartitionReassignmentsTopics().setName("foo"). + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + assertEquals(currentReassigning, replication.listPartitionReassignments(singletonList( + new ListPartitionReassignmentsTopics().setName("bar"). 
+ setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(4).setBrokerEpoch(104). - setTopics(asList(new TopicData().setTopicId(barId).setPartitions(asList( - new PartitionData().setPartitionIndex(0).setPartitionEpoch(2). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(4, 1, 2, 0))))))); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData().setTopicId(barId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(4). - setLeaderEpoch(0). - setIsr(asList(4, 1, 2, 0)). - setPartitionEpoch(3). - setErrorCode(NONE.code()))))), + setTopics(singletonList(new TopicData().setTopicId(barId).setPartitions(singletonList( + new PartitionData().setPartitionIndex(0).setPartitionEpoch(2). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(4, 1, 2, 0))))))); + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData().setTopicId(barId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(4). + setLeaderEpoch(0). + setIsr(asList(4, 1, 2, 0)). + setPartitionEpoch(3). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ControllerResult cancelResult = replication.alterPartitionReassignments( new AlterPartitionReassignmentsRequestData().setTopics(asList( - new ReassignableTopic().setName("foo").setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null))), - new ReassignableTopic().setName("bar").setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null)))))); + new ReassignableTopic().setName("foo").setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). 
+ setReplicas(null))), + new ReassignableTopic().setName("bar").setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(null)))))); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(barId). setPartitionId(0). @@ -2182,12 +2182,12 @@ public void testCancelReassignPartitions() { setRemovingReplicas(null). setAddingReplicas(Collections.emptyList()), MetadataVersion.latestTesting().partitionChangeRecordVersion())), new AlterPartitionReassignmentsResponseData().setErrorMessage(null).setResponses(asList( - new ReassignableTopicResponse().setName("foo").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()).setErrorMessage(null))), - new ReassignableTopicResponse().setName("bar").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null)))))), + new ReassignableTopicResponse().setName("foo").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()).setErrorMessage(null))), + new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null)))))), cancelResult); ctx.replay(cancelResult.records()); assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2461,30 +2461,30 @@ public void testElectPreferredLeaders() { ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). - setTopics(asList(new AlterPartitionRequestData.TopicData().setTopicId(fooId). - setPartitions(asList( - new AlterPartitionRequestData.PartitionData(). - setPartitionIndex(0).setPartitionEpoch(0). 
- setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3)), - new AlterPartitionRequestData.PartitionData(). - setPartitionIndex(2).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(1, 2, 3)). - setPartitionEpoch(1). - setErrorCode(NONE.code()), - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(2). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(0, 2, 1)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + setTopics(singletonList(new TopicData().setTopicId(fooId). + setPartitions(asList( + new PartitionData(). + setPartitionIndex(0).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3)), + new PartitionData(). + setPartitionIndex(2).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(1, 2, 3)). + setPartitionEpoch(1). + setErrorCode(NONE.code()), + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(2). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(0, 2, 1)). + setPartitionEpoch(1). 
+ setErrorCode(NONE.code()))))), alterPartitionResult.response()); ElectLeadersResponseData expectedResponse2 = buildElectLeadersResponse(NONE, false, Utils.mkMap( @@ -2547,19 +2547,19 @@ public void testBalancePartitionLeaders() { ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). - setTopics(asList(new AlterPartitionRequestData.TopicData().setTopicId(fooId). - setPartitions(asList(new AlterPartitionRequestData.PartitionData(). - setPartitionIndex(0).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3))))))); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(1, 2, 3)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + setTopics(singletonList(new TopicData().setTopicId(fooId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3))))))); + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(1, 2, 3)). + setPartitionEpoch(1). 
+ setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2570,7 +2570,7 @@ public void testBalancePartitionLeaders() { .setPartitionId(0) .setTopicId(fooId) .setLeader(1); - assertEquals(asList(new ApiMessageAndVersion(expectedChangeRecord, MetadataVersion.latestTesting().partitionChangeRecordVersion())), balanceResult.records()); + assertEquals(singletonList(new ApiMessageAndVersion(expectedChangeRecord, MetadataVersion.latestTesting().partitionChangeRecordVersion())), balanceResult.records()); assertTrue(replication.arePartitionLeadersImbalanced()); assertFalse(balanceResult.response()); @@ -2579,19 +2579,19 @@ public void testBalancePartitionLeaders() { alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). - setTopics(asList(new AlterPartitionRequestData.TopicData().setTopicId(fooId). - setPartitions(asList(new AlterPartitionRequestData.PartitionData(). - setPartitionIndex(2).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(2). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(0, 2, 1)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + setTopics(singletonList(new TopicData().setTopicId(fooId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(2).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(2). + setLeaderId(2). 
+ setLeaderEpoch(0). + setIsr(asList(0, 2, 1)). + setPartitionEpoch(1). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2602,7 +2602,7 @@ public void testBalancePartitionLeaders() { .setPartitionId(2) .setTopicId(fooId) .setLeader(0); - assertEquals(asList(new ApiMessageAndVersion(expectedChangeRecord, MetadataVersion.latestTesting().partitionChangeRecordVersion())), balanceResult.records()); + assertEquals(singletonList(new ApiMessageAndVersion(expectedChangeRecord, MetadataVersion.latestTesting().partitionChangeRecordVersion())), balanceResult.records()); assertFalse(replication.arePartitionLeadersImbalanced()); assertFalse(balanceResult.response()); } @@ -2664,7 +2664,7 @@ public void testKRaftClusterDescriber() { ctx.registerBrokersWithDirs( 0, Collections.emptyList(), 1, Collections.emptyList(), - 2, asList(Uuid.fromString("ozwqsVMFSNiYQUPSJA3j0w")), + 2, singletonList(Uuid.fromString("ozwqsVMFSNiYQUPSJA3j0w")), 3, asList(Uuid.fromString("SSDgCZ4BTyec5QojGT65qg"), Uuid.fromString("K8KwMrviRcOUvgI8FPOJWg")), 4, Collections.emptyList() ); @@ -2773,25 +2773,25 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd // Reassign to [2, 3] ControllerResult alterResultOne = replication.alterPartitionReassignments( - new AlterPartitionReassignmentsRequestData().setTopics(asList( - new ReassignableTopic().setName(topic).setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(asList(2, 3))))))); + new AlterPartitionReassignmentsRequestData().setTopics(singletonList( + new ReassignableTopic().setName(topic).setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(asList(2, 3))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). 
- setErrorMessage(null).setResponses(asList( - new ReassignableTopicResponse().setName(topic).setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), alterResultOne.response()); + setErrorMessage(null).setResponses(singletonList( + new ReassignableTopicResponse().setName(topic).setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null))))), alterResultOne.response()); ctx.replay(alterResultOne.records()); ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). - setTopics(asList(new OngoingTopicReassignment(). - setName(topic).setPartitions(asList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(asList(0, 1)). - setAddingReplicas(asList(2, 3)). - setReplicas(asList(2, 3, 0, 1)))))); + setTopics(singletonList(new OngoingTopicReassignment(). + setName(topic).setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(asList(0, 1)). + setAddingReplicas(asList(2, 3)). + setReplicas(asList(2, 3, 0, 1)))))); // Make sure the reassignment metadata is as expected. assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2802,25 +2802,25 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd AlterPartitionRequestData alterPartitionRequestData = new AlterPartitionRequestData(). setBrokerId(partition.leader). setBrokerEpoch(ctx.currentBrokerEpoch(partition.leader)). - setTopics(asList(new TopicData(). - setTopicId(topicId). - setPartitions(asList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(partition.partitionEpoch). - setLeaderEpoch(partition.leaderEpoch). - setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2)))))); + setTopics(singletonList(new TopicData(). + setTopicId(topicId). 
+ setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(partition.partitionEpoch). + setLeaderEpoch(partition.leaderEpoch). + setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2)))))); ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequest.Builder(alterPartitionRequestData, true).build().data()); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData(). - setTopicId(topicId). - setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setIsr(Arrays.asList(0, 1, 2)). - setPartitionEpoch(partition.partitionEpoch + 1). - setErrorCode(NONE.code()))))), + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData(). + setTopicId(topicId). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setIsr(asList(0, 1, 2)). + setPartitionEpoch(partition.partitionEpoch + 1). 
+ setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2832,9 +2832,9 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd ); ControllerResult electLeaderTwoResult = replication.electLeaders(request); ReplicaElectionResult replicaElectionResult = new ReplicaElectionResult().setTopic(topic); - replicaElectionResult.setPartitionResult(Arrays.asList(new PartitionResult().setPartitionId(0).setErrorCode(NONE.code()).setErrorMessage(null))); + replicaElectionResult.setPartitionResult(singletonList(new PartitionResult().setPartitionId(0).setErrorCode(NONE.code()).setErrorMessage(null))); assertEquals( - new ElectLeadersResponseData().setErrorCode(NONE.code()).setReplicaElectionResults(Arrays.asList(replicaElectionResult)), + new ElectLeadersResponseData().setErrorCode(NONE.code()).setReplicaElectionResults(singletonList(replicaElectionResult)), electLeaderTwoResult.response() ); ctx.replay(electLeaderTwoResult.records()); @@ -2845,26 +2845,26 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd // Reassign to [4, 5] ControllerResult alterResultTwo = replication.alterPartitionReassignments( - new AlterPartitionReassignmentsRequestData().setTopics(asList( - new ReassignableTopic().setName(topic).setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(asList(4, 5))))))); + new AlterPartitionReassignmentsRequestData().setTopics(singletonList( + new ReassignableTopic().setName(topic).setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(asList(4, 5))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). - setErrorMessage(null).setResponses(asList( - new ReassignableTopicResponse().setName(topic).setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). 
- setErrorMessage(null))))), alterResultTwo.response()); + setErrorMessage(null).setResponses(singletonList( + new ReassignableTopicResponse().setName(topic).setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null))))), alterResultTwo.response()); ctx.replay(alterResultTwo.records()); // Make sure the replicas list contains all the previous replicas 0, 1, 2, 3 as well as the new replicas 3, 4 currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). - setTopics(asList(new OngoingTopicReassignment(). - setName(topic).setPartitions(asList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(asList(0, 1, 2, 3)). - setAddingReplicas(asList(4, 5)). - setReplicas(asList(4, 5, 0, 1, 2, 3)))))); + setTopics(singletonList(new OngoingTopicReassignment(). + setName(topic).setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(asList(0, 1, 2, 3)). + setAddingReplicas(asList(4, 5)). + setReplicas(asList(4, 5, 0, 1, 2, 3)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2877,23 +2877,23 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd AlterPartitionRequestData alterPartitionRequestDataTwo = new AlterPartitionRequestData(). setBrokerId(partition.leader). setBrokerEpoch(ctx.currentBrokerEpoch(partition.leader)). - setTopics(asList(new TopicData(). - setTopicId(topicId). - setPartitions(asList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(partition.partitionEpoch). - setLeaderEpoch(partition.leaderEpoch). - setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5)))))); + setTopics(singletonList(new TopicData(). + setTopicId(topicId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(partition.partitionEpoch). + setLeaderEpoch(partition.leaderEpoch). 
+ setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5)))))); ControllerResult alterPartitionResultTwo = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequest.Builder(alterPartitionRequestDataTwo, true).build().data()); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData(). - setTopicId(topicId). - setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setErrorCode(NEW_LEADER_ELECTED.code()))))), + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData(). + setTopicId(topicId). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setErrorCode(NEW_LEADER_ELECTED.code()))))), alterPartitionResultTwo.response()); ctx.replay(alterPartitionResultTwo.records()); diff --git a/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java b/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java index 3b9fd83910b4f..bef1d35efc3ed 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java @@ -73,14 +73,14 @@ public class ClientQuotasImageTest { setRemove(true), CLIENT_QUOTA_RECORD.highestSupportedVersion())); // alter quota DELTA1_RECORDS.add(new ApiMessageAndVersion(new ClientQuotaRecord(). - setEntity(Arrays.asList( - new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). + setEntity(Collections.singletonList( + new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). setKey(QuotaConfigs.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG). setValue(234.0), CLIENT_QUOTA_RECORD.highestSupportedVersion())); // add quota to entity with existing quota DELTA1_RECORDS.add(new ApiMessageAndVersion(new ClientQuotaRecord(). 
- setEntity(Arrays.asList( - new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). + setEntity(Collections.singletonList( + new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). setKey(QuotaConfigs.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG). setValue(999.0), CLIENT_QUOTA_RECORD.highestSupportedVersion())); diff --git a/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java b/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java index 820d5a83fa82f..730cfec4963ef 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java @@ -45,7 +45,6 @@ import org.junit.jupiter.api.Timeout; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -83,7 +82,7 @@ public class ClusterImageTest { setId(0). setEpoch(1000). setIncarnationId(Uuid.fromString("vZKYST0pSA2HO5x_6hoO2Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(true). @@ -92,7 +91,7 @@ public class ClusterImageTest { setId(1). setEpoch(1001). setIncarnationId(Uuid.fromString("U52uRe20RsGI0RvpcTx33Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(false). @@ -101,7 +100,7 @@ public class ClusterImageTest { setId(2). setEpoch(123). 
setIncarnationId(Uuid.fromString("hr4TVh3YQiu3p16Awkka6w")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9094))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9094))). setSupportedFeatures(Collections.emptyMap()). setRack(Optional.of("arack")). setFenced(false). @@ -154,7 +153,7 @@ public class ClusterImageTest { setId(0). setEpoch(1000). setIncarnationId(Uuid.fromString("vZKYST0pSA2HO5x_6hoO2Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(false). @@ -163,7 +162,7 @@ public class ClusterImageTest { setId(1). setEpoch(1001). setIncarnationId(Uuid.fromString("U52uRe20RsGI0RvpcTx33Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(true). @@ -194,8 +193,8 @@ public class ClusterImageTest { DELTA2_RECORDS.add(new ApiMessageAndVersion(new RegisterBrokerRecord(). setBrokerId(2).setIsMigratingZkBroker(true).setIncarnationId(Uuid.fromString("Am5Yse7GQxaw0b2alM74bP")). setBrokerEpoch(1002).setEndPoints(new BrokerEndpointCollection( - Arrays.asList(new BrokerEndpoint().setName("PLAINTEXT").setHost("localhost"). - setPort(9094).setSecurityProtocol((short) 0)).iterator())). + Collections.singletonList(new BrokerEndpoint().setName("PLAINTEXT").setHost("localhost"). + setPort(9094).setSecurityProtocol((short) 0)).iterator())). 
setFeatures(new BrokerFeatureCollection( Collections.singleton(new BrokerFeature(). setName(MetadataVersion.FEATURE_NAME). @@ -212,7 +211,7 @@ public class ClusterImageTest { setId(0). setEpoch(1000). setIncarnationId(Uuid.fromString("vZKYST0pSA2HO5x_6hoO2Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(true). @@ -221,7 +220,7 @@ public class ClusterImageTest { setId(1). setEpoch(1001). setIncarnationId(Uuid.fromString("U52uRe20RsGI0RvpcTx33Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(false). @@ -230,7 +229,7 @@ public class ClusterImageTest { setId(2). setEpoch(1002). setIncarnationId(Uuid.fromString("Am5Yse7GQxaw0b2alM74bP")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9094))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9094))). setSupportedFeatures(Collections.singletonMap("metadata.version", VersionRange.of(MetadataVersion.IBP_3_3_IV3.featureLevel(), MetadataVersion.IBP_3_6_IV0.featureLevel()))). setRack(Optional.of("rack3")). 
diff --git a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java index 4a29779e0acff..369ebd36d88e2 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java @@ -83,18 +83,18 @@ static ApiMessageAndVersion metadataVersionRecord(MetadataVersion metadataVersio @Test public void testPremodernVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_2_IV0, - Arrays.asList( - "feature flag(s): foo.feature"), - Arrays.asList( - metadataVersionRecord(MetadataVersion.IBP_3_3_IV0), - TEST_RECORDS.get(0), - TEST_RECORDS.get(1), - new ApiMessageAndVersion(new FeatureLevelRecord(). - setName("foo.feature"). - setFeatureLevel((short) 4), (short) 0)), - Arrays.asList( - TEST_RECORDS.get(0), - TEST_RECORDS.get(1))); + Collections.singletonList( + "feature flag(s): foo.feature"), + Arrays.asList( + metadataVersionRecord(MetadataVersion.IBP_3_3_IV0), + TEST_RECORDS.get(0), + TEST_RECORDS.get(1), + new ApiMessageAndVersion(new FeatureLevelRecord(). + setName("foo.feature"). 
+ setFeatureLevel((short) 4), (short) 0)), + Arrays.asList( + TEST_RECORDS.get(0), + TEST_RECORDS.get(1))); } /** @@ -103,7 +103,7 @@ public void testPremodernVersion() { @Test public void testPreControlledShutdownStateVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_3_IV2, - Arrays.asList( + Collections.singletonList( "the inControlledShutdown state of one or more brokers"), Arrays.asList( metadataVersionRecord(MetadataVersion.IBP_3_3_IV3), @@ -134,31 +134,31 @@ public void testPreControlledShutdownStateVersion() { @Test public void testPreZkMigrationSupportVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_3_IV3, - Arrays.asList( - "the isMigratingZkBroker state of one or more brokers"), - Arrays.asList( - metadataVersionRecord(MetadataVersion.IBP_3_4_IV0), - new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerId(123). - setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). - setBrokerEpoch(456). - setRack(null). - setFenced(false). - setInControlledShutdown(true). - setIsMigratingZkBroker(true), (short) 2), - TEST_RECORDS.get(0), - TEST_RECORDS.get(1)), - Arrays.asList( - metadataVersionRecord(MetadataVersion.IBP_3_3_IV3), - new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerId(123). - setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). - setBrokerEpoch(456). - setRack(null). - setFenced(false). - setInControlledShutdown(true), (short) 1), - TEST_RECORDS.get(0), - TEST_RECORDS.get(1))); + Collections.singletonList( + "the isMigratingZkBroker state of one or more brokers"), + Arrays.asList( + metadataVersionRecord(MetadataVersion.IBP_3_4_IV0), + new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerId(123). + setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). + setBrokerEpoch(456). + setRack(null). + setFenced(false). + setInControlledShutdown(true). 
+ setIsMigratingZkBroker(true), (short) 2), + TEST_RECORDS.get(0), + TEST_RECORDS.get(1)), + Arrays.asList( + metadataVersionRecord(MetadataVersion.IBP_3_3_IV3), + new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerId(123). + setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). + setBrokerEpoch(456). + setRack(null). + setFenced(false). + setInControlledShutdown(true), (short) 1), + TEST_RECORDS.get(0), + TEST_RECORDS.get(1))); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java b/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java index eabb63ff858e2..be85516a19c2e 100644 --- a/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java @@ -247,11 +247,11 @@ public void testBasicLocalChanges() { LocalReplicaChanges changes = delta.localChanges(localId); assertEquals( - new HashSet<>(Arrays.asList(new TopicPartition("baz", 0))), + new HashSet<>(Collections.singletonList(new TopicPartition("baz", 0))), changes.electedLeaders().keySet() ); assertEquals( - new HashSet<>(Arrays.asList(new TopicPartition("baz", 0))), + new HashSet<>(Collections.singletonList(new TopicPartition("baz", 0))), changes.leaders().keySet() ); assertEquals( @@ -303,7 +303,7 @@ public void testDeleteAfterChanges() { RecordTestUtils.replayAll(delta, topicRecords); LocalReplicaChanges changes = delta.localChanges(localId); - assertEquals(new HashSet<>(Arrays.asList(new TopicPartition("zoo", 0))), changes.deletes()); + assertEquals(new HashSet<>(Collections.singletonList(new TopicPartition("zoo", 0))), changes.deletes()); assertEquals(Collections.emptyMap(), changes.electedLeaders()); assertEquals(Collections.emptyMap(), changes.leaders()); assertEquals(Collections.emptyMap(), changes.followers()); @@ -345,7 +345,7 @@ public void testUpdatedLeaders() { assertEquals(Collections.emptySet(), changes.deletes()); assertEquals(Collections.emptyMap(), 
changes.electedLeaders()); assertEquals( - new HashSet<>(Arrays.asList(new TopicPartition("zoo", 0))), + new HashSet<>(Collections.singletonList(new TopicPartition("zoo", 0))), changes.leaders().keySet() ); assertEquals(Collections.emptyMap(), changes.followers()); diff --git a/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java b/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java index 921c241a09afb..84f2542522c7d 100644 --- a/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java @@ -249,19 +249,19 @@ public void testPublisherCannotBeInstalledMoreThanOnce( setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0L)). build()) { - loader.installPublishers(asList(publisher)).get(); + loader.installPublishers(Collections.singletonList(publisher)).get(); if (loadSnapshot) { MockSnapshotReader snapshotReader = new MockSnapshotReader( new MetadataProvenance(200, 100, 4000), - asList( - Batch.control( - 200, - 100, - 4000, - 10, - asList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + Collections.singletonList( + Batch.control( + 200, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + ) ) - ) ); loader.handleLoadSnapshot(snapshotReader); TestUtils.retryOnExceptionWithTimeout(30_000, () -> { @@ -277,13 +277,13 @@ public void testPublisherCannotBeInstalledMoreThanOnce( assertEquals("testPublisherCannotBeInstalledMoreThanOnce: Attempted to install " + "publisher MockPublisher, which is already installed.", assertThrows(ExecutionException.class, - () -> loader.installPublishers(asList(publisher)).get()). + () -> loader.installPublishers(Collections.singletonList(publisher)).get()). 
getCause().getMessage()); } else { assertEquals("testPublisherCannotBeInstalledMoreThanOnce: Attempted to install " + "a new publisher named MockPublisher, but there is already a publisher with that name.", assertThrows(ExecutionException.class, - () -> loader.installPublishers(asList(new MockPublisher())).get()). + () -> loader.installPublishers(Collections.singletonList(new MockPublisher())).get()). getCause().getMessage()); } } @@ -305,11 +305,11 @@ public void testRemovePublisher() throws Exception { loader.installPublishers(publishers.subList(0, 2)).get(); loader.removeAndClosePublisher(publishers.get(1)).get(); MockSnapshotReader snapshotReader = MockSnapshotReader.fromRecordLists( - new MetadataProvenance(100, 50, 2000), - asList(asList(new ApiMessageAndVersion( - new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)))); + new MetadataProvenance(100, 50, 2000), + Collections.singletonList(Collections.singletonList(new ApiMessageAndVersion( + new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)))); assertFalse(snapshotReader.closed); loader.handleLoadSnapshot(snapshotReader); loader.waitForAllEventsToBeHandled(); @@ -334,7 +334,7 @@ public void testRemovePublisher() throws Exception { public void testLoadEmptySnapshot() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLoadEmptySnapshot"); MockTime time = new MockTime(); - List publishers = asList(new MockPublisher()); + List publishers = Collections.singletonList(new MockPublisher()); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setTime(time). 
@@ -364,15 +364,15 @@ private void loadEmptySnapshot( ) throws Exception { MockSnapshotReader snapshotReader = new MockSnapshotReader( new MetadataProvenance(offset, 100, 4000), - asList( - Batch.control( - 200, - 100, - 4000, - 10, - asList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + Collections.singletonList( + Batch.control( + 200, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + ) ) - ) ); if (loader.time() instanceof MockTime) { snapshotReader.setTime((MockTime) loader.time()); @@ -452,7 +452,7 @@ public Batch next() { public void testLoadEmptyBatch() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLoadEmptyBatch"); MockTime time = new MockTime(); - List publishers = asList(new MockPublisher()); + List publishers = Collections.singletonList(new MockPublisher()); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setTime(time). @@ -462,16 +462,16 @@ public void testLoadEmptyBatch() throws Exception { loadTestSnapshot(loader, 200); publishers.get(0).firstPublish.get(10, TimeUnit.SECONDS); MockBatchReader batchReader = new MockBatchReader( - 300, - asList( - Batch.control( - 300, - 100, - 4000, - 10, - asList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + 300, + Collections.singletonList( + Batch.control( + 300, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + ) ) - ) ).setTime(time); loader.handleCommit(batchReader); loader.waitForAllEventsToBeHandled(); @@ -508,22 +508,22 @@ public void testLastAppliedOffset() throws Exception { loader.installPublishers(publishers).get(); loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(200, 100, 4000), asList( - asList(new ApiMessageAndVersion(new FeatureLevelRecord(). 
- setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), - asList(new ApiMessageAndVersion(new TopicRecord(). - setName("foo"). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) + Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + setName("foo"). + setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) ))); for (MockPublisher publisher : publishers) { publisher.firstPublish.get(1, TimeUnit.MINUTES); } loader.waitForAllEventsToBeHandled(); assertEquals(200L, loader.lastAppliedOffset()); - loader.handleCommit(new MockBatchReader(201, asList( - MockBatchReader.newBatch(201, 100, asList( - new ApiMessageAndVersion(new RemoveTopicRecord(). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)))))); + loader.handleCommit(new MockBatchReader(201, Collections.singletonList( + MockBatchReader.newBatch(201, 100, Collections.singletonList( + new ApiMessageAndVersion(new RemoveTopicRecord(). + setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)))))); loader.waitForAllEventsToBeHandled(); assertEquals(201L, loader.lastAppliedOffset()); } @@ -578,10 +578,10 @@ private void loadTestSnapshot( ) throws Exception { loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(offset, 100, 4000), asList( - asList(new ApiMessageAndVersion(new FeatureLevelRecord(). + Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), - asList(new ApiMessageAndVersion(new TopicRecord(). + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). setName("foo"). 
setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) ))); @@ -594,10 +594,10 @@ private void loadTestSnapshot2( ) throws Exception { loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(offset, 100, 4000), asList( - asList(new ApiMessageAndVersion(new FeatureLevelRecord(). + Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)), - asList(new ApiMessageAndVersion(new TopicRecord(). + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). setName("bar"). setTopicId(Uuid.fromString("VcL2Mw-cT4aL6XV9VujzoQ")), (short) 0)) ))); @@ -610,7 +610,7 @@ private void loadTestSnapshot2( @Test public void testReloadSnapshot() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLastAppliedOffset"); - List publishers = asList(new MockPublisher("a")); + List publishers = Collections.singletonList(new MockPublisher("a")); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0)). @@ -637,11 +637,11 @@ public void testReloadSnapshot() throws Exception { assertFalse(publishers.get(0).latestImage.topics().topicsByName().containsKey("foo")); assertTrue(publishers.get(0).latestImage.topics().topicsByName().containsKey("bar")); - loader.handleCommit(new MockBatchReader(500, asList( - MockBatchReader.newBatch(500, 100, asList( - new ApiMessageAndVersion(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(IBP_3_5_IV0.featureLevel()), (short) 0)))))); + loader.handleCommit(new MockBatchReader(500, Collections.singletonList( + MockBatchReader.newBatch(500, 100, Collections.singletonList( + new ApiMessageAndVersion(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). 
+ setFeatureLevel(IBP_3_5_IV0.featureLevel()), (short) 0)))))); loader.waitForAllEventsToBeHandled(); assertEquals(IBP_3_5_IV0.featureLevel(), loader.metrics().currentMetadataVersion().featureLevel()); @@ -690,8 +690,8 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { if (abortTxn) { loader.handleCommit( - MockBatchReader.newSingleBatchReader(500, 100, Arrays.asList( - new ApiMessageAndVersion(new AbortTransactionRecord(), (short) 0) + MockBatchReader.newSingleBatchReader(500, 100, Collections.singletonList( + new ApiMessageAndVersion(new AbortTransactionRecord(), (short) 0) ))); loader.waitForAllEventsToBeHandled(); @@ -699,8 +699,8 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { "Topic should not be visible since the transaction was aborted"); } else { loader.handleCommit( - MockBatchReader.newSingleBatchReader(500, 100, Arrays.asList( - new ApiMessageAndVersion(new EndTransactionRecord(), (short) 0) + MockBatchReader.newSingleBatchReader(500, 100, Collections.singletonList( + new ApiMessageAndVersion(new EndTransactionRecord(), (short) 0) ))); loader.waitForAllEventsToBeHandled(); @@ -768,11 +768,11 @@ public void testSnapshotDuringTransaction() throws Exception { // loading a snapshot discards any in-flight transaction loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( - new MetadataProvenance(600, 101, 4000), asList( - asList(new ApiMessageAndVersion(new TopicRecord(). - setName("foo"). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) - ))); + new MetadataProvenance(600, 101, 4000), Collections.singletonList( + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + setName("foo"). 
+ setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) + ))); loader.waitForAllEventsToBeHandled(); assertEquals("Uum7sfhHQP-obSvfywmNUA", publisher.latestImage.topics().getTopic("foo").id().toString()); diff --git a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java index 23d943a160318..f8e9021fd25dd 100644 --- a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java @@ -53,7 +53,7 @@ public class ClusterImageBrokersNodeTest { @Test public void testChildNames() { - assertEquals(Arrays.asList("1"), NODE.childNames()); + assertEquals(Collections.singletonList("1"), NODE.childNames()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java index 3d347ec3178f4..4540e83502512 100644 --- a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java @@ -25,7 +25,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.util.Arrays; import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -50,7 +49,7 @@ public class ClusterImageControllersNodeTest { @Test public void testChildNames() { - assertEquals(Arrays.asList("2"), NODE.childNames()); + assertEquals(Collections.singletonList("2"), NODE.childNames()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotGeneratorTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotGeneratorTest.java index ebbe52d24e014..68cc77bcf6e5f 100644 --- 
a/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotGeneratorTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotGeneratorTest.java @@ -31,7 +31,6 @@ import org.junit.jupiter.api.Timeout; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; @@ -118,7 +117,7 @@ public void testCreateSnapshot() throws Exception { assertEquals(Collections.emptyList(), emitter.images()); emitter.setReady(); } - assertEquals(Arrays.asList(TEST_IMAGE), emitter.images()); + assertEquals(Collections.singletonList(TEST_IMAGE), emitter.images()); faultHandler.maybeRethrowFirstException(); } @@ -163,7 +162,7 @@ public void testTimeBasedSnapshots() throws Exception { // so this does not trigger a new snapshot. generator.publishLogDelta(TEST_DELTA, TEST_IMAGE, logDeltaManifestBuilder().numBytes(150).build()); } - assertEquals(Arrays.asList(TEST_IMAGE), emitter.images()); + assertEquals(Collections.singletonList(TEST_IMAGE), emitter.images()); faultHandler.maybeRethrowFirstException(); } diff --git a/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java b/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java index 0137aeb077224..a7f7578fe590b 100644 --- a/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import static java.util.Collections.emptyList; import static org.apache.kafka.metadata.RecordTestUtils.testRecord; @@ -44,7 +45,7 @@ public void testFreezeAndClose() { assertTrue(snapshotWriter.isClosed()); assertEquals(Arrays.asList( Arrays.asList(testRecord(0), testRecord(1)), - Arrays.asList(testRecord(2))), snapshotWriter.batches()); + 
Collections.singletonList(testRecord(2))), snapshotWriter.batches()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java b/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java index 35551e7c6570d..8f3eac706f04a 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java @@ -48,7 +48,7 @@ public class BrokerRegistrationTest { setId(0). setEpoch(0). setIncarnationId(Uuid.fromString("pc1GhUlBS92cGGaKXl6ipw")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))). setRack(Optional.empty()). setFenced(false). @@ -57,7 +57,7 @@ public class BrokerRegistrationTest { setId(1). setEpoch(0). setIncarnationId(Uuid.fromString("3MfdxWlNSn2UDYsmDP1pYg")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9091))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9091))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))). setRack(Optional.empty()). setFenced(true). @@ -66,7 +66,7 @@ public class BrokerRegistrationTest { setId(2). setEpoch(0). setIncarnationId(Uuid.fromString("eY7oaG1RREie5Kk9uy1l6g")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9092))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9092))). 
setSupportedFeatures(Stream.of(new SimpleEntry<>("foo", VersionRange.of((short) 2, (short) 3)), new SimpleEntry<>("bar", VersionRange.of((short) 1, (short) 4))).collect( Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue))). @@ -77,14 +77,14 @@ public class BrokerRegistrationTest { setId(3). setEpoch(0). setIncarnationId(Uuid.fromString("1t8VyWx2TCSTpUWuqj-FOw")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9093))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9093))). setSupportedFeatures(Stream.of(new SimpleEntry<>("metadata.version", VersionRange.of((short) 7, (short) 7))) .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue))). setRack(Optional.empty()). setFenced(false). setInControlledShutdown(true). setIsMigratingZkBroker(true). - setDirectories(Arrays.asList(Uuid.fromString("r4HpEsMuST6nQ4rznIEJVA"))). + setDirectories(Collections.singletonList(Uuid.fromString("r4HpEsMuST6nQ4rznIEJVA"))). build()); @Test @@ -172,7 +172,7 @@ public void testDirectoriesAreSorted() { setId(0). setEpoch(0). setIncarnationId(Uuid.fromString("ik32HZbLTW6ulw1yyrC8jQ")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))). setRack(Optional.empty()). setFenced(false). @@ -202,7 +202,7 @@ void testHasOnlineDir() { setId(0). setEpoch(0). setIncarnationId(Uuid.fromString("m6CiJvfITZeKVC6UuhlZew")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). 
setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))). setRack(Optional.empty()). setFenced(false). diff --git a/metadata/src/test/java/org/apache/kafka/metadata/DelegationTokenDataTest.java b/metadata/src/test/java/org/apache/kafka/metadata/DelegationTokenDataTest.java index cc0f74b9c581c..2c84d8d6031f1 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/DelegationTokenDataTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/DelegationTokenDataTest.java @@ -26,6 +26,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -39,7 +40,7 @@ public class DelegationTokenDataTest { Uuid.randomUuid().toString(), Uuid.randomUuid().toString()); - private static final List EMPTYRENEWERS = Arrays.asList(); + private static final List EMPTYRENEWERS = Collections.emptyList(); private static final List TOKENINFORMATION = Arrays.asList( new TokenInformation( diff --git a/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java b/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java index 49597fff9ae7b..2574747f0a1ed 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java @@ -26,6 +26,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -61,9 +62,9 @@ public class KafkaConfigSchemaTest { public static final Map> SYNONYMS = new HashMap<>(); static { - SYNONYMS.put("abc", Arrays.asList(new ConfigSynonym("foo.bar"))); - SYNONYMS.put("def", Arrays.asList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); - SYNONYMS.put("ghi", Arrays.asList(new ConfigSynonym("ghi"))); + SYNONYMS.put("abc", Collections.singletonList(new 
ConfigSynonym("foo.bar"))); + SYNONYMS.put("def", Collections.singletonList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); + SYNONYMS.put("ghi", Collections.singletonList(new ConfigSynonym("ghi"))); SYNONYMS.put("xyz", Arrays.asList(new ConfigSynonym("quuux"), new ConfigSynonym("quuux2"))); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/ListenerInfoTest.java b/metadata/src/test/java/org/apache/kafka/metadata/ListenerInfoTest.java index f4f2a843c250a..ac90599d7d9d0 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/ListenerInfoTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/ListenerInfoTest.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -65,23 +66,23 @@ public class ListenerInfoTest { @Test public void testNullHostname() { - assertNull(ListenerInfo.create(Arrays.asList(INTERNAL)).firstListener().host()); + assertNull(ListenerInfo.create(Collections.singletonList(INTERNAL)).firstListener().host()); } @Test public void testNullHostnameGetsResolved() throws Exception { - assertNotNull(ListenerInfo.create(Arrays.asList(INTERNAL)). + assertNotNull(ListenerInfo.create(Collections.singletonList(INTERNAL)). withWildcardHostnamesResolved().firstListener().host()); } @Test public void testEmptyHostname() { - assertEquals("", ListenerInfo.create(Arrays.asList(SSL)).firstListener().host()); + assertEquals("", ListenerInfo.create(Collections.singletonList(SSL)).firstListener().host()); } @Test public void testEmptyHostnameGetsResolved() throws Exception { - assertNotEquals("", ListenerInfo.create(Arrays.asList(SSL)). + assertNotEquals("", ListenerInfo.create(Collections.singletonList(SSL)). 
withWildcardHostnamesResolved().firstListener().host()); } @@ -118,14 +119,14 @@ public void testRoundTripToControllerRegistrationRequest() throws Exception { @Test public void testToControllerRegistrationRequestFailsOnNullHost() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). toControllerRegistrationRequest()); } @Test public void testToControllerRegistrationRequestFailsOnZeroPort() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). withWildcardHostnamesResolved(). toControllerRegistrationRequest()); } @@ -143,14 +144,14 @@ public void testRoundTripToControllerRegistrationRecord() throws Exception { @Test public void testToControllerRegistrationRecordFailsOnNullHost() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). toControllerRegistrationRecord()); } @Test public void testToControllerRegistrationRecordFailsOnZeroPort() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). withWildcardHostnamesResolved(). toControllerRegistrationRecord()); } @@ -168,14 +169,14 @@ public void testRoundTripToBrokerRegistrationRequest() throws Exception { @Test public void testToBrokerRegistrationRequestFailsOnNullHost() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). toBrokerRegistrationRequest()); } @Test public void testToBrokerRegistrationRequestFailsOnZeroPort() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). 
withWildcardHostnamesResolved(). toBrokerRegistrationRequest()); } @@ -193,14 +194,14 @@ public void testRoundTripToBrokerRegistrationRecord() throws Exception { @Test public void testToBrokerRegistrationRecordFailsOnNullHost() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). toBrokerRegistrationRecord()); } @Test public void testToBrokerRegistrationRecordFailsOnZeroPort() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). withWildcardHostnamesResolved(). toBrokerRegistrationRecord()); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java b/metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java index 8816f2f141de9..9cf47faa23aa2 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java @@ -74,7 +74,7 @@ public void testPartitionControlInfoMergeAndDiff() { setReplicas(new int[]{1, 2, 3}).setDirectories(DirectoryId.unassignedArray(3)). setIsr(new int[]{1}).setLastKnownElr(new int[]{3}).setElr(new int[]{2}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(0).setPartitionEpoch(1).build(); assertEquals(b, a.merge(new PartitionChangeRecord(). - setLeader(3).setIsr(Arrays.asList(3)))); + setLeader(3).setIsr(Collections.singletonList(3)))); assertEquals("isr: [1, 2] -> [3], leader: 1 -> 3, leaderEpoch: 0 -> 1, partitionEpoch: 0 -> 1", b.diff(a)); assertEquals("isr: [1, 2] -> [1], elr: [] -> [2], lastKnownElr: [] -> [3], partitionEpoch: 0 -> 1", @@ -320,7 +320,7 @@ public void testPartitionRegistrationToRecord(MetadataVersion metadataVersion) { if (metadataVersion.isElrSupported()) { expectRecord. setEligibleLeaderReplicas(Arrays.asList(2, 3)). 
- setLastKnownElr(Arrays.asList(4)); + setLastKnownElr(Collections.singletonList(4)); } if (metadataVersion.isDirectoryAssignmentSupported()) { expectRecord.setDirectories(Arrays.asList( diff --git a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java index b35f807564522..608665e751110 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java @@ -373,12 +373,12 @@ public static RegisterControllerRecord createTestControllerRegistration( ).iterator() )). setFeatures(new RegisterControllerRecord.ControllerFeatureCollection( - Arrays.asList( - new RegisterControllerRecord.ControllerFeature(). - setName(MetadataVersion.FEATURE_NAME). - setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). - setMaxSupportedVersion(MetadataVersion.IBP_3_6_IV1.featureLevel()) - ).iterator() + Collections.singletonList( + new RegisterControllerRecord.ControllerFeature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). 
+ setMaxSupportedVersion(MetadataVersion.IBP_3_6_IV1.featureLevel()) + ).iterator() )); } } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java b/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java index 7a26d48f63b3b..365c5eb1690bc 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java @@ -35,15 +35,15 @@ public class ReplicasTest { @Test public void testToList() { assertEquals(Arrays.asList(1, 2, 3, 4), Replicas.toList(new int[] {1, 2, 3, 4})); - assertEquals(Arrays.asList(), Replicas.toList(Replicas.NONE)); - assertEquals(Arrays.asList(2), Replicas.toList(new int[] {2})); + assertEquals(Collections.emptyList(), Replicas.toList(Replicas.NONE)); + assertEquals(Collections.singletonList(2), Replicas.toList(new int[] {2})); } @Test public void testToArray() { assertArrayEquals(new int[] {3, 2, 1}, Replicas.toArray(Arrays.asList(3, 2, 1))); - assertArrayEquals(new int[] {}, Replicas.toArray(Arrays.asList())); - assertArrayEquals(new int[] {2}, Replicas.toArray(Arrays.asList(2))); + assertArrayEquals(new int[] {}, Replicas.toArray(Collections.emptyList())); + assertArrayEquals(new int[] {2}, Replicas.toArray(Collections.singletonList(2))); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java index de21438c8c848..51ab544b18913 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import java.util.NoSuchElementException; import static org.apache.kafka.metadata.authorizer.StandardAclWithIdTest.TEST_ACLS; @@ -47,8 
+48,8 @@ public void testIteration() { new ApiMessageAndVersion(TEST_ACLS.get(3).toRecord(), (short) 0)), iterator.next()); assertTrue(iterator.hasNext()); - assertEquals(Arrays.asList( - new ApiMessageAndVersion(TEST_ACLS.get(4).toRecord(), (short) 0)), + assertEquals(Collections.singletonList( + new ApiMessageAndVersion(TEST_ACLS.get(4).toRecord(), (short) 0)), iterator.next()); assertFalse(iterator.hasNext()); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java index f22351f10fbaf..731a944639bf0 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java @@ -76,10 +76,10 @@ public void testCopyWithOnlyVersion() { BootstrapMetadata.fromRecords(SAMPLE_RECORDS1, "baz").copyWithOnlyVersion()); } - final static List RECORDS_WITH_OLD_METADATA_VERSION = unmodifiableList(asList( + final static List RECORDS_WITH_OLD_METADATA_VERSION = unmodifiableList(Collections.singletonList( new ApiMessageAndVersion(new FeatureLevelRecord(). - setName(FEATURE_NAME). - setFeatureLevel(IBP_3_0_IV1.featureLevel()), (short) 0))); + setName(FEATURE_NAME). 
+ setFeatureLevel(IBP_3_0_IV1.featureLevel()), (short) 0))); @Test public void testFromRecordsListWithOldMetadataVersion() { diff --git a/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java b/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java index 14f347ca1e1ea..4f0b3c37c974b 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java @@ -471,9 +471,9 @@ public void testShouldNotMoveToNextStateIfControllerNodesAreNotReadyToMigrate( startAndWaitForRecoveringMigrationStateFromZK(driver); if (allNodePresent) { - setupDeltaWithControllerRegistrations(delta, Arrays.asList(4, 5, 6), Arrays.asList()); + setupDeltaWithControllerRegistrations(delta, Arrays.asList(4, 5, 6), Collections.emptyList()); } else { - setupDeltaWithControllerRegistrations(delta, Arrays.asList(), Arrays.asList(4, 5)); + setupDeltaWithControllerRegistrations(delta, Collections.emptyList(), Arrays.asList(4, 5)); } delta.replay(zkBrokerRecord(1)); MetadataProvenance provenance = new MetadataProvenance(100, 1, 1); @@ -493,7 +493,7 @@ public void testShouldNotMoveToNextStateIfControllerNodesAreNotReadyToMigrate( // Update so that all controller nodes are zkMigrationReady. Now we should be able to move to the next state. delta = new MetadataDelta(image); - setupDeltaWithControllerRegistrations(delta, Arrays.asList(), Arrays.asList(4, 5, 6)); + setupDeltaWithControllerRegistrations(delta, Collections.emptyList(), Arrays.asList(4, 5, 6)); image = delta.apply(new MetadataProvenance(200, 1, 2)); driver.onMetadataUpdate(delta, image, new LogDeltaManifest.Builder(). provenance(image.provenance()). 
diff --git a/metadata/src/test/java/org/apache/kafka/metadata/placement/StripedReplicaPlacerTest.java b/metadata/src/test/java/org/apache/kafka/metadata/placement/StripedReplicaPlacerTest.java index 924fcdb7559aa..d0b4ad956ea1a 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/placement/StripedReplicaPlacerTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/placement/StripedReplicaPlacerTest.java @@ -119,9 +119,9 @@ public Uuid defaultDir(int brokerId) { public void testMultiPartitionTopicPlacementOnSingleUnfencedBroker() { MockRandom random = new MockRandom(); StripedReplicaPlacer placer = new StripedReplicaPlacer(random); - assertEquals(new TopicAssignment(Arrays.asList(partitionAssignment(Arrays.asList(0)), - partitionAssignment(Arrays.asList(0)), - partitionAssignment(Arrays.asList(0)))), + assertEquals(new TopicAssignment(Arrays.asList(partitionAssignment(Collections.singletonList(0)), + partitionAssignment(Collections.singletonList(0)), + partitionAssignment(Collections.singletonList(0)))), place(placer, 0, 3, (short) 1, Arrays.asList( new UsableBroker(0, Optional.empty(), false), new UsableBroker(1, Optional.empty(), true)))); diff --git a/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java b/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java index 26f8841d834f5..289739b53292f 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java @@ -25,6 +25,7 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; +import java.util.Collections; import java.util.List; public class TopicAssignmentTest { @@ -44,18 +45,18 @@ public void testTopicAssignmentReplicas() { public void testConsistentEqualsAndHashCode() { List topicAssignments = Arrays.asList( new TopicAssignment( - Arrays.asList( - partitionAssignment( - Arrays.asList(0, 1, 2) + 
Collections.singletonList( + partitionAssignment( + Arrays.asList(0, 1, 2) + ) ) - ) ), new TopicAssignment( - Arrays.asList( - partitionAssignment( - Arrays.asList(1, 2, 0) + Collections.singletonList( + partitionAssignment( + Arrays.asList(1, 2, 0) + ) ) - ) ) ); @@ -81,8 +82,8 @@ public void testToString() { Uuid.fromString("MvUIAsOiRlSePeiBHdZrSQ"), Uuid.fromString("jUqCchHtTHqMxeVv4dw1RA") ); - List partitionAssignments = Arrays.asList( - new PartitionAssignment(replicas, directories::get) + List partitionAssignments = Collections.singletonList( + new PartitionAssignment(replicas, directories::get) ); TopicAssignment topicAssignment = new TopicAssignment(partitionAssignments); assertEquals("TopicAssignment(assignments=[PartitionAssignment(replicas=[0, 1, 2], " + diff --git a/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java b/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java index 6266df68065a0..38f24707b3b0c 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java @@ -54,7 +54,7 @@ final public class MetaPropertiesEnsembleTest { private static final MetaPropertiesEnsemble FOO = new MetaPropertiesEnsemble( new HashSet<>(Arrays.asList("/tmp/empty1", "/tmp/empty2")), - new HashSet<>(Arrays.asList("/tmp/error3")), + new HashSet<>(Collections.singletonList("/tmp/error3")), Stream.of( new SimpleImmutableEntry<>("/tmp/dir4", new MetaProperties.Builder(). 
@@ -104,7 +104,7 @@ public void testEmptyLogDirsForEmpty() { @Test public void testErrorLogDirsForFoo() { - assertEquals(new HashSet<>(Arrays.asList("/tmp/error3")), FOO.errorLogDirs()); + assertEquals(new HashSet<>(Collections.singletonList("/tmp/error3")), FOO.errorLogDirs()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java b/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java index 3b82d5765c976..f7336b90c5703 100644 --- a/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java +++ b/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java @@ -99,8 +99,7 @@ public int size() { public boolean equals(Object o) { if (!(o instanceof LeaderChangeBatch)) return false; LeaderChangeBatch other = (LeaderChangeBatch) o; - if (!other.newLeader.equals(newLeader)) return false; - return true; + return other.newLeader.equals(newLeader); } @Override From d2e6c8baab40d8fc30029a301d82452563ca73b6 Mon Sep 17 00:00:00 2001 From: Sanskar Jhajharia Date: Tue, 4 Jun 2024 18:05:22 +0530 Subject: [PATCH 2/5] Remove unnecessary indentation --- .../kafka/controller/QuorumController.java | 2 +- .../ClientQuotaControlManagerTest.java | 16 +- .../controller/ClusterControlManagerTest.java | 26 +- .../ConfigurationControlManagerTest.java | 14 +- .../controller/FeatureControlManagerTest.java | 10 +- .../PartitionReassignmentRevertTest.java | 7 +- .../QuorumControllerIntegrationTestUtils.java | 10 +- .../controller/QuorumControllerTest.java | 118 ++--- .../ReplicationControlManagerTest.java | 430 +++++++++--------- .../kafka/image/ClientQuotasImageTest.java | 4 +- .../apache/kafka/image/ClusterImageTest.java | 4 +- .../kafka/image/ImageDowngradeTest.java | 74 +-- .../image/loader/MetadataLoaderTest.java | 98 ++-- .../kafka/metadata/KafkaConfigSchemaTest.java | 7 +- .../kafka/metadata/RecordTestUtils.java | 12 +- .../StandardAclRecordIteratorTest.java | 2 +- .../bootstrap/BootstrapMetadataTest.java | 4 +- 
.../placement/TopicAssignmentTest.java | 18 +- 18 files changed, 427 insertions(+), 429 deletions(-) diff --git a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java index cb2db1858b48c..9e98380691933 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java +++ b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java @@ -1404,7 +1404,7 @@ private void maybeScheduleNextWriteNoOpRecord() { maybeScheduleNextWriteNoOpRecord(); return ControllerResult.of( - Collections.singletonList(new ApiMessageAndVersion(new NoOpRecord(), (short) 0)), + Collections.singletonList(new ApiMessageAndVersion(new NoOpRecord(), (short) 0)), null ); }, diff --git a/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java index e647cd597d9d6..e03351d555a6b 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java @@ -229,19 +229,19 @@ public void testEntityTypes() throws Exception { new EntityData().setEntityType("client-id").setEntityName(null))). setKey("request_percentage").setValue(55.55).setRemove(false), (short) 0), new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( - new EntityData().setEntityType("user").setEntityName("user-1"))). + new EntityData().setEntityType("user").setEntityName("user-1"))). setKey("request_percentage").setValue(56.56).setRemove(false), (short) 0), new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( - new EntityData().setEntityType("user").setEntityName("user-2"))). + new EntityData().setEntityType("user").setEntityName("user-2"))). 
setKey("request_percentage").setValue(57.57).setRemove(false), (short) 0), new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( - new EntityData().setEntityType("user").setEntityName("user-3"))). + new EntityData().setEntityType("user").setEntityName("user-3"))). setKey("request_percentage").setValue(58.58).setRemove(false), (short) 0), new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( - new EntityData().setEntityType("user").setEntityName(null))). + new EntityData().setEntityType("user").setEntityName(null))). setKey("request_percentage").setValue(59.59).setRemove(false), (short) 0), new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( - new EntityData().setEntityType("client-id").setEntityName("client-id-2"))). + new EntityData().setEntityType("client-id").setEntityName("client-id-2"))). setKey("request_percentage").setValue(60.60).setRemove(false), (short) 0)); records = new ArrayList<>(records); RecordTestUtils.deepSortRecords(records); @@ -357,9 +357,9 @@ public void testConfigKeysForEntityTypeWithUserAndClientId() { @Test public void testConfigKeysForEntityTypeWithIp() { testConfigKeysForEntityType(Collections.singletonList(ClientQuotaEntity.IP), - Collections.singletonList( - "connection_creation_rate" - )); + Collections.singletonList( + "connection_creation_rate" + )); } private static Map keysToEntity(List entityKeys) { diff --git a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java index e2bfad53bc101..1036463276cd4 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java @@ -327,19 +327,19 @@ public void testRegisterBrokerRecordVersion(MetadataVersion metadataVersion) { short expectedVersion = 
metadataVersion.registerBrokerRecordVersion(); assertEquals( - Collections.singletonList(new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerEpoch(123L). - setBrokerId(0). - setRack(null). - setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")). - setFenced(true). - setLogDirs(logDirs). - setFeatures(new RegisterBrokerRecord.BrokerFeatureCollection(Collections.singletonList( - new RegisterBrokerRecord.BrokerFeature(). - setName(MetadataVersion.FEATURE_NAME). - setMinSupportedVersion((short) 1). - setMaxSupportedVersion((short) 1)).iterator())). - setInControlledShutdown(false), expectedVersion)), + Collections.singletonList(new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerEpoch(123L). + setBrokerId(0). + setRack(null). + setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")). + setFenced(true). + setLogDirs(logDirs). + setFeatures(new RegisterBrokerRecord.BrokerFeatureCollection(Collections.singletonList( + new RegisterBrokerRecord.BrokerFeature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion((short) 1). + setMaxSupportedVersion((short) 1)).iterator())). 
+ setInControlledShutdown(false), expectedVersion)), result.records()); } diff --git a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java index b24848147878d..f61d763c81888 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java @@ -382,13 +382,13 @@ expectedRecords1, toMap(entry(MYTOPIC, ApiError.NONE))), manager.replay((ConfigRecord) message.message()); } assertEquals(ControllerResult.atomicOf(Collections.singletonList( - new ApiMessageAndVersion( - new ConfigRecord() - .setResourceType(TOPIC.id()) - .setResourceName("mytopic") - .setName("abc") - .setValue(null), - CONFIG_RECORD.highestSupportedVersion())), + new ApiMessageAndVersion( + new ConfigRecord() + .setResourceType(TOPIC.id()) + .setResourceName("mytopic") + .setName("abc") + .setValue(null), + CONFIG_RECORD.highestSupportedVersion())), toMap(entry(MYTOPIC, ApiError.NONE))), manager.legacyAlterConfigs(toMap(entry(MYTOPIC, toMap(entry("def", "901")))), true)); diff --git a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java index 16f2809792687..df66f68843bbd 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java @@ -169,8 +169,8 @@ public void testUpdateFeaturesErrorCases() { setQuorumFeatures(features("foo", 1, 5, "bar", 0, 3)). setSnapshotRegistry(snapshotRegistry). setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber( - Collections.singletonList(new SimpleImmutableEntry<>(5, singletonMap("bar", VersionRange.of(0, 3)))), - emptyList())). 
+ Collections.singletonList(new SimpleImmutableEntry<>(5, singletonMap("bar", VersionRange.of(0, 3)))), + emptyList())). build(); assertEquals(ControllerResult.atomicOf(emptyList(), @@ -389,15 +389,15 @@ public void testCreateFeatureLevelRecords() { FeatureControlManager manager = new FeatureControlManager.Builder(). setQuorumFeatures(new QuorumFeatures(0, localSupportedFeatures, emptyList())). setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber( - Collections.singletonList(new SimpleImmutableEntry<>(1, singletonMap("foo", VersionRange.of(0, 3)))), - emptyList())). + Collections.singletonList(new SimpleImmutableEntry<>(1, singletonMap("foo", VersionRange.of(0, 3)))), + emptyList())). build(); ControllerResult> result = manager.updateFeatures( Collections.singletonMap("foo", (short) 1), Collections.singletonMap("foo", FeatureUpdate.UpgradeType.UPGRADE), false); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( - new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 1), (short) 0)), + new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 1), (short) 0)), Collections.singletonMap("foo", ApiError.NONE)), result); RecordTestUtils.replayAll(manager, result.records()); assertEquals(Optional.of((short) 1), manager.finalizedFeatures(Long.MAX_VALUE).get("foo")); diff --git a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java index ae1251e1c2afa..05148813e8108 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java @@ -18,7 +18,6 @@ package org.apache.kafka.controller; import java.util.Arrays; -import java.util.Collections; import org.apache.kafka.common.Uuid; import org.apache.kafka.metadata.LeaderRecoveryState; @@ -79,7 +78,7 @@ 
public void testSomeAdding() { setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build(); PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration); assertEquals(Arrays.asList(3, 2, 1), revert.replicas()); - assertEquals(Collections.singletonList(2), revert.isr()); + assertEquals(Arrays.asList(2), revert.isr()); assertFalse(revert.unclean()); } @@ -97,7 +96,7 @@ public void testSomeRemovingAndAdding() { setRemovingReplicas(new int[]{2}).setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build(); PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration); assertEquals(Arrays.asList(3, 2, 1), revert.replicas()); - assertEquals(Collections.singletonList(2), revert.isr()); + assertEquals(Arrays.asList(2), revert.isr()); assertFalse(revert.unclean()); } @@ -115,7 +114,7 @@ public void testIsrSpecialCase() { setRemovingReplicas(new int[]{2}).setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build(); PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration); assertEquals(Arrays.asList(3, 2, 1), revert.replicas()); - assertEquals(Collections.singletonList(3), revert.isr()); + assertEquals(Arrays.asList(3), revert.isr()); assertTrue(revert.unclean()); } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java index bb3f0bbd57b8b..c6cb84d88b651 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java @@ -99,11 +99,11 @@ static Map 
registerBrokersAndUnfence( Uuid.fromString("TESTBROKER" + Integer.toString(100000 + brokerId).substring(1) + "DIRAAAA") )) .setListeners(new ListenerCollection( - Collections.singletonList( - new Listener() - .setName("PLAINTEXT") - .setHost("localhost") - .setPort(9092 + brokerId) + Collections.singletonList( + new Listener() + .setName("PLAINTEXT") + .setHost("localhost") + .setPort(9092 + brokerId) ).iterator() ) ) diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java index 12f5fc478202a..9dec8adddeffb 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java @@ -270,7 +270,7 @@ private void testDelayedConfigurationOperations( @Test public void testFenceMultipleBrokers() throws Throwable { List allBrokers = Arrays.asList(1, 2, 3, 4, 5); - List brokersToKeepUnfenced = singletonList(1); + List brokersToKeepUnfenced = Arrays.asList(1); List brokersToFence = Arrays.asList(2, 3, 4, 5); short replicationFactor = (short) allBrokers.size(); short numberOfPartitions = (short) allBrokers.size(); @@ -798,20 +798,20 @@ public void testSnapshotSaveAndLoad() throws Throwable { setIncarnationId(new Uuid(3465346L, i)). setZkMigrationReady(false). setListeners(new ControllerRegistrationRequestData.ListenerCollection( - singletonList( - new ControllerRegistrationRequestData.Listener(). - setName("CONTROLLER"). - setHost("localhost"). - setPort(8000 + i). - setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + singletonList( + new ControllerRegistrationRequestData.Listener(). + setName("CONTROLLER"). + setHost("localhost"). + setPort(8000 + i). + setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) ).iterator() )). setFeatures(new ControllerRegistrationRequestData.FeatureCollection( - singletonList( - new ControllerRegistrationRequestData.Feature(). 
- setName(MetadataVersion.FEATURE_NAME). - setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). - setMaxSupportedVersion(MetadataVersion.IBP_3_7_IV0.featureLevel()) + singletonList( + new ControllerRegistrationRequestData.Feature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). + setMaxSupportedVersion(MetadataVersion.IBP_3_7_IV0.featureLevel()) ).iterator() ))).get(); } @@ -824,8 +824,8 @@ public void testSnapshotSaveAndLoad() throws Throwable { setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_7_IV0)). setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + i)). setListeners(new ListenerCollection(singletonList(new Listener(). - setName("PLAINTEXT").setHost("localhost"). - setPort(9092 + i)).iterator()))).get(); + setName("PLAINTEXT").setHost("localhost"). + setPort(9092 + i)).iterator()))).get(); brokerEpochs.put(i, reply.epoch()); } for (int i = 0; i < numBrokers - 1; i++) { @@ -872,59 +872,59 @@ private List generateTestRecords(Uuid fooId, Map generateTestRecords(Uuid fooId, Map generateTestRecords(Uuid fooId, Map generateTestRecords(Uuid fooId, Map 1 ? topicId : Uuid.ZERO_UUID) - .setPartitions(singletonList(new PartitionData() - .setPartitionIndex(0))))); + .setTopicName(version <= 1 ? topicName : "") + .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new PartitionData() + .setPartitionIndex(0))))); ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.ALTER_PARTITION, version); @@ -1172,11 +1172,11 @@ public void testAlterPartitionHandleUnknownTopicIdOrName(short version) { Errors expectedError = version > 1 ? UNKNOWN_TOPIC_ID : UNKNOWN_TOPIC_OR_PARTITION; AlterPartitionResponseData expectedResponse = new AlterPartitionResponseData() .setTopics(singletonList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? topicName : "") - .setTopicId(version > 1 ? 
topicId : Uuid.ZERO_UUID) - .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(expectedError.code()))))); + .setTopicName(version <= 1 ? topicName : "") + .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(expectedError.code()))))); assertEquals(expectedResponse, result.response()); } @@ -1510,16 +1510,16 @@ public void testCreatePartitions() { List topics2 = new ArrayList<>(); topics2.add(new CreatePartitionsTopic(). setName("foo").setCount(6).setAssignments(singletonList( - new CreatePartitionsAssignment().setBrokerIds(asList(1, 3))))); + new CreatePartitionsAssignment().setBrokerIds(asList(1, 3))))); topics2.add(new CreatePartitionsTopic(). setName("bar").setCount(5).setAssignments(singletonList( - new CreatePartitionsAssignment().setBrokerIds(singletonList(1))))); + new CreatePartitionsAssignment().setBrokerIds(singletonList(1))))); topics2.add(new CreatePartitionsTopic(). setName("quux").setCount(4).setAssignments(singletonList( - new CreatePartitionsAssignment().setBrokerIds(asList(1, 0))))); + new CreatePartitionsAssignment().setBrokerIds(asList(1, 0))))); topics2.add(new CreatePartitionsTopic(). setName("foo2").setCount(3).setAssignments(singletonList( - new CreatePartitionsAssignment().setBrokerIds(asList(2, 0))))); + new CreatePartitionsAssignment().setBrokerIds(asList(2, 0))))); ControllerResult> createPartitionsResult2 = replicationControl.createPartitions(requestContext, topics2); assertEquals(asList(new CreatePartitionsTopicResult(). @@ -1580,7 +1580,7 @@ public void testCreatePartitionsWithMutationQuotaExceeded() { List topics2 = new ArrayList<>(); topics2.add(new CreatePartitionsTopic(). 
setName("foo").setCount(4).setAssignments(singletonList( - new CreatePartitionsAssignment().setBrokerIds(asList(1, 0))))); + new CreatePartitionsAssignment().setBrokerIds(asList(1, 0))))); ControllerResult> createPartitionsResult2 = replicationControl.createPartitions(createPartitionsRequestContext, topics2); assertEquals(expectedThrottled, createPartitionsResult2.response()); @@ -1614,11 +1614,11 @@ public void testCreatePartitionsFailsWhenAllBrokersAreFencedOrInControlledShutdo replicationControl.createPartitions(requestContext, topics); assertEquals( - singletonList(new CreatePartitionsTopicResult(). - setName("foo"). - setErrorCode(INVALID_REPLICATION_FACTOR.code()). - setErrorMessage("Unable to replicate the partition 2 time(s): All " + - "brokers are currently fenced or in controlled shutdown.")), + singletonList(new CreatePartitionsTopicResult(). + setName("foo"). + setErrorCode(INVALID_REPLICATION_FACTOR.code()). + setErrorMessage("Unable to replicate the partition 2 time(s): All " + + "brokers are currently fenced or in controlled shutdown.")), createPartitionsResult.response()); } @@ -1641,7 +1641,7 @@ public void testCreatePartitionsISRInvariants() { ctx.replay(result.records()); List topics = singletonList(new CreatePartitionsTopic(). - setName("foo").setCount(2).setAssignments(null)); + setName("foo").setCount(2).setAssignments(null)); ControllerResult> createPartitionsResult = replicationControl.createPartitions(requestContext, topics); @@ -1749,18 +1749,18 @@ public void testReassignPartitions(short version) { ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). setTopics(singletonList(new OngoingTopicReassignment(). - setName("foo").setPartitions(singletonList( - new OngoingPartitionReassignment().setPartitionIndex(1). - setRemovingReplicas(singletonList(3)). - setAddingReplicas(singletonList(0)). 
- setReplicas(asList(0, 2, 1, 3)))))); + setName("foo").setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(1). + setRemovingReplicas(singletonList(3)). + setAddingReplicas(singletonList(0)). + setReplicas(asList(0, 2, 1, 3)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(singletonList( - new ListPartitionReassignmentsTopics().setName("bar"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + new ListPartitionReassignmentsTopics().setName("bar"). + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); assertEquals(currentReassigning, replication.listPartitionReassignments(singletonList( - new ListPartitionReassignmentsTopics().setName("foo"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + new ListPartitionReassignmentsTopics().setName("foo"). + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); ControllerResult cancelResult = replication.alterPartitionReassignments( new AlterPartitionReassignmentsRequestData().setTopics(asList( @@ -1772,8 +1772,8 @@ public void testReassignPartitions(short version) { new ReassignablePartition().setPartitionIndex(2). setReplicas(null))), new ReassignableTopic().setName("bar").setPartitions(singletonList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null)))))); + new ReassignablePartition().setPartitionIndex(0). + setReplicas(null)))))); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(fooId). setPartitionId(1). @@ -1796,9 +1796,9 @@ public void testReassignPartitions(short version) { setErrorCode(UNKNOWN_TOPIC_OR_PARTITION.code()). setErrorMessage("Unable to find partition foo:2."))), new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( - new ReassignablePartitionResponse().setPartitionIndex(0). 
- setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()). - setErrorMessage(null)))))), + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()). + setErrorMessage(null)))))), cancelResult); log.info("running final alterPartition..."); ControllerRequestContext requestContext = @@ -1807,25 +1807,25 @@ public void testReassignPartitions(short version) { setBrokerId(3). setBrokerEpoch(103). setTopics(singletonList(new TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(singletonList(new PartitionData(). - setPartitionIndex(1). - setPartitionEpoch(1). - setLeaderEpoch(0). - setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1)))))); + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(1). + setPartitionEpoch(1). + setLeaderEpoch(0). + setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1)))))); ControllerResult alterPartitionResult = replication.alterPartition( requestContext, new AlterPartitionRequest.Builder(alterPartitionRequestData, version > 1).build(version).data()); Errors expectedError = version > 1 ? NEW_LEADER_ELECTED : FENCED_LEADER_EPOCH; assertEquals(new AlterPartitionResponseData().setTopics(singletonList( - new AlterPartitionResponseData.TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(singletonList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(1). - setErrorCode(expectedError.code()))))), + new AlterPartitionResponseData.TopicData(). + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(1). 
+ setErrorCode(expectedError.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -1868,13 +1868,13 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { .setBrokerId(1) .setBrokerEpoch(101) .setTopics(singletonList(new TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(singletonList(new PartitionData() - .setPartitionIndex(0) - .setPartitionEpoch(1) - .setLeaderEpoch(0) - .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new PartitionData() + .setPartitionIndex(0) + .setPartitionEpoch(1) + .setLeaderEpoch(0) + .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.ALTER_PARTITION, version); @@ -1886,11 +1886,11 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { assertEquals( new AlterPartitionResponseData() .setTopics(singletonList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(expectedError.code()))))), + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? 
fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(expectedError.code()))))), alterPartitionResult.response()); fenceRecords = new ArrayList<>(); @@ -1902,15 +1902,15 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { assertEquals( new AlterPartitionResponseData() .setTopics(singletonList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setLeaderId(1) - .setLeaderEpoch(0) - .setIsr(asList(1, 2, 3, 4)) - .setPartitionEpoch(2) - .setErrorCode(NONE.code()))))), + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setLeaderId(1) + .setLeaderEpoch(0) + .setIsr(asList(1, 2, 3, 4)) + .setPartitionEpoch(2) + .setErrorCode(NONE.code()))))), alterPartitionResult.response()); } @@ -1932,13 +1932,13 @@ public void testAlterPartitionShouldRejectBrokersWithStaleEpoch(short version) { setBrokerId(1). setBrokerEpoch(101). setTopics(singletonList(new TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(singletonList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(1). - setLeaderEpoch(0). - setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(1). + setLeaderEpoch(0). + setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); // The broker 4 has failed silently and now registers again. 
long newEpoch = defaultBrokerEpoch(4) + 1000; @@ -1972,11 +1972,11 @@ public void testAlterPartitionShouldRejectBrokersWithStaleEpoch(short version) { assertEquals( new AlterPartitionResponseData(). setTopics(singletonList(new AlterPartitionResponseData.TopicData(). - setTopicName(""). - setTopicId(fooId). - setPartitions(singletonList(new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setErrorCode(INELIGIBLE_REPLICA.code()))))), + setTopicName(""). + setTopicId(fooId). + setPartitions(singletonList(new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setErrorCode(INELIGIBLE_REPLICA.code()))))), alterPartitionResult.response()); } else { assertEquals(NONE.code(), alterPartitionResult.response().errorCode()); @@ -2018,13 +2018,13 @@ public void testAlterPartitionShouldRejectShuttingDownBrokers(short version) { .setBrokerId(1) .setBrokerEpoch(101) .setTopics(singletonList(new TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(singletonList(new PartitionData() - .setPartitionIndex(0) - .setPartitionEpoch(0) - .setLeaderEpoch(0) - .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new PartitionData() + .setPartitionIndex(0) + .setPartitionEpoch(0) + .setLeaderEpoch(0) + .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.ALTER_PARTITION, version); @@ -2036,11 +2036,11 @@ public void testAlterPartitionShouldRejectShuttingDownBrokers(short version) { assertEquals( new AlterPartitionResponseData() .setTopics(singletonList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? 
fooId : Uuid.ZERO_UUID) - .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(expectedError.code()))))), + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(expectedError.code()))))), alterPartitionResult.response()); } @@ -2084,7 +2084,7 @@ public void testCancelReassignPartitions() { setReplicas(Collections.emptyList()))), new ReassignableTopic().setName("bar").setPartitions(singletonList( new ReassignablePartition().setPartitionIndex(0). - setReplicas(asList(1, 2, 3, 4, 0))))))); + setReplicas(asList(1, 2, 3, 4, 0))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). setErrorMessage(null).setResponses(asList( new ReassignableTopicResponse().setName("foo").setPartitions(asList( @@ -2101,8 +2101,8 @@ public void testCancelReassignPartitions() { setErrorMessage("The manual partition assignment includes an empty " + "replica list."))), new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null))))), alterResult.response()); ctx.replay(alterResult.records()); assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 4}).setIsr(new int[] {1, 2, 4}). @@ -2132,43 +2132,43 @@ public void testCancelReassignPartitions() { ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). setTopics(singletonList(new OngoingTopicReassignment(). - setName("bar").setPartitions(singletonList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(Collections.emptyList()). - setAddingReplicas(asList(0, 1)). 
- setReplicas(asList(1, 2, 3, 4, 0)))))); + setName("bar").setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(Collections.emptyList()). + setAddingReplicas(asList(0, 1)). + setReplicas(asList(1, 2, 3, 4, 0)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(singletonList( - new ListPartitionReassignmentsTopics().setName("foo"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + new ListPartitionReassignmentsTopics().setName("foo"). + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); assertEquals(currentReassigning, replication.listPartitionReassignments(singletonList( - new ListPartitionReassignmentsTopics().setName("bar"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + new ListPartitionReassignmentsTopics().setName("bar"). + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(4).setBrokerEpoch(104). setTopics(singletonList(new TopicData().setTopicId(barId).setPartitions(singletonList( - new PartitionData().setPartitionIndex(0).setPartitionEpoch(2). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(4, 1, 2, 0))))))); + new PartitionData().setPartitionIndex(0).setPartitionEpoch(2). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(4, 1, 2, 0))))))); assertEquals(new AlterPartitionResponseData().setTopics(singletonList( - new AlterPartitionResponseData.TopicData().setTopicId(barId).setPartitions(singletonList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(4). - setLeaderEpoch(0). - setIsr(asList(4, 1, 2, 0)). - setPartitionEpoch(3). 
- setErrorCode(NONE.code()))))), + new AlterPartitionResponseData.TopicData().setTopicId(barId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(4). + setLeaderEpoch(0). + setIsr(asList(4, 1, 2, 0)). + setPartitionEpoch(3). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ControllerResult cancelResult = replication.alterPartitionReassignments( new AlterPartitionReassignmentsRequestData().setTopics(asList( new ReassignableTopic().setName("foo").setPartitions(singletonList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null))), + new ReassignablePartition().setPartitionIndex(0). + setReplicas(null))), new ReassignableTopic().setName("bar").setPartitions(singletonList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null)))))); + new ReassignablePartition().setPartitionIndex(0). + setReplicas(null)))))); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(barId). setPartitionId(0). @@ -2183,11 +2183,11 @@ public void testCancelReassignPartitions() { setAddingReplicas(Collections.emptyList()), MetadataVersion.latestTesting().partitionChangeRecordVersion())), new AlterPartitionReassignmentsResponseData().setErrorMessage(null).setResponses(asList( new ReassignableTopicResponse().setName("foo").setPartitions(singletonList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()).setErrorMessage(null))), + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()).setErrorMessage(null))), new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null)))))), + new ReassignablePartitionResponse().setPartitionIndex(0). 
+ setErrorMessage(null)))))), cancelResult); ctx.replay(cancelResult.records()); assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2462,29 +2462,29 @@ public void testElectPreferredLeaders() { anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). setTopics(singletonList(new TopicData().setTopicId(fooId). - setPartitions(asList( - new PartitionData(). - setPartitionIndex(0).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3)), - new PartitionData(). - setPartitionIndex(2).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); + setPartitions(asList( + new PartitionData(). + setPartitionIndex(0).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3)), + new PartitionData(). + setPartitionIndex(2).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); assertEquals(new AlterPartitionResponseData().setTopics(singletonList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(1, 2, 3)). - setPartitionEpoch(1). - setErrorCode(NONE.code()), - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(2). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(0, 2, 1)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(1, 2, 3)). + setPartitionEpoch(1). + setErrorCode(NONE.code()), + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(2). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(0, 2, 1)). 
+ setPartitionEpoch(1). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ElectLeadersResponseData expectedResponse2 = buildElectLeadersResponse(NONE, false, Utils.mkMap( @@ -2548,18 +2548,18 @@ public void testBalancePartitionLeaders() { anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). setTopics(singletonList(new TopicData().setTopicId(fooId). - setPartitions(singletonList(new PartitionData(). - setPartitionIndex(0).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3))))))); + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3))))))); assertEquals(new AlterPartitionResponseData().setTopics(singletonList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(singletonList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(1, 2, 3)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(1, 2, 3)). + setPartitionEpoch(1). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2580,18 +2580,18 @@ public void testBalancePartitionLeaders() { anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). setTopics(singletonList(new TopicData().setTopicId(fooId). - setPartitions(singletonList(new PartitionData(). - setPartitionIndex(2).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); + setPartitions(singletonList(new PartitionData(). 
+ setPartitionIndex(2).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); assertEquals(new AlterPartitionResponseData().setTopics(singletonList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(singletonList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(2). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(0, 2, 1)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(2). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(0, 2, 1)). + setPartitionEpoch(1). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2774,24 +2774,24 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd ControllerResult alterResultOne = replication.alterPartitionReassignments( new AlterPartitionReassignmentsRequestData().setTopics(singletonList( - new ReassignableTopic().setName(topic).setPartitions(singletonList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(asList(2, 3))))))); + new ReassignableTopic().setName(topic).setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(asList(2, 3))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). setErrorMessage(null).setResponses(singletonList( - new ReassignableTopicResponse().setName(topic).setPartitions(singletonList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), alterResultOne.response()); + new ReassignableTopicResponse().setName(topic).setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). 
+ setErrorMessage(null))))), alterResultOne.response()); ctx.replay(alterResultOne.records()); ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). setTopics(singletonList(new OngoingTopicReassignment(). - setName(topic).setPartitions(singletonList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(asList(0, 1)). - setAddingReplicas(asList(2, 3)). - setReplicas(asList(2, 3, 0, 1)))))); + setName(topic).setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(asList(0, 1)). + setAddingReplicas(asList(2, 3)). + setReplicas(asList(2, 3, 0, 1)))))); // Make sure the reassignment metadata is as expected. assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2803,24 +2803,24 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd setBrokerId(partition.leader). setBrokerEpoch(ctx.currentBrokerEpoch(partition.leader)). setTopics(singletonList(new TopicData(). - setTopicId(topicId). - setPartitions(singletonList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(partition.partitionEpoch). - setLeaderEpoch(partition.leaderEpoch). - setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2)))))); + setTopicId(topicId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(partition.partitionEpoch). + setLeaderEpoch(partition.leaderEpoch). + setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2)))))); ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequest.Builder(alterPartitionRequestData, true).build().data()); assertEquals(new AlterPartitionResponseData().setTopics(singletonList( - new AlterPartitionResponseData.TopicData(). - setTopicId(topicId). 
- setPartitions(singletonList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setIsr(asList(0, 1, 2)). - setPartitionEpoch(partition.partitionEpoch + 1). - setErrorCode(NONE.code()))))), + new AlterPartitionResponseData.TopicData(). + setTopicId(topicId). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setIsr(asList(0, 1, 2)). + setPartitionEpoch(partition.partitionEpoch + 1). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2846,25 +2846,25 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd ControllerResult alterResultTwo = replication.alterPartitionReassignments( new AlterPartitionReassignmentsRequestData().setTopics(singletonList( - new ReassignableTopic().setName(topic).setPartitions(singletonList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(asList(4, 5))))))); + new ReassignableTopic().setName(topic).setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(asList(4, 5))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). setErrorMessage(null).setResponses(singletonList( - new ReassignableTopicResponse().setName(topic).setPartitions(singletonList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), alterResultTwo.response()); + new ReassignableTopicResponse().setName(topic).setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null))))), alterResultTwo.response()); ctx.replay(alterResultTwo.records()); // Make sure the replicas list contains all the previous replicas 0, 1, 2, 3 as well as the new replicas 3, 4 currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). setTopics(singletonList(new OngoingTopicReassignment(). 
- setName(topic).setPartitions(singletonList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(asList(0, 1, 2, 3)). - setAddingReplicas(asList(4, 5)). - setReplicas(asList(4, 5, 0, 1, 2, 3)))))); + setName(topic).setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(asList(0, 1, 2, 3)). + setAddingReplicas(asList(4, 5)). + setReplicas(asList(4, 5, 0, 1, 2, 3)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2878,22 +2878,22 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd setBrokerId(partition.leader). setBrokerEpoch(ctx.currentBrokerEpoch(partition.leader)). setTopics(singletonList(new TopicData(). - setTopicId(topicId). - setPartitions(singletonList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(partition.partitionEpoch). - setLeaderEpoch(partition.leaderEpoch). - setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5)))))); + setTopicId(topicId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(partition.partitionEpoch). + setLeaderEpoch(partition.leaderEpoch). + setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5)))))); ControllerResult alterPartitionResultTwo = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequest.Builder(alterPartitionRequestDataTwo, true).build().data()); assertEquals(new AlterPartitionResponseData().setTopics(singletonList( - new AlterPartitionResponseData.TopicData(). - setTopicId(topicId). - setPartitions(singletonList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setErrorCode(NEW_LEADER_ELECTED.code()))))), + new AlterPartitionResponseData.TopicData(). + setTopicId(topicId). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). 
+ setErrorCode(NEW_LEADER_ELECTED.code()))))), alterPartitionResultTwo.response()); ctx.replay(alterPartitionResultTwo.records()); diff --git a/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java b/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java index bef1d35efc3ed..4e9215e1635d6 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java @@ -74,13 +74,13 @@ public class ClientQuotasImageTest { // alter quota DELTA1_RECORDS.add(new ApiMessageAndVersion(new ClientQuotaRecord(). setEntity(Collections.singletonList( - new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). + new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). setKey(QuotaConfigs.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG). setValue(234.0), CLIENT_QUOTA_RECORD.highestSupportedVersion())); // add quota to entity with existing quota DELTA1_RECORDS.add(new ApiMessageAndVersion(new ClientQuotaRecord(). setEntity(Collections.singletonList( - new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). + new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). setKey(QuotaConfigs.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG). setValue(999.0), CLIENT_QUOTA_RECORD.highestSupportedVersion())); diff --git a/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java b/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java index 730cfec4963ef..f07cfeaaba5a1 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java @@ -193,8 +193,8 @@ public class ClusterImageTest { DELTA2_RECORDS.add(new ApiMessageAndVersion(new RegisterBrokerRecord(). setBrokerId(2).setIsMigratingZkBroker(true).setIncarnationId(Uuid.fromString("Am5Yse7GQxaw0b2alM74bP")). 
setBrokerEpoch(1002).setEndPoints(new BrokerEndpointCollection( - Collections.singletonList(new BrokerEndpoint().setName("PLAINTEXT").setHost("localhost"). - setPort(9094).setSecurityProtocol((short) 0)).iterator())). + Collections.singletonList(new BrokerEndpoint().setName("PLAINTEXT").setHost("localhost"). + setPort(9094).setSecurityProtocol((short) 0)).iterator())). setFeatures(new BrokerFeatureCollection( Collections.singleton(new BrokerFeature(). setName(MetadataVersion.FEATURE_NAME). diff --git a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java index 369ebd36d88e2..64d4fc27f3e75 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java @@ -83,18 +83,18 @@ static ApiMessageAndVersion metadataVersionRecord(MetadataVersion metadataVersio @Test public void testPremodernVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_2_IV0, - Collections.singletonList( - "feature flag(s): foo.feature"), - Arrays.asList( - metadataVersionRecord(MetadataVersion.IBP_3_3_IV0), - TEST_RECORDS.get(0), - TEST_RECORDS.get(1), - new ApiMessageAndVersion(new FeatureLevelRecord(). - setName("foo.feature"). - setFeatureLevel((short) 4), (short) 0)), - Arrays.asList( - TEST_RECORDS.get(0), - TEST_RECORDS.get(1))); + Collections.singletonList( + "feature flag(s): foo.feature"), + Arrays.asList( + metadataVersionRecord(MetadataVersion.IBP_3_3_IV0), + TEST_RECORDS.get(0), + TEST_RECORDS.get(1), + new ApiMessageAndVersion(new FeatureLevelRecord(). + setName("foo.feature"). 
+ setFeatureLevel((short) 4), (short) 0)), + Arrays.asList( + TEST_RECORDS.get(0), + TEST_RECORDS.get(1))); } /** @@ -134,31 +134,31 @@ public void testPreControlledShutdownStateVersion() { @Test public void testPreZkMigrationSupportVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_3_IV3, - Collections.singletonList( - "the isMigratingZkBroker state of one or more brokers"), - Arrays.asList( - metadataVersionRecord(MetadataVersion.IBP_3_4_IV0), - new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerId(123). - setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). - setBrokerEpoch(456). - setRack(null). - setFenced(false). - setInControlledShutdown(true). - setIsMigratingZkBroker(true), (short) 2), - TEST_RECORDS.get(0), - TEST_RECORDS.get(1)), - Arrays.asList( - metadataVersionRecord(MetadataVersion.IBP_3_3_IV3), - new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerId(123). - setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). - setBrokerEpoch(456). - setRack(null). - setFenced(false). - setInControlledShutdown(true), (short) 1), - TEST_RECORDS.get(0), - TEST_RECORDS.get(1))); + Collections.singletonList( + "the isMigratingZkBroker state of one or more brokers"), + Arrays.asList( + metadataVersionRecord(MetadataVersion.IBP_3_4_IV0), + new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerId(123). + setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). + setBrokerEpoch(456). + setRack(null). + setFenced(false). + setInControlledShutdown(true). + setIsMigratingZkBroker(true), (short) 2), + TEST_RECORDS.get(0), + TEST_RECORDS.get(1)), + Arrays.asList( + metadataVersionRecord(MetadataVersion.IBP_3_3_IV3), + new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerId(123). + setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). + setBrokerEpoch(456). + setRack(null). + setFenced(false). 
+ setInControlledShutdown(true), (short) 1), + TEST_RECORDS.get(0), + TEST_RECORDS.get(1))); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java b/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java index 84f2542522c7d..2ab42cb4778f1 100644 --- a/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java @@ -253,15 +253,15 @@ public void testPublisherCannotBeInstalledMoreThanOnce( if (loadSnapshot) { MockSnapshotReader snapshotReader = new MockSnapshotReader( new MetadataProvenance(200, 100, 4000), - Collections.singletonList( - Batch.control( - 200, - 100, - 4000, - 10, - Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) - ) + Collections.singletonList( + Batch.control( + 200, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) ) + ) ); loader.handleLoadSnapshot(snapshotReader); TestUtils.retryOnExceptionWithTimeout(30_000, () -> { @@ -305,11 +305,11 @@ public void testRemovePublisher() throws Exception { loader.installPublishers(publishers.subList(0, 2)).get(); loader.removeAndClosePublisher(publishers.get(1)).get(); MockSnapshotReader snapshotReader = MockSnapshotReader.fromRecordLists( - new MetadataProvenance(100, 50, 2000), - Collections.singletonList(Collections.singletonList(new ApiMessageAndVersion( - new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)))); + new MetadataProvenance(100, 50, 2000), + Collections.singletonList(Collections.singletonList(new ApiMessageAndVersion( + new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). 
+ setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)))); assertFalse(snapshotReader.closed); loader.handleLoadSnapshot(snapshotReader); loader.waitForAllEventsToBeHandled(); @@ -364,15 +364,15 @@ private void loadEmptySnapshot( ) throws Exception { MockSnapshotReader snapshotReader = new MockSnapshotReader( new MetadataProvenance(offset, 100, 4000), - Collections.singletonList( - Batch.control( - 200, - 100, - 4000, - 10, - Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) - ) + Collections.singletonList( + Batch.control( + 200, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) ) + ) ); if (loader.time() instanceof MockTime) { snapshotReader.setTime((MockTime) loader.time()); @@ -462,16 +462,16 @@ public void testLoadEmptyBatch() throws Exception { loadTestSnapshot(loader, 200); publishers.get(0).firstPublish.get(10, TimeUnit.SECONDS); MockBatchReader batchReader = new MockBatchReader( - 300, - Collections.singletonList( - Batch.control( - 300, - 100, - 4000, - 10, - Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) - ) + 300, + Collections.singletonList( + Batch.control( + 300, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) ) + ) ).setTime(time); loader.handleCommit(batchReader); loader.waitForAllEventsToBeHandled(); @@ -508,12 +508,12 @@ public void testLastAppliedOffset() throws Exception { loader.installPublishers(publishers).get(); loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(200, 100, 4000), asList( - Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). 
- setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), - Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). - setName("foo"). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) + Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + setName("foo"). + setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) ))); for (MockPublisher publisher : publishers) { publisher.firstPublish.get(1, TimeUnit.MINUTES); @@ -521,9 +521,9 @@ public void testLastAppliedOffset() throws Exception { loader.waitForAllEventsToBeHandled(); assertEquals(200L, loader.lastAppliedOffset()); loader.handleCommit(new MockBatchReader(201, Collections.singletonList( - MockBatchReader.newBatch(201, 100, Collections.singletonList( - new ApiMessageAndVersion(new RemoveTopicRecord(). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)))))); + MockBatchReader.newBatch(201, 100, Collections.singletonList( + new ApiMessageAndVersion(new RemoveTopicRecord(). + setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)))))); loader.waitForAllEventsToBeHandled(); assertEquals(201L, loader.lastAppliedOffset()); } @@ -638,10 +638,10 @@ public void testReloadSnapshot() throws Exception { assertTrue(publishers.get(0).latestImage.topics().topicsByName().containsKey("bar")); loader.handleCommit(new MockBatchReader(500, Collections.singletonList( - MockBatchReader.newBatch(500, 100, Collections.singletonList( - new ApiMessageAndVersion(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(IBP_3_5_IV0.featureLevel()), (short) 0)))))); + MockBatchReader.newBatch(500, 100, Collections.singletonList( + new ApiMessageAndVersion(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). 
+ setFeatureLevel(IBP_3_5_IV0.featureLevel()), (short) 0)))))); loader.waitForAllEventsToBeHandled(); assertEquals(IBP_3_5_IV0.featureLevel(), loader.metrics().currentMetadataVersion().featureLevel()); @@ -691,7 +691,7 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { if (abortTxn) { loader.handleCommit( MockBatchReader.newSingleBatchReader(500, 100, Collections.singletonList( - new ApiMessageAndVersion(new AbortTransactionRecord(), (short) 0) + new ApiMessageAndVersion(new AbortTransactionRecord(), (short) 0) ))); loader.waitForAllEventsToBeHandled(); @@ -700,7 +700,7 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { } else { loader.handleCommit( MockBatchReader.newSingleBatchReader(500, 100, Collections.singletonList( - new ApiMessageAndVersion(new EndTransactionRecord(), (short) 0) + new ApiMessageAndVersion(new EndTransactionRecord(), (short) 0) ))); loader.waitForAllEventsToBeHandled(); @@ -769,10 +769,10 @@ public void testSnapshotDuringTransaction() throws Exception { // loading a snapshot discards any in-flight transaction loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(600, 101, 4000), Collections.singletonList( - Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). - setName("foo"). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) - ))); + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + setName("foo"). 
+ setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) + ))); loader.waitForAllEventsToBeHandled(); assertEquals("Uum7sfhHQP-obSvfywmNUA", publisher.latestImage.topics().getTopic("foo").id().toString()); diff --git a/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java b/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java index 2574747f0a1ed..49597fff9ae7b 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java @@ -26,7 +26,6 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -62,9 +61,9 @@ public class KafkaConfigSchemaTest { public static final Map> SYNONYMS = new HashMap<>(); static { - SYNONYMS.put("abc", Collections.singletonList(new ConfigSynonym("foo.bar"))); - SYNONYMS.put("def", Collections.singletonList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); - SYNONYMS.put("ghi", Collections.singletonList(new ConfigSynonym("ghi"))); + SYNONYMS.put("abc", Arrays.asList(new ConfigSynonym("foo.bar"))); + SYNONYMS.put("def", Arrays.asList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); + SYNONYMS.put("ghi", Arrays.asList(new ConfigSynonym("ghi"))); SYNONYMS.put("xyz", Arrays.asList(new ConfigSynonym("quuux"), new ConfigSynonym("quuux2"))); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java index 608665e751110..3b0ef9c4375ab 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java @@ -373,12 +373,12 @@ public static RegisterControllerRecord createTestControllerRegistration( ).iterator() )). 
setFeatures(new RegisterControllerRecord.ControllerFeatureCollection( - Collections.singletonList( - new RegisterControllerRecord.ControllerFeature(). - setName(MetadataVersion.FEATURE_NAME). - setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). - setMaxSupportedVersion(MetadataVersion.IBP_3_6_IV1.featureLevel()) - ).iterator() + Collections.singletonList( + new RegisterControllerRecord.ControllerFeature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). + setMaxSupportedVersion(MetadataVersion.IBP_3_6_IV1.featureLevel()) + ).iterator() )); } } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java index 51ab544b18913..f5b17018dd4ef 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java @@ -49,7 +49,7 @@ public void testIteration() { iterator.next()); assertTrue(iterator.hasNext()); assertEquals(Collections.singletonList( - new ApiMessageAndVersion(TEST_ACLS.get(4).toRecord(), (short) 0)), + new ApiMessageAndVersion(TEST_ACLS.get(4).toRecord(), (short) 0)), iterator.next()); assertFalse(iterator.hasNext()); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java index 731a944639bf0..f001115a2f8c3 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java @@ -78,8 +78,8 @@ public void testCopyWithOnlyVersion() { final static List RECORDS_WITH_OLD_METADATA_VERSION = unmodifiableList(Collections.singletonList( 
 new ApiMessageAndVersion(new FeatureLevelRecord(). - setName(FEATURE_NAME). - setFeatureLevel(IBP_3_0_IV1.featureLevel()), (short) 0))); + setName(FEATURE_NAME). + setFeatureLevel(IBP_3_0_IV1.featureLevel()), (short) 0))); @Test public void testFromRecordsListWithOldMetadataVersion() { diff --git a/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java b/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java index 289739b53292f..1e28fcbeae7eb 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java @@ -45,18 +45,18 @@ public void testTopicAssignmentReplicas() { public void testConsistentEqualsAndHashCode() { List topicAssignments = Arrays.asList( new TopicAssignment( - Collections.singletonList( - partitionAssignment( - Arrays.asList(0, 1, 2) - ) + Collections.singletonList( + partitionAssignment( + Arrays.asList(0, 1, 2) ) + ) ), new TopicAssignment( - Collections.singletonList( - partitionAssignment( - Arrays.asList(1, 2, 0) - ) + Collections.singletonList( + partitionAssignment( + Arrays.asList(1, 2, 0) ) + ) ) ); @@ -83,7 +83,7 @@ public void testToString() { Uuid.fromString("jUqCchHtTHqMxeVv4dw1RA") ); List partitionAssignments = Collections.singletonList( - new PartitionAssignment(replicas, directories::get) + new PartitionAssignment(replicas, directories::get) ); TopicAssignment topicAssignment = new TopicAssignment(partitionAssignments); assertEquals("TopicAssignment(assignments=[PartitionAssignment(replicas=[0, 1, 2], " + From 074f00f18b428ef34df61f7bac0fa8a24c0eeb47 Mon Sep 17 00:00:00 2001 From: Sanskar Jhajharia Date: Tue, 4 Jun 2024 18:10:47 +0530 Subject: [PATCH 3/5] Cleanup some extra indentations --- .../java/org/apache/kafka/controller/QuorumControllerTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java index 9dec8adddeffb..e2f9d39acf272 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java @@ -812,7 +812,7 @@ public void testSnapshotSaveAndLoad() throws Throwable { setName(MetadataVersion.FEATURE_NAME). setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). setMaxSupportedVersion(MetadataVersion.IBP_3_7_IV0.featureLevel()) - ).iterator() + ).iterator() ))).get(); } for (int i = 0; i < numBrokers; i++) { From 881410c217f91e0d650c0bb9a52dc12920fa8783 Mon Sep 17 00:00:00 2001 From: Sanskar Jhajharia Date: Tue, 4 Jun 2024 20:31:31 +0530 Subject: [PATCH 4/5] Update the toString() method --- .../apache/kafka/metadata/FinalizedControllerFeatures.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java index 648efdfcc242c..8e240d9f8c704 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java @@ -70,9 +70,9 @@ public boolean equals(Object o) { @Override public String toString() { - return "{" + + return "FinalizedControllerFeatures(" + "featureMap=" + featureMap.toString() + ", epoch=" + epoch + - "}"; + ")"; } } From 3bd267644233f60d34c3afe0b1314fd9e25db854 Mon Sep 17 00:00:00 2001 From: Sanskar Jhajharia Date: Tue, 4 Jun 2024 21:12:12 +0530 Subject: [PATCH 5/5] Address review comments --- .../controller/QuorumControllerTestEnv.java | 1 - .../ReplicationControlManagerTest.java | 2 +- .../kafka/image/ImageDowngradeTest.java | 87 ++++++++++--------- 
.../image/loader/MetadataLoaderTest.java | 68 +++++++-------- .../image/writer/RaftSnapshotWriterTest.java | 3 +- 5 files changed, 82 insertions(+), 79 deletions(-) diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java index 0339a30ef2770..1661d0918c4b3 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java @@ -49,7 +49,6 @@ public static class Builder { private Consumer controllerBuilderInitializer = __ -> { }; private OptionalLong sessionTimeoutMillis = OptionalLong.empty(); private OptionalLong leaderImbalanceCheckIntervalNs = OptionalLong.empty(); - private final boolean eligibleLeaderReplicasEnabled = false; private BootstrapMetadata bootstrapMetadata = BootstrapMetadata. fromVersion(MetadataVersion.latestTesting(), "test-provided version"); diff --git a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java index 46bac197e5721..a598993bfed2e 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java @@ -2664,7 +2664,7 @@ public void testKRaftClusterDescriber() { ctx.registerBrokersWithDirs( 0, Collections.emptyList(), 1, Collections.emptyList(), - 2, singletonList(Uuid.fromString("ozwqsVMFSNiYQUPSJA3j0w")), + 2, asList(Uuid.fromString("ozwqsVMFSNiYQUPSJA3j0w")), 3, asList(Uuid.fromString("SSDgCZ4BTyec5QojGT65qg"), Uuid.fromString("K8KwMrviRcOUvgI8FPOJWg")), 4, Collections.emptyList() ); diff --git a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java index 64d4fc27f3e75..4da792ae95735 
100644 --- a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java @@ -94,7 +94,8 @@ public void testPremodernVersion() { setFeatureLevel((short) 4), (short) 0)), Arrays.asList( TEST_RECORDS.get(0), - TEST_RECORDS.get(1))); + TEST_RECORDS.get(1)) + ); } /** @@ -103,29 +104,30 @@ public void testPremodernVersion() { @Test public void testPreControlledShutdownStateVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_3_IV2, - Collections.singletonList( - "the inControlledShutdown state of one or more brokers"), - Arrays.asList( - metadataVersionRecord(MetadataVersion.IBP_3_3_IV3), - new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerId(123). - setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). - setBrokerEpoch(456). - setRack(null). - setFenced(false). - setInControlledShutdown(true), (short) 1), - TEST_RECORDS.get(0), - TEST_RECORDS.get(1)), - Arrays.asList( - metadataVersionRecord(MetadataVersion.IBP_3_3_IV2), - new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerId(123). - setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). - setBrokerEpoch(456). - setRack(null). - setFenced(false), (short) 0), - TEST_RECORDS.get(0), - TEST_RECORDS.get(1))); + Collections.singletonList( + "the inControlledShutdown state of one or more brokers"), + Arrays.asList( + metadataVersionRecord(MetadataVersion.IBP_3_3_IV3), + new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerId(123). + setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). + setBrokerEpoch(456). + setRack(null). + setFenced(false). + setInControlledShutdown(true), (short) 1), + TEST_RECORDS.get(0), + TEST_RECORDS.get(1)), + Arrays.asList( + metadataVersionRecord(MetadataVersion.IBP_3_3_IV2), + new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerId(123). + setIncarnationId(Uuid.fromString("XgjKo16hRWeWrTui0iR5Nw")). + setBrokerEpoch(456). + setRack(null). 
+ setFenced(false), (short) 0), + TEST_RECORDS.get(0), + TEST_RECORDS.get(1)) + ); } /** @@ -158,7 +160,8 @@ public void testPreZkMigrationSupportVersion() { setFenced(false). setInControlledShutdown(true), (short) 1), TEST_RECORDS.get(0), - TEST_RECORDS.get(1))); + TEST_RECORDS.get(1)) + ); } @Test @@ -167,22 +170,24 @@ void testDirectoryAssignmentState() { MetadataVersion inputMetadataVersion = outputMetadataVersion; PartitionRecord testPartitionRecord = (PartitionRecord) TEST_RECORDS.get(1).message(); writeWithExpectedLosses(outputMetadataVersion, - Collections.singletonList("the directory assignment state of one or more replicas"), - Arrays.asList( - metadataVersionRecord(inputMetadataVersion), - TEST_RECORDS.get(0), - new ApiMessageAndVersion( - testPartitionRecord.duplicate().setDirectories(Arrays.asList( - Uuid.fromString("c7QfSi6xSIGQVh3Qd5RJxA"), - Uuid.fromString("rWaCHejCRRiptDMvW5Xw0g"))), - (short) 2)), - Arrays.asList( - metadataVersionRecord(outputMetadataVersion), - new ApiMessageAndVersion(new ZkMigrationStateRecord(), (short) 0), - TEST_RECORDS.get(0), - new ApiMessageAndVersion( - testPartitionRecord.duplicate().setDirectories(Collections.emptyList()), - (short) 0))); + Collections.singletonList( + "the directory assignment state of one or more replicas"), + Arrays.asList( + metadataVersionRecord(inputMetadataVersion), + TEST_RECORDS.get(0), + new ApiMessageAndVersion( + testPartitionRecord.duplicate().setDirectories(Arrays.asList( + Uuid.fromString("c7QfSi6xSIGQVh3Qd5RJxA"), + Uuid.fromString("rWaCHejCRRiptDMvW5Xw0g"))), + (short) 2)), + Arrays.asList( + metadataVersionRecord(outputMetadataVersion), + new ApiMessageAndVersion(new ZkMigrationStateRecord(), (short) 0), + TEST_RECORDS.get(0), + new ApiMessageAndVersion( + testPartitionRecord.duplicate().setDirectories(Collections.emptyList()), + (short) 0)) + ); } private static void writeWithExpectedLosses( diff --git a/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java 
b/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java index 2ab42cb4778f1..550d32b4e2654 100644 --- a/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java @@ -52,7 +52,6 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.OptionalLong; @@ -64,6 +63,7 @@ import java.util.stream.Collectors; import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; import static org.apache.kafka.server.common.MetadataVersion.IBP_3_3_IV1; import static org.apache.kafka.server.common.MetadataVersion.IBP_3_3_IV2; import static org.apache.kafka.server.common.MetadataVersion.IBP_3_5_IV0; @@ -249,17 +249,17 @@ public void testPublisherCannotBeInstalledMoreThanOnce( setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0L)). build()) { - loader.installPublishers(Collections.singletonList(publisher)).get(); + loader.installPublishers(singletonList(publisher)).get(); if (loadSnapshot) { MockSnapshotReader snapshotReader = new MockSnapshotReader( new MetadataProvenance(200, 100, 4000), - Collections.singletonList( + singletonList( Batch.control( 200, 100, 4000, 10, - Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) ) ) ); @@ -277,13 +277,13 @@ public void testPublisherCannotBeInstalledMoreThanOnce( assertEquals("testPublisherCannotBeInstalledMoreThanOnce: Attempted to install " + "publisher MockPublisher, which is already installed.", assertThrows(ExecutionException.class, - () -> loader.installPublishers(Collections.singletonList(publisher)).get()). + () -> loader.installPublishers(singletonList(publisher)).get()). 
getCause().getMessage()); } else { assertEquals("testPublisherCannotBeInstalledMoreThanOnce: Attempted to install " + "a new publisher named MockPublisher, but there is already a publisher with that name.", assertThrows(ExecutionException.class, - () -> loader.installPublishers(Collections.singletonList(new MockPublisher())).get()). + () -> loader.installPublishers(singletonList(new MockPublisher())).get()). getCause().getMessage()); } } @@ -306,7 +306,7 @@ public void testRemovePublisher() throws Exception { loader.removeAndClosePublisher(publishers.get(1)).get(); MockSnapshotReader snapshotReader = MockSnapshotReader.fromRecordLists( new MetadataProvenance(100, 50, 2000), - Collections.singletonList(Collections.singletonList(new ApiMessageAndVersion( + singletonList(singletonList(new ApiMessageAndVersion( new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)))); @@ -334,7 +334,7 @@ public void testRemovePublisher() throws Exception { public void testLoadEmptySnapshot() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLoadEmptySnapshot"); MockTime time = new MockTime(); - List publishers = Collections.singletonList(new MockPublisher()); + List publishers = singletonList(new MockPublisher()); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setTime(time). 
@@ -364,13 +364,13 @@ private void loadEmptySnapshot( ) throws Exception { MockSnapshotReader snapshotReader = new MockSnapshotReader( new MetadataProvenance(offset, 100, 4000), - Collections.singletonList( + singletonList( Batch.control( 200, 100, 4000, 10, - Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) ) ) ); @@ -393,7 +393,7 @@ static MockBatchReader newSingleBatchReader( List records ) { return new MockBatchReader(batchBaseOffset, - Collections.singletonList(newBatch(batchBaseOffset, epoch, records))); + singletonList(newBatch(batchBaseOffset, epoch, records))); } static Batch newBatch( @@ -452,7 +452,7 @@ public Batch next() { public void testLoadEmptyBatch() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLoadEmptyBatch"); MockTime time = new MockTime(); - List publishers = Collections.singletonList(new MockPublisher()); + List publishers = singletonList(new MockPublisher()); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setTime(time). @@ -463,13 +463,13 @@ public void testLoadEmptyBatch() throws Exception { publishers.get(0).firstPublish.get(10, TimeUnit.SECONDS); MockBatchReader batchReader = new MockBatchReader( 300, - Collections.singletonList( + singletonList( Batch.control( 300, 100, 4000, 10, - Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) ) ) ).setTime(time); @@ -508,10 +508,10 @@ public void testLastAppliedOffset() throws Exception { loader.installPublishers(publishers).get(); loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(200, 100, 4000), asList( - Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). 
+ singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), - Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + singletonList(new ApiMessageAndVersion(new TopicRecord(). setName("foo"). setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) ))); @@ -520,8 +520,8 @@ public void testLastAppliedOffset() throws Exception { } loader.waitForAllEventsToBeHandled(); assertEquals(200L, loader.lastAppliedOffset()); - loader.handleCommit(new MockBatchReader(201, Collections.singletonList( - MockBatchReader.newBatch(201, 100, Collections.singletonList( + loader.handleCommit(new MockBatchReader(201, singletonList( + MockBatchReader.newBatch(201, 100, singletonList( new ApiMessageAndVersion(new RemoveTopicRecord(). setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)))))); loader.waitForAllEventsToBeHandled(); @@ -578,10 +578,10 @@ private void loadTestSnapshot( ) throws Exception { loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(offset, 100, 4000), asList( - Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). + singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), - Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + singletonList(new ApiMessageAndVersion(new TopicRecord(). setName("foo"). setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) ))); @@ -594,10 +594,10 @@ private void loadTestSnapshot2( ) throws Exception { loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(offset, 100, 4000), asList( - Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). + singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). 
setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)), - Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + singletonList(new ApiMessageAndVersion(new TopicRecord(). setName("bar"). setTopicId(Uuid.fromString("VcL2Mw-cT4aL6XV9VujzoQ")), (short) 0)) ))); @@ -610,7 +610,7 @@ private void loadTestSnapshot2( @Test public void testReloadSnapshot() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLastAppliedOffset"); - List publishers = Collections.singletonList(new MockPublisher("a")); + List publishers = singletonList(new MockPublisher("a")); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0)). @@ -637,8 +637,8 @@ public void testReloadSnapshot() throws Exception { assertFalse(publishers.get(0).latestImage.topics().topicsByName().containsKey("foo")); assertTrue(publishers.get(0).latestImage.topics().topicsByName().containsKey("bar")); - loader.handleCommit(new MockBatchReader(500, Collections.singletonList( - MockBatchReader.newBatch(500, 100, Collections.singletonList( + loader.handleCommit(new MockBatchReader(500, singletonList( + MockBatchReader.newBatch(500, 100, singletonList( new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(IBP_3_5_IV0.featureLevel()), (short) 0)))))); @@ -654,7 +654,7 @@ public void testReloadSnapshot() throws Exception { public void testPublishTransaction(boolean abortTxn) throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testTransactions"); MockPublisher publisher = new MockPublisher("testTransactions"); - List publishers = Collections.singletonList(publisher); + List publishers = singletonList(publisher); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0)). 
@@ -690,7 +690,7 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { if (abortTxn) { loader.handleCommit( - MockBatchReader.newSingleBatchReader(500, 100, Collections.singletonList( + MockBatchReader.newSingleBatchReader(500, 100, singletonList( new ApiMessageAndVersion(new AbortTransactionRecord(), (short) 0) ))); loader.waitForAllEventsToBeHandled(); @@ -699,7 +699,7 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { "Topic should not be visible since the transaction was aborted"); } else { loader.handleCommit( - MockBatchReader.newSingleBatchReader(500, 100, Collections.singletonList( + MockBatchReader.newSingleBatchReader(500, 100, singletonList( new ApiMessageAndVersion(new EndTransactionRecord(), (short) 0) ))); loader.waitForAllEventsToBeHandled(); @@ -715,7 +715,7 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { public void testPublishTransactionWithinBatch() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testPublishTransactionWithinBatch"); MockPublisher publisher = new MockPublisher("testPublishTransactionWithinBatch"); - List publishers = Collections.singletonList(publisher); + List publishers = singletonList(publisher); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0)). @@ -746,7 +746,7 @@ public void testPublishTransactionWithinBatch() throws Exception { public void testSnapshotDuringTransaction() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testSnapshotDuringTransaction"); MockPublisher publisher = new MockPublisher("testSnapshotDuringTransaction"); - List publishers = Collections.singletonList(publisher); + List publishers = singletonList(publisher); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0)). 
@@ -768,8 +768,8 @@ public void testSnapshotDuringTransaction() throws Exception { // loading a snapshot discards any in-flight transaction loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( - new MetadataProvenance(600, 101, 4000), Collections.singletonList( - Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + new MetadataProvenance(600, 101, 4000), singletonList( + singletonList(new ApiMessageAndVersion(new TopicRecord(). setName("foo"). setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) ))); @@ -804,9 +804,9 @@ public void onMetadataUpdate(MetadataDelta delta, MetadataImage newImage, Loader setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(1)). build()) { - loader.installPublishers(Collections.singletonList(capturingPublisher)).get(); + loader.installPublishers(singletonList(capturingPublisher)).get(); loader.handleCommit( - MockBatchReader.newSingleBatchReader(0, 1, Collections.singletonList( + MockBatchReader.newSingleBatchReader(0, 1, singletonList( // Any record will work here new ApiMessageAndVersion(new ConfigRecord() .setResourceType(ConfigResource.Type.BROKER.id()) diff --git a/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java b/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java index a7f7578fe590b..0137aeb077224 100644 --- a/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java @@ -22,7 +22,6 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; -import java.util.Collections; import static java.util.Collections.emptyList; import static org.apache.kafka.metadata.RecordTestUtils.testRecord; @@ -45,7 +44,7 @@ public void testFreezeAndClose() { assertTrue(snapshotWriter.isClosed()); assertEquals(Arrays.asList( Arrays.asList(testRecord(0), testRecord(1)), - Collections.singletonList(testRecord(2))), 
snapshotWriter.batches()); + Arrays.asList(testRecord(2))), snapshotWriter.batches()); } @Test