{metricPoints.map((row, index) => {
diff --git a/km-console/packages/layout-clusters-fe/src/pages/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/index.tsx
index 063e2db0b..c97da56a5 100644
--- a/km-console/packages/layout-clusters-fe/src/pages/index.tsx
+++ b/km-console/packages/layout-clusters-fe/src/pages/index.tsx
@@ -61,7 +61,6 @@ const LayoutContainer = () => {
// Route guard executed before entering a route
const routeBeforeEach = useCallback(
(path: string, permissionNode: string | number) => {
- getLicenseInfo((msg) => licenseEventBus.emit('licenseError', msg));
// Check whether the preconditions for entering the page are satisfied; if not, show the loading state
const isClusterNotExist = path.includes(':clusterId') && !global.clusterInfo;
const isNotLoadedPermissions = typeof global.hasPermission !== 'function';
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/KSConfigUtils.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/KSConfigUtils.java
index 55cdf051b..db77720b4 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/KSConfigUtils.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/config/KSConfigUtils.java
@@ -15,6 +15,9 @@ public class KSConfigUtils {
private KSConfigUtils() {
}
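+
+ // Presumably the lookback window, in seconds, for the cluster-balance ignored-topics check;
+ // description inferred from the property key. Defaults to 300 when not configured.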
+ @Value("${cluster-balance.ignored-topics.time-second:300}")
+ private Integer clusterBalanceIgnoredTopicsTimeSecond;
+
@Value(value = "${request.api-call.timeout-unit-ms:8000}")
private Integer apiCallTimeoutUnitMs;
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/state/impl/HealthStateServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/state/impl/HealthStateServiceImpl.java
index b2d34f2af..dc3327d9b 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/state/impl/HealthStateServiceImpl.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/health/state/impl/HealthStateServiceImpl.java
@@ -143,7 +143,7 @@ public BrokerMetrics calBrokerHealthMetrics(Long clusterPhyId, Integer brokerId)
// Not found in the DB, so treat the broker as alive by default
metrics.getMetrics().put(BROKER_METRIC_HEALTH_STATE, (float)HealthStateEnum.GOOD.getDimension());
} else if (!broker.alive()) {
- metrics.getMetrics().put(BROKER_METRIC_HEALTH_STATE, (float)HealthStateEnum.DEAD.getDimension());
+ metrics.getMetrics().put(BROKER_METRIC_HEALTH_STATE, (float) HealthStateEnum.DEAD.getDimension());
} else {
metrics.getMetrics().put(BROKER_METRIC_HEALTH_STATE, (float)this.calHealthState(aggResultList).getDimension());
}
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java
index 838ac5949..8fafdec53 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java
@@ -19,6 +19,7 @@
import org.apache.kafka.clients.admin.ElectLeadersResult;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.ElectionNotNeededException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import scala.jdk.javaapi.CollectionConverters;
@@ -108,12 +109,17 @@ private Result preferredReplicaElectionByKafkaClient(VersionItemParam item
return Result.buildSuc();
} catch (Exception e) {
+ if (e.getCause() instanceof ElectionNotNeededException) {
+ // ignore ElectionNotNeededException
+ return Result.buildSuc();
+ }
+
LOGGER.error(
"method=preferredReplicaElectionByKafkaClient||clusterPhyId={}||errMsg=exception",
partitionParam.getClusterPhyId(), e
);
- return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, e.getMessage());
+ return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
}
}
}
diff --git a/km-enterprise/km-rebalance/pom.xml b/km-enterprise/km-rebalance/pom.xml
new file mode 100644
index 000000000..fcc1167f4
--- /dev/null
+++ b/km-enterprise/km-rebalance/pom.xml
@@ -0,0 +1,64 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>km</artifactId>
+ <groupId>com.xiaojukeji.kafka</groupId>
+ <version>${revision}</version>
+ <relativePath>../../pom.xml</relativePath>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>km-rebalance</artifactId>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.kafka</groupId>
+ <artifactId>kafka-clients</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.elasticsearch.client</groupId>
+ <artifactId>elasticsearch-rest-client</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>net.sf.jopt-simple</groupId>
+ <artifactId>jopt-simple</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.xiaojukeji.kafka</groupId>
+ <artifactId>km-common</artifactId>
+ <version>${project.parent.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.xiaojukeji.kafka</groupId>
+ <artifactId>km-core</artifactId>
+ <version>${project.parent.version}</version>
+ </dependency>
+ </dependencies>
+ </project>
\ No newline at end of file
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/KafkaRebalanceMain.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/KafkaRebalanceMain.java
new file mode 100644
index 000000000..4990f2a9d
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/KafkaRebalanceMain.java
@@ -0,0 +1,143 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm;
+
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.ExecutionRebalance;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceParameter;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.HostEnv;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.OptimizerResult;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.utils.CommandLineUtils;
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+import org.apache.commons.io.FileUtils;
+import org.apache.kafka.clients.CommonClientConfigs;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+
+public class KafkaRebalanceMain {
+
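+ /**
+ * Builds a BalanceParameter from the parsed command line options, runs the rebalance
+ * optimization, and writes overview.json / detailed.json / task.json to the output path.
+ */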
+ public void run(OptionSet options) {
+ try {
+ BalanceParameter balanceParameter = new BalanceParameter();
+ if (options.has("excluded-topics")) {
+ balanceParameter.setExcludedTopics(options.valueOf("excluded-topics").toString());
+ }
+ if (options.has("offline-brokers")) {
+ balanceParameter.setOfflineBrokers(options.valueOf("offline-brokers").toString());
+ }
+ if (options.has("disk-threshold")) {
+ Double diskThreshold = (Double) options.valueOf("disk-threshold");
+ balanceParameter.setDiskThreshold(diskThreshold);
+ }
+ if (options.has("cpu-threshold")) {
+ Double cpuThreshold = (Double) options.valueOf("cpu-threshold");
+ balanceParameter.setCpuThreshold(cpuThreshold);
+ }
+ if (options.has("network-in-threshold")) {
+ Double networkInThreshold = (Double) options.valueOf("network-in-threshold");
+ balanceParameter.setNetworkInThreshold(networkInThreshold);
+ }
+ if (options.has("network-out-threshold")) {
+ Double networkOutThreshold = (Double) options.valueOf("network-out-threshold");
+ balanceParameter.setNetworkOutThreshold(networkOutThreshold);
+ }
+ if (options.has("balance-brokers")) {
+ balanceParameter.setBalanceBrokers(options.valueOf("balance-brokers").toString());
+ }
+ if (options.has("topic-leader-threshold")) {
+ Double topicLeaderThreshold = (Double) options.valueOf("topic-leader-threshold");
+ balanceParameter.setTopicLeaderThreshold(topicLeaderThreshold);
+ }
+ if (options.has("topic-replica-threshold")) {
+ Double topicReplicaThreshold = (Double) options.valueOf("topic-replica-threshold");
+ balanceParameter.setTopicReplicaThreshold(topicReplicaThreshold);
+ }
+ if (options.has("ignored-topics")) {
+ balanceParameter.setIgnoredTopics(options.valueOf("ignored-topics").toString());
+ }
+ String path = options.valueOf("output-path").toString();
+ String goals = options.valueOf("goals").toString();
+ balanceParameter.setGoals(Arrays.asList(goals.split(",")));
+ balanceParameter.setCluster(options.valueOf("cluster").toString());
+ Properties kafkaConfig = new Properties();
+ kafkaConfig.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, options.valueOf("bootstrap-servers").toString());
+ balanceParameter.setKafkaConfig(kafkaConfig);
+ if (options.has("es-password")) {
+ balanceParameter.setEsInfo(options.valueOf("es-rest-url").toString(), options.valueOf("es-password").toString(), options.valueOf("es-index-prefix").toString());
+ } else {
+ balanceParameter.setEsInfo(options.valueOf("es-rest-url").toString(), "", options.valueOf("es-index-prefix").toString());
+ }
+ balanceParameter.setBeforeSeconds((Integer) options.valueOf("before-seconds"));
+ String envFile = options.valueOf("hardware-env-file").toString();
+ String envJson = FileUtils.readFileToString(new File(envFile), "UTF-8");
+ List<HostEnv> env = new ObjectMapper().readValue(envJson, new TypeReference<List<HostEnv>>() {
+ });
+ balanceParameter.setHardwareEnv(env);
+ ExecutionRebalance exec = new ExecutionRebalance();
+ OptimizerResult optimizerResult = exec.optimizations(balanceParameter);
+ FileUtils.write(new File(path.concat("/overview.json")), optimizerResult.resultJsonOverview(), "UTF-8");
+ FileUtils.write(new File(path.concat("/detailed.json")), optimizerResult.resultJsonDetailed(), "UTF-8");
+ FileUtils.write(new File(path.concat("/task.json")), optimizerResult.resultJsonTask(), "UTF-8");
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
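+ /*
+ * Illustrative invocation (all values below are placeholders, not taken from this patch):
+ *   java com.xiaojukeji.know.streaming.km.rebalance.algorithm.KafkaRebalanceMain \
+ *     --bootstrap-servers broker1:9092 --es-rest-url es1:9200 --es-index-prefix ks_kafka_ \
+ *     --goals DiskDistributionGoal,NetworkInboundDistributionGoal --cluster demo-cluster \
+ *     --before-seconds 300 --hardware-env-file /tmp/env.json --output-path /tmp/balance
+ */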
+ public static void main(String[] args) {
+ OptionParser parser = new OptionParser();
+ parser.accepts("bootstrap-servers", "Kafka cluster bootstrap servers").withRequiredArg().ofType(String.class);
+ parser.accepts("es-rest-url", "The URL of Elasticsearch").withRequiredArg().ofType(String.class);
+ parser.accepts("es-password", "The password of Elasticsearch").withRequiredArg().ofType(String.class);
+ parser.accepts("es-index-prefix", "The index prefix of Elasticsearch").withRequiredArg().ofType(String.class);
+ parser.accepts("goals", "Balanced goals include TopicLeadersDistributionGoal,TopicReplicaDistributionGoal,DiskDistributionGoal,NetworkInboundDistributionGoal,NetworkOutboundDistributionGoal").withRequiredArg().ofType(String.class);
+ parser.accepts("cluster", "Balanced cluster name").withRequiredArg().ofType(String.class);
+ parser.accepts("excluded-topics", "Topics excluded from data balancing").withOptionalArg().ofType(String.class);
+ parser.accepts("ignored-topics", "Topics excluded from the model calculation").withOptionalArg().ofType(String.class);
+ parser.accepts("offline-brokers", "Brokers excluded from data balancing").withOptionalArg().ofType(String.class);
+ parser.accepts("balance-brokers", "List of brokers to balance").withOptionalArg().ofType(String.class);
+ parser.accepts("disk-threshold", "Disk data balance threshold").withOptionalArg().ofType(Double.class);
+ parser.accepts("topic-leader-threshold", "Topic leader distribution threshold").withOptionalArg().ofType(Double.class);
+ parser.accepts("topic-replica-threshold", "Topic replica distribution threshold").withOptionalArg().ofType(Double.class);
+ parser.accepts("cpu-threshold", "CPU utilization balance threshold").withOptionalArg().ofType(Double.class);
+ parser.accepts("network-in-threshold", "Network inbound traffic threshold").withOptionalArg().ofType(Double.class);
+ parser.accepts("network-out-threshold", "Network outbound traffic threshold").withOptionalArg().ofType(Double.class);
+ parser.accepts("before-seconds", "Time range in seconds of ES metric data to query").withRequiredArg().ofType(Integer.class);
+ parser.accepts("hardware-env-file", "Machine environment information includes cpu, disk and network").withRequiredArg().ofType(String.class);
+ parser.accepts("output-path", "Cluster balancing result file directory").withRequiredArg().ofType(String.class);
+ OptionSet options = parser.parse(args);
+ if (args.length == 0) {
+ CommandLineUtils.printUsageAndDie(parser, "Running parameters need to be configured to perform cluster balancing");
+ }
+ if (!options.has("bootstrap-servers")) {
+ CommandLineUtils.printUsageAndDie(parser, "bootstrap-servers cannot be empty");
+ }
+ if (!options.has("es-rest-url")) {
+ CommandLineUtils.printUsageAndDie(parser, "es-rest-url cannot be empty");
+ }
+ if (!options.has("es-index-prefix")) {
+ CommandLineUtils.printUsageAndDie(parser, "es-index-prefix cannot be empty");
+ }
+ if (!options.has("goals")) {
+ CommandLineUtils.printUsageAndDie(parser, "goals cannot be empty");
+ }
+ if (!options.has("cluster")) {
+ CommandLineUtils.printUsageAndDie(parser, "cluster name cannot be empty");
+ }
+ if (!options.has("before-seconds")) {
+ CommandLineUtils.printUsageAndDie(parser, "before-seconds cannot be empty");
+ }
+ if (!options.has("hardware-env-file")) {
+ CommandLineUtils.printUsageAndDie(parser, "hardware-env-file cannot be empty");
+ }
+ if (!options.has("output-path")) {
+ CommandLineUtils.printUsageAndDie(parser, "output-path cannot be empty");
+ }
+ KafkaRebalanceMain rebalanceMain = new KafkaRebalanceMain();
+ rebalanceMain.run(options);
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/exception/OptimizationFailureException.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/exception/OptimizationFailureException.java
new file mode 100644
index 000000000..0ef9f8b29
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/exception/OptimizationFailureException.java
@@ -0,0 +1,15 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.exception;
+
+public class OptimizationFailureException extends Exception {
+ public OptimizationFailureException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public OptimizationFailureException(String message) {
+ super(message);
+ }
+
+ public OptimizationFailureException(Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/ExecutionRebalance.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/ExecutionRebalance.java
new file mode 100644
index 000000000..045b193f3
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/ExecutionRebalance.java
@@ -0,0 +1,78 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceGoal;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceParameter;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceThreshold;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BrokerBalanceState;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Load;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.GoalOptimizer;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.OptimizationOptions;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.OptimizerResult;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.utils.GoalUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.Validate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+
+public class ExecutionRebalance {
+ private static final Logger logger = LoggerFactory.getLogger(ExecutionRebalance.class);
+
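+ /**
+ * Validates the balance parameters, builds the initial cluster model from the configured
+ * Kafka and Elasticsearch sources, and runs the goal optimizer to produce an OptimizerResult.
+ */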
+ public OptimizerResult optimizations(BalanceParameter balanceParameter) {
+ Validate.isTrue(StringUtils.isNotBlank(balanceParameter.getCluster()), "cluster is empty");
+ Validate.isTrue(balanceParameter.getKafkaConfig() != null, "Kafka config properties is empty");
+ Validate.isTrue(balanceParameter.getGoals() != null, "Balance goals is empty");
+ Validate.isTrue(StringUtils.isNotBlank(balanceParameter.getEsIndexPrefix()), "EsIndexPrefix is empty");
+ Validate.isTrue(StringUtils.isNotBlank(balanceParameter.getEsRestURL()), "EsRestURL is empty");
+ Validate.isTrue(balanceParameter.getHardwareEnv() != null, "HardwareEnv is empty");
+ logger.info("Cluster balancing start");
+ ClusterModel clusterModel = GoalUtils.getInitClusterModel(balanceParameter);
+ GoalOptimizer optimizer = new GoalOptimizer();
+ OptimizerResult optimizerResult = optimizer.optimizations(clusterModel, new OptimizationOptions(balanceParameter));
+ logger.info("Cluster balancing completed");
+ return optimizerResult;
+ }
+
+ public static Map<Resource, Double> getClusterAvgResourcesState(BalanceParameter balanceParameter) {
+ ClusterModel clusterModel = GoalUtils.getInitClusterModel(balanceParameter);
+ Load load = clusterModel.load();
+ Map<Resource, Double> avgResource = new HashMap<>();
+ avgResource.put(Resource.DISK, load.loadFor(Resource.DISK) / clusterModel.brokers().size());
+ avgResource.put(Resource.CPU, load.loadFor(Resource.CPU) / clusterModel.brokers().size());
+ avgResource.put(Resource.NW_OUT, load.loadFor(Resource.NW_OUT) / clusterModel.brokers().size());
+ avgResource.put(Resource.NW_IN, load.loadFor(Resource.NW_IN) / clusterModel.brokers().size());
+ return avgResource;
+ }
+
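+ /**
+ * Computes each broker's load and utilization for the requested goals and classifies it
+ * against the cluster-average balance threshold (-1 below, 0 within, 1 above the range).
+ */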
+ public static Map<Integer, BrokerBalanceState> getBrokerResourcesBalanceState(BalanceParameter balanceParameter) {
+ Map<Integer, BrokerBalanceState> balanceState = new HashMap<>();
+ ClusterModel clusterModel = GoalUtils.getInitClusterModel(balanceParameter);
+ double[] clusterAvgResource = clusterModel.avgOfUtilization();
+ Map<String, BalanceThreshold> balanceThreshold = GoalUtils.getBalanceThreshold(balanceParameter, clusterAvgResource);
+ clusterModel.brokers().forEach(i -> {
+ BrokerBalanceState state = new BrokerBalanceState();
+ if (balanceParameter.getGoals().contains(BalanceGoal.DISK.goal())) {
+ state.setDiskAvgResource(i.load().loadFor(Resource.DISK));
+ state.setDiskUtilization(i.utilizationFor(Resource.DISK));
+ state.setDiskBalanceState(balanceThreshold.get(BalanceGoal.DISK.goal()).state(i.utilizationFor(Resource.DISK)));
+ }
+ if (balanceParameter.getGoals().contains(BalanceGoal.NW_IN.goal())) {
+ state.setBytesInAvgResource(i.load().loadFor(Resource.NW_IN));
+ state.setBytesInUtilization(i.utilizationFor(Resource.NW_IN));
+ state.setBytesInBalanceState(balanceThreshold.get(BalanceGoal.NW_IN.goal()).state(i.utilizationFor(Resource.NW_IN)));
+ }
+ if (balanceParameter.getGoals().contains(BalanceGoal.NW_OUT.goal())) {
+ state.setBytesOutAvgResource(i.load().loadFor(Resource.NW_OUT));
+ state.setBytesOutUtilization(i.utilizationFor(Resource.NW_OUT));
+ state.setBytesOutBalanceState(balanceThreshold.get(BalanceGoal.NW_OUT.goal()).state(i.utilizationFor(Resource.NW_OUT)));
+ }
+ balanceState.put(i.id(), state);
+ });
+ return balanceState;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceActionHistory.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceActionHistory.java
new file mode 100644
index 000000000..d489bcc86
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceActionHistory.java
@@ -0,0 +1,76 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+public class BalanceActionHistory {
+ // Balancing goal
+ private String goal;
+ // Balancing action type
+ private String actionType;
+ // Topic being balanced
+ private String topic;
+ // Partition being balanced
+ private int partition;
+ // Source broker ID
+ private int sourceBrokerId;
+ // Destination broker ID
+ private int destinationBrokerId;
+
+ public String getGoal() {
+ return goal;
+ }
+
+ public void setGoal(String goal) {
+ this.goal = goal;
+ }
+
+ public String getActionType() {
+ return actionType;
+ }
+
+ public void setActionType(String actionType) {
+ this.actionType = actionType;
+ }
+
+ public String getTopic() {
+ return topic;
+ }
+
+ public void setTopic(String topic) {
+ this.topic = topic;
+ }
+
+ public int getPartition() {
+ return partition;
+ }
+
+ public void setPartition(int partition) {
+ this.partition = partition;
+ }
+
+ public int getSourceBrokerId() {
+ return sourceBrokerId;
+ }
+
+ public void setSourceBrokerId(int sourceBrokerId) {
+ this.sourceBrokerId = sourceBrokerId;
+ }
+
+ public int getDestinationBrokerId() {
+ return destinationBrokerId;
+ }
+
+ public void setDestinationBrokerId(int destinationBrokerId) {
+ this.destinationBrokerId = destinationBrokerId;
+ }
+
+ @Override
+ public String toString() {
+ return "BalanceActionHistory{" +
+ "goal='" + goal + '\'' +
+ ", actionType='" + actionType + '\'' +
+ ", topic='" + topic + '\'' +
+ ", partition='" + partition + '\'' +
+ ", sourceBrokerId='" + sourceBrokerId + '\'' +
+ ", destinationBrokerId='" + destinationBrokerId + '\'' +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceDetailed.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceDetailed.java
new file mode 100644
index 000000000..92006fe33
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceDetailed.java
@@ -0,0 +1,173 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+public class BalanceDetailed {
+ private int brokerId;
+ private String host;
+ // Current CPU utilization
+ private double currentCPUUtilization;
+ // CPU utilization after balancing
+ private double lastCPUUtilization;
+ // Current disk utilization
+ private double currentDiskUtilization;
+ // Disk utilization after balancing
+ private double lastDiskUtilization;
+ // Current network inbound utilization
+ private double currentNetworkInUtilization;
+ // Network inbound utilization after balancing
+ private double lastNetworkInUtilization;
+ // Current network outbound utilization
+ private double currentNetworkOutUtilization;
+ // Network outbound utilization after balancing
+ private double lastNetworkOutUtilization;
+ // Balance state
+ private int balanceState = 0;
+ // Disk size moved in
+ private double moveInDiskSize;
+ // Disk size moved out
+ private double moveOutDiskSize;
+ // Number of replicas moved in
+ private double moveInReplicas;
+ // Number of replicas moved out
+ private double moveOutReplicas;
+
+ public int getBrokerId() {
+ return brokerId;
+ }
+
+ public void setBrokerId(int brokerId) {
+ this.brokerId = brokerId;
+ }
+
+ public double getCurrentCPUUtilization() {
+ return currentCPUUtilization;
+ }
+
+ public void setCurrentCPUUtilization(double currentCPUUtilization) {
+ this.currentCPUUtilization = currentCPUUtilization;
+ }
+
+ public double getLastCPUUtilization() {
+ return lastCPUUtilization;
+ }
+
+ public void setLastCPUUtilization(double lastCPUUtilization) {
+ this.lastCPUUtilization = lastCPUUtilization;
+ }
+
+ public double getCurrentDiskUtilization() {
+ return currentDiskUtilization;
+ }
+
+ public void setCurrentDiskUtilization(double currentDiskUtilization) {
+ this.currentDiskUtilization = currentDiskUtilization;
+ }
+
+ public double getLastDiskUtilization() {
+ return lastDiskUtilization;
+ }
+
+ public void setLastDiskUtilization(double lastDiskUtilization) {
+ this.lastDiskUtilization = lastDiskUtilization;
+ }
+
+ public double getCurrentNetworkInUtilization() {
+ return currentNetworkInUtilization;
+ }
+
+ public void setCurrentNetworkInUtilization(double currentNetworkInUtilization) {
+ this.currentNetworkInUtilization = currentNetworkInUtilization;
+ }
+
+ public double getLastNetworkInUtilization() {
+ return lastNetworkInUtilization;
+ }
+
+ public void setLastNetworkInUtilization(double lastNetworkInUtilization) {
+ this.lastNetworkInUtilization = lastNetworkInUtilization;
+ }
+
+ public double getCurrentNetworkOutUtilization() {
+ return currentNetworkOutUtilization;
+ }
+
+ public void setCurrentNetworkOutUtilization(double currentNetworkOutUtilization) {
+ this.currentNetworkOutUtilization = currentNetworkOutUtilization;
+ }
+
+ public double getLastNetworkOutUtilization() {
+ return lastNetworkOutUtilization;
+ }
+
+ public void setLastNetworkOutUtilization(double lastNetworkOutUtilization) {
+ this.lastNetworkOutUtilization = lastNetworkOutUtilization;
+ }
+
+ public int getBalanceState() {
+ return balanceState;
+ }
+
+ public void setBalanceState(int balanceState) {
+ this.balanceState = balanceState;
+ }
+
+ public double getMoveInDiskSize() {
+ return moveInDiskSize;
+ }
+
+ public void setMoveInDiskSize(double moveInDiskSize) {
+ this.moveInDiskSize = moveInDiskSize;
+ }
+
+ public double getMoveOutDiskSize() {
+ return moveOutDiskSize;
+ }
+
+ public void setMoveOutDiskSize(double moveOutDiskSize) {
+ this.moveOutDiskSize = moveOutDiskSize;
+ }
+
+ public double getMoveInReplicas() {
+ return moveInReplicas;
+ }
+
+ public void setMoveInReplicas(double moveInReplicas) {
+ this.moveInReplicas = moveInReplicas;
+ }
+
+ public double getMoveOutReplicas() {
+ return moveOutReplicas;
+ }
+
+ public void setMoveOutReplicas(double moveOutReplicas) {
+ this.moveOutReplicas = moveOutReplicas;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public void setHost(String host) {
+ this.host = host;
+ }
+
+ @Override
+ public String toString() {
+ return "BalanceDetailed{" +
+ "brokerId=" + brokerId +
+ ", host='" + host + '\'' +
+ ", currentCPUUtilization=" + currentCPUUtilization +
+ ", lastCPUUtilization=" + lastCPUUtilization +
+ ", currentDiskUtilization=" + currentDiskUtilization +
+ ", lastDiskUtilization=" + lastDiskUtilization +
+ ", currentNetworkInUtilization=" + currentNetworkInUtilization +
+ ", lastNetworkInUtilization=" + lastNetworkInUtilization +
+ ", currentNetworkOutUtilization=" + currentNetworkOutUtilization +
+ ", lastNetworkOutUtilization=" + lastNetworkOutUtilization +
+ ", balanceState=" + balanceState +
+ ", moveInDiskSize=" + moveInDiskSize +
+ ", moveOutDiskSize=" + moveOutDiskSize +
+ ", moveInReplicas=" + moveInReplicas +
+ ", moveOutReplicas=" + moveOutReplicas +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceGoal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceGoal.java
new file mode 100644
index 000000000..8e0187e86
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceGoal.java
@@ -0,0 +1,20 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+public enum BalanceGoal {
+ // Goal names used when KM passes them as parameters
+ TOPIC_LEADERS("TopicLeadersDistributionGoal"),
+ TOPIC_REPLICA("TopicReplicaDistributionGoal"),
+ DISK("DiskDistributionGoal"),
+ NW_IN("NetworkInboundDistributionGoal"),
+ NW_OUT("NetworkOutboundDistributionGoal");
+
+ private final String goal;
+
+ BalanceGoal(String goal) {
+ this.goal = goal;
+ }
+
+ public String goal() {
+ return goal;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceOverview.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceOverview.java
new file mode 100644
index 000000000..f8017c0a2
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceOverview.java
@@ -0,0 +1,102 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+
+import java.util.Map;
+
+public class BalanceOverview {
+ // Task type
+ private String taskType;
+ // Broker (node) range
+ private String nodeRange;
+ // Total data size to move
+ private double totalMoveSize;
+ // Topic blacklist
+ private String topicBlacklist;
+ // Number of replicas to move
+ private int moveReplicas;
+ // Topics to move
+ private String moveTopics;
+ // Balance thresholds
+ private Map<Resource, Double> balanceThreshold;
+ // Brokers to remove
+ private String removeNode;
+
+ public String getTaskType() {
+ return taskType;
+ }
+
+ public void setTaskType(String taskType) {
+ this.taskType = taskType;
+ }
+
+ public String getNodeRange() {
+ return nodeRange;
+ }
+
+ public void setNodeRange(String nodeRange) {
+ this.nodeRange = nodeRange;
+ }
+
+ public double getTotalMoveSize() {
+ return totalMoveSize;
+ }
+
+ public void setTotalMoveSize(double totalMoveSize) {
+ this.totalMoveSize = totalMoveSize;
+ }
+
+ public String getTopicBlacklist() {
+ return topicBlacklist;
+ }
+
+ public void setTopicBlacklist(String topicBlacklist) {
+ this.topicBlacklist = topicBlacklist;
+ }
+
+ public int getMoveReplicas() {
+ return moveReplicas;
+ }
+
+ public void setMoveReplicas(int moveReplicas) {
+ this.moveReplicas = moveReplicas;
+ }
+
+ public String getMoveTopics() {
+ return moveTopics;
+ }
+
+ public void setMoveTopics(String moveTopics) {
+ this.moveTopics = moveTopics;
+ }
+
+ public Map<Resource, Double> getBalanceThreshold() {
+ return balanceThreshold;
+ }
+
+ public void setBalanceThreshold(Map<Resource, Double> balanceThreshold) {
+ this.balanceThreshold = balanceThreshold;
+ }
+
+ public String getRemoveNode() {
+ return removeNode;
+ }
+
+ public void setRemoveNode(String removeNode) {
+ this.removeNode = removeNode;
+ }
+
+ @Override
+ public String toString() {
+ return "BalanceOverview{" +
+ "taskType='" + taskType + '\'' +
+ ", nodeRange='" + nodeRange + '\'' +
+ ", totalMoveSize=" + totalMoveSize +
+ ", topicBlacklist='" + topicBlacklist + '\'' +
+ ", moveReplicas=" + moveReplicas +
+ ", moveTopics='" + moveTopics + '\'' +
+ ", balanceThreshold=" + balanceThreshold +
+ ", removeNode='" + removeNode + '\'' +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceParameter.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceParameter.java
new file mode 100644
index 000000000..e9c5f3fcb
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceParameter.java
@@ -0,0 +1,207 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+import java.util.List;
+import java.util.Properties;
+
+public class BalanceParameter {
+ // Cluster name
+ private String cluster;
+ // Kafka client configuration for accessing the cluster
+ private Properties kafkaConfig;
+ // Elasticsearch REST URL
+ private String esRestURL;
+
+ // Elasticsearch password
+ private String esPassword;
+
+ // Prefix of the Elasticsearch indices storing the metrics
+ private String esIndexPrefix;
+ // Balancing goals
+ private List<String> goals;
+ // Topic blacklist (still included in the model calculation)
+ private String excludedTopics = "";
+ // Ignored topics (excluded from the model calculation)
+ private String ignoredTopics = "";
+ // Brokers being taken offline
+ private String offlineBrokers = "";
+ // Brokers to balance
+ private String balanceBrokers = "";
+ // Default topic replica distribution threshold
+ private double topicReplicaThreshold = 0.1;
+ // Disk fluctuation threshold
+ private double diskThreshold = 0.1;
+ // CPU fluctuation threshold
+ private double cpuThreshold = 0.1;
+ // Network inbound fluctuation threshold
+ private double networkInThreshold = 0.1;
+ // Network outbound fluctuation threshold
+ private double networkOutThreshold = 0.1;
+ // Time range (in seconds) of metrics used for balancing
+ private int beforeSeconds = 300;
+ // Hardware environment of every broker in the cluster: cpu, disk, bytesIn, bytesOut
+ private List<HostEnv> hardwareEnv;
+ // Minimum leader fluctuation threshold; absolute evenness is not pursued, to avoid cluster traffic jitter
+ private double topicLeaderThreshold = 0.1;
+
+ public String getCluster() {
+ return cluster;
+ }
+
+ public void setCluster(String cluster) {
+ this.cluster = cluster;
+ }
+
+ public String getEsRestURL() {
+ return esRestURL;
+ }
+
+ public void setEsInfo(String esRestURL, String esPassword, String esIndexPrefix) {
+ this.esRestURL = esRestURL;
+ this.esPassword = esPassword;
+ this.esIndexPrefix = esIndexPrefix;
+ }
+
+ public String getEsPassword() {
+ return esPassword;
+ }
+
+ public List<String> getGoals() {
+ return goals;
+ }
+
+ public void setGoals(List<String> goals) {
+ this.goals = goals;
+ }
+
+ public String getExcludedTopics() {
+ return excludedTopics;
+ }
+
+ public void setExcludedTopics(String excludedTopics) {
+ this.excludedTopics = excludedTopics;
+ }
+
+ public String getIgnoredTopics() {
+ return ignoredTopics;
+ }
+
+ public void setIgnoredTopics(String ignoredTopics) {
+ this.ignoredTopics = ignoredTopics;
+ }
+
+ public double getTopicReplicaThreshold() {
+ return topicReplicaThreshold;
+ }
+
+ public void setTopicReplicaThreshold(double topicReplicaThreshold) {
+ this.topicReplicaThreshold = topicReplicaThreshold;
+ }
+
+ public double getDiskThreshold() {
+ return diskThreshold;
+ }
+
+ public void setDiskThreshold(double diskThreshold) {
+ this.diskThreshold = diskThreshold;
+ }
+
+ public double getCpuThreshold() {
+ return cpuThreshold;
+ }
+
+ public void setCpuThreshold(double cpuThreshold) {
+ this.cpuThreshold = cpuThreshold;
+ }
+
+ public double getNetworkInThreshold() {
+ return networkInThreshold;
+ }
+
+ public void setNetworkInThreshold(double networkInThreshold) {
+ this.networkInThreshold = networkInThreshold;
+ }
+
+ public double getNetworkOutThreshold() {
+ return networkOutThreshold;
+ }
+
+ public void setNetworkOutThreshold(double networkOutThreshold) {
+ this.networkOutThreshold = networkOutThreshold;
+ }
+
+ public List<HostEnv> getHardwareEnv() {
+ return hardwareEnv;
+ }
+
+ public void setHardwareEnv(List<HostEnv> hardwareEnv) {
+ this.hardwareEnv = hardwareEnv;
+ }
+
+ public String getBalanceBrokers() {
+ return balanceBrokers;
+ }
+
+ public void setBalanceBrokers(String balanceBrokers) {
+ this.balanceBrokers = balanceBrokers;
+ }
+
+ public Properties getKafkaConfig() {
+ return kafkaConfig;
+ }
+
+ public void setKafkaConfig(Properties kafkaConfig) {
+ this.kafkaConfig = kafkaConfig;
+ }
+
+ public String getEsIndexPrefix() {
+ return esIndexPrefix;
+ }
+
+ public String getOfflineBrokers() {
+ return offlineBrokers;
+ }
+
+ public void setOfflineBrokers(String offlineBrokers) {
+ this.offlineBrokers = offlineBrokers;
+ }
+
+ public int getBeforeSeconds() {
+ return beforeSeconds;
+ }
+
+ public void setBeforeSeconds(int beforeSeconds) {
+ this.beforeSeconds = beforeSeconds;
+ }
+
+ public double getTopicLeaderThreshold() {
+ return topicLeaderThreshold;
+ }
+
+ public void setTopicLeaderThreshold(double topicLeaderThreshold) {
+ this.topicLeaderThreshold = topicLeaderThreshold;
+ }
+
+ @Override
+ public String toString() {
+ return "BalanceParameter{" +
+ "cluster='" + cluster + '\'' +
+ ", kafkaConfig=" + kafkaConfig +
+ ", esRestURL='" + esRestURL + '\'' +
+ ", esPassword='" + esPassword + '\'' +
+ ", esIndexPrefix='" + esIndexPrefix + '\'' +
+ ", goals=" + goals +
+ ", excludedTopics='" + excludedTopics + '\'' +
+ ", ignoredTopics='" + ignoredTopics + '\'' +
+ ", offlineBrokers='" + offlineBrokers + '\'' +
+ ", balanceBrokers='" + balanceBrokers + '\'' +
+ ", topicReplicaThreshold=" + topicReplicaThreshold +
+ ", diskThreshold=" + diskThreshold +
+ ", cpuThreshold=" + cpuThreshold +
+ ", networkInThreshold=" + networkInThreshold +
+ ", networkOutThreshold=" + networkOutThreshold +
+ ", beforeSeconds=" + beforeSeconds +
+ ", hardwareEnv=" + hardwareEnv +
+ ", topicLeaderThreshold=" + topicLeaderThreshold +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceTask.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceTask.java
new file mode 100644
index 000000000..f79b8f3ab
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceTask.java
@@ -0,0 +1,43 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+import java.util.List;
+
+public class BalanceTask {
+ private String topic;
+ private int partition;
+ // Replica assignment list (broker IDs)
+ private List<Integer> replicas;
+
+ public String getTopic() {
+ return topic;
+ }
+
+ public void setTopic(String topic) {
+ this.topic = topic;
+ }
+
+ public int getPartition() {
+ return partition;
+ }
+
+ public void setPartition(int partition) {
+ this.partition = partition;
+ }
+
+ public List<Integer> getReplicas() {
+ return replicas;
+ }
+
+ public void setReplicas(List<Integer> replicas) {
+ this.replicas = replicas;
+ }
+
+ @Override
+ public String toString() {
+ return "BalanceTask{" +
+ "topic='" + topic + '\'' +
+ ", partition=" + partition +
+ ", replicas=" + replicas +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceThreshold.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceThreshold.java
new file mode 100644
index 000000000..edbc13bd1
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceThreshold.java
@@ -0,0 +1,41 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+
+public class BalanceThreshold {
+ private final Resource _resource;
+ private final double _upper;
+ private final double _lower;
+
+ public BalanceThreshold(Resource resource, double threshold, double avgResource) {
+ _resource = resource;
+ _upper = avgResource * (1 + threshold);
+ _lower = avgResource * (1 - threshold);
+ }
+
+ public Resource resource() {
+ return _resource;
+ }
+
+ public boolean isInRange(double utilization) {
+ return utilization > _lower && utilization < _upper;
+ }
+
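+ /**
+ * Classifies a utilization value against the threshold band:
+ * -1 below the lower bound, 1 above the upper bound, 0 within the balanced range.
+ */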
+ public int state(double utilization) {
+ if (utilization <= _lower) {
+ return -1;
+ } else if (utilization >= _upper) {
+ return 1;
+ }
+ return 0;
+ }
+
+ @Override
+ public String toString() {
+ return "BalanceThreshold{" +
+ "_resource=" + _resource +
+ ", _upper=" + _upper +
+ ", _lower=" + _lower +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BrokerBalanceState.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BrokerBalanceState.java
new file mode 100644
index 000000000..c30f1bdf6
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BrokerBalanceState.java
@@ -0,0 +1,144 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+public class BrokerBalanceState {
+ // Average CPU resource
+ private Double cpuAvgResource;
+ // CPU resource utilization
+ private Double cpuUtilization;
+ // -1: below the balanced range
+ // 0: within the balanced range
+ // 1: above the balanced range
+ private Integer cpuBalanceState;
+ // Average disk resource
+ private Double diskAvgResource;
+ // Disk resource utilization
+ private Double diskUtilization;
+ // Disk balance state
+ private Integer diskBalanceState;
+ // Average inbound traffic resource
+ private Double bytesInAvgResource;
+ // Inbound traffic utilization
+ private Double bytesInUtilization;
+ // Inbound traffic balance state
+ private Integer bytesInBalanceState;
+ // Average outbound traffic resource
+ private Double bytesOutAvgResource;
+ // Outbound traffic utilization
+ private Double bytesOutUtilization;
+ // Outbound traffic balance state
+ private Integer bytesOutBalanceState;
+
+ public Double getCpuAvgResource() {
+ return cpuAvgResource;
+ }
+
+ public void setCpuAvgResource(Double cpuAvgResource) {
+ this.cpuAvgResource = cpuAvgResource;
+ }
+
+ public Double getCpuUtilization() {
+ return cpuUtilization;
+ }
+
+ public void setCpuUtilization(Double cpuUtilization) {
+ this.cpuUtilization = cpuUtilization;
+ }
+
+ public Integer getCpuBalanceState() {
+ return cpuBalanceState;
+ }
+
+ public void setCpuBalanceState(Integer cpuBalanceState) {
+ this.cpuBalanceState = cpuBalanceState;
+ }
+
+ public Double getDiskAvgResource() {
+ return diskAvgResource;
+ }
+
+ public void setDiskAvgResource(Double diskAvgResource) {
+ this.diskAvgResource = diskAvgResource;
+ }
+
+ public Double getDiskUtilization() {
+ return diskUtilization;
+ }
+
+ public void setDiskUtilization(Double diskUtilization) {
+ this.diskUtilization = diskUtilization;
+ }
+
+ public Integer getDiskBalanceState() {
+ return diskBalanceState;
+ }
+
+ public void setDiskBalanceState(Integer diskBalanceState) {
+ this.diskBalanceState = diskBalanceState;
+ }
+
+ public Double getBytesInAvgResource() {
+ return bytesInAvgResource;
+ }
+
+ public void setBytesInAvgResource(Double bytesInAvgResource) {
+ this.bytesInAvgResource = bytesInAvgResource;
+ }
+
+ public Double getBytesInUtilization() {
+ return bytesInUtilization;
+ }
+
+ public void setBytesInUtilization(Double bytesInUtilization) {
+ this.bytesInUtilization = bytesInUtilization;
+ }
+
+ public Integer getBytesInBalanceState() {
+ return bytesInBalanceState;
+ }
+
+ public void setBytesInBalanceState(Integer bytesInBalanceState) {
+ this.bytesInBalanceState = bytesInBalanceState;
+ }
+
+ public Double getBytesOutAvgResource() {
+ return bytesOutAvgResource;
+ }
+
+ public void setBytesOutAvgResource(Double bytesOutAvgResource) {
+ this.bytesOutAvgResource = bytesOutAvgResource;
+ }
+
+ public Double getBytesOutUtilization() {
+ return bytesOutUtilization;
+ }
+
+ public void setBytesOutUtilization(Double bytesOutUtilization) {
+ this.bytesOutUtilization = bytesOutUtilization;
+ }
+
+ public Integer getBytesOutBalanceState() {
+ return bytesOutBalanceState;
+ }
+
+ public void setBytesOutBalanceState(Integer bytesOutBalanceState) {
+ this.bytesOutBalanceState = bytesOutBalanceState;
+ }
+
+ @Override
+ public String toString() {
+ return "BrokerBalanceState{" +
+ "cpuAvgResource=" + cpuAvgResource +
+ ", cpuUtilization=" + cpuUtilization +
+ ", cpuBalanceState=" + cpuBalanceState +
+ ", diskAvgResource=" + diskAvgResource +
+ ", diskUtilization=" + diskUtilization +
+ ", diskBalanceState=" + diskBalanceState +
+ ", bytesInAvgResource=" + bytesInAvgResource +
+ ", bytesInUtilization=" + bytesInUtilization +
+ ", bytesInBalanceState=" + bytesInBalanceState +
+ ", bytesOutAvgResource=" + bytesOutAvgResource +
+ ", bytesOutUtilization=" + bytesOutUtilization +
+ ", bytesOutBalanceState=" + bytesOutBalanceState +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/HostEnv.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/HostEnv.java
new file mode 100644
index 000000000..9229895e6
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/HostEnv.java
@@ -0,0 +1,76 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
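+/**
+ * Hardware description of a single broker, deserialized from the hardware-env-file JSON.
+ * An entry presumably looks like the following (illustrative values only):
+ *   {"id": 1, "host": "10.0.0.1", "rackId": "rack-1", "cpu": 32, "disk": 2048.0, "network": 1024.0}
+ */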
+public class HostEnv {
+ // Broker ID
+ private int id;
+ // Host IP
+ private String host;
+ // Rack ID
+ private String rackId;
+ // Number of CPU cores
+ private int cpu;
+ // Total disk capacity
+ private double disk;
+ // Network interface capacity
+ private double network;
+
+ public int getId() {
+ return id;
+ }
+
+ public void setId(int id) {
+ this.id = id;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public void setHost(String host) {
+ this.host = host;
+ }
+
+ public String getRackId() {
+ return rackId;
+ }
+
+ public void setRackId(String rackId) {
+ this.rackId = rackId;
+ }
+
+ public int getCpu() {
+ return cpu;
+ }
+
+ public void setCpu(int cpu) {
+ this.cpu = cpu;
+ }
+
+ public double getDisk() {
+ return disk;
+ }
+
+ public void setDisk(double disk) {
+ this.disk = disk;
+ }
+
+ public double getNetwork() {
+ return network;
+ }
+
+ public void setNetwork(double network) {
+ this.network = network;
+ }
+
+ @Override
+ public String toString() {
+ return "HostEnv{" +
+ "id=" + id +
+ ", host='" + host + '\'' +
+ ", rackId='" + rackId + '\'' +
+ ", cpu=" + cpu +
+ ", disk=" + disk +
+ ", network=" + network +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/OptimizerResult.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/OptimizerResult.java
new file mode 100644
index 000000000..18abdcd9a
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/OptimizerResult.java
@@ -0,0 +1,218 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Broker;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ReplicaPlacementInfo;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ExecutionProposal;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.OptimizationOptions;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.utils.GoalUtils;
+import org.apache.kafka.common.TopicPartition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+public class OptimizerResult {
+ private static final Logger logger = LoggerFactory.getLogger(OptimizerResult.class);
+ private Set<ExecutionProposal> _proposals;
+ private final BalanceParameter parameter;
+ private Set<Broker> _balanceBrokersBefore;
+ private Set<Broker> _balanceBrokersAfter;
+ private final ClusterModel clusterModel;
+ private final Map> balanceActionHistory;
+ private final Map<String, BalanceThreshold> balanceThreshold;
+
+ public OptimizerResult(ClusterModel clusterModel, OptimizationOptions optimizationOptions) {
+ this.clusterModel = clusterModel;
+ balanceActionHistory = clusterModel.balanceActionHistory();
+ parameter = optimizationOptions.parameter();
+ double[] clusterAvgResource = clusterModel.avgOfUtilization();
+ balanceThreshold = GoalUtils.getBalanceThreshold(parameter, clusterAvgResource);
+ }
+
+ /**
+ * Plan overview
+ */
+ public BalanceOverview resultOverview() {
+ BalanceOverview overview = new BalanceOverview();
+ overview.setTopicBlacklist(parameter.getExcludedTopics());
+ overview.setMoveReplicas(_proposals.size());
+ overview.setNodeRange(parameter.getBalanceBrokers());
+ overview.setRemoveNode(parameter.getOfflineBrokers());
+ Map<Resource, Double> balanceThreshold = new HashMap<>();
+ balanceThreshold.put(Resource.CPU, parameter.getCpuThreshold());
+ balanceThreshold.put(Resource.DISK, parameter.getDiskThreshold());
+ balanceThreshold.put(Resource.NW_IN, parameter.getNetworkInThreshold());
+ balanceThreshold.put(Resource.NW_OUT, parameter.getNetworkOutThreshold());
+ overview.setBalanceThreshold(balanceThreshold);
+ Set<String> moveTopicsSet = _proposals.stream().map(j -> j.tp().topic()).collect(Collectors.toSet());
+ String moveTopics = String.join(",", moveTopicsSet);
+ overview.setMoveTopics(moveTopics);
+ // Pure leader switches are not counted towards the moved data size
+ double totalMoveSize = _proposals.stream().filter(i -> Integer.max(i.replicasToAdd().size(), i.replicasToRemove().size()) != 0).mapToDouble(ExecutionProposal::partitionSize).sum();
+ overview.setTotalMoveSize(totalMoveSize);
+ return overview;
+ }
+
+ /**
+ * Plan details per broker
+ */
+ public Map<Integer, BalanceDetailed> resultDetailed() {
+ Map<Integer, BalanceDetailed> details = new HashMap<>();
+ _balanceBrokersBefore.forEach(i -> {
+ BalanceDetailed balanceDetailed = new BalanceDetailed();
+ balanceDetailed.setBrokerId(i.id());
+ balanceDetailed.setHost(i.host());
+ balanceDetailed.setCurrentCPUUtilization(i.utilizationFor(Resource.CPU));
+ balanceDetailed.setCurrentDiskUtilization(i.utilizationFor(Resource.DISK));
+ balanceDetailed.setCurrentNetworkInUtilization(i.utilizationFor(Resource.NW_IN));
+ balanceDetailed.setCurrentNetworkOutUtilization(i.utilizationFor(Resource.NW_OUT));
+ details.put(i.id(), balanceDetailed);
+ });
+ Map<Integer, Double> totalAddReplicaCount = new HashMap<>();
+ Map<Integer, Double> totalAddDataSize = new HashMap<>();
+ Map<Integer, Double> totalRemoveReplicaCount = new HashMap<>();
+ Map<Integer, Double> totalRemoveDataSize = new HashMap<>();
+ _proposals.forEach(i -> {
+ i.replicasToAdd().forEach((k, v) -> {
+ totalAddReplicaCount.merge(k, v[0], Double::sum);
+ totalAddDataSize.merge(k, v[1], Double::sum);
+ });
+ i.replicasToRemove().forEach((k, v) -> {
+ totalRemoveReplicaCount.merge(k, v[0], Double::sum);
+ totalRemoveDataSize.merge(k, v[1], Double::sum);
+ });
+ });
+ _balanceBrokersAfter.forEach(i -> {
+ BalanceDetailed balanceDetailed = details.get(i.id());
+ balanceDetailed.setLastCPUUtilization(i.utilizationFor(Resource.CPU));
+ balanceDetailed.setLastDiskUtilization(i.utilizationFor(Resource.DISK));
+ balanceDetailed.setLastNetworkInUtilization(i.utilizationFor(Resource.NW_IN));
+ balanceDetailed.setLastNetworkOutUtilization(i.utilizationFor(Resource.NW_OUT));
+ balanceDetailed.setMoveInReplicas(totalAddReplicaCount.getOrDefault(i.id(), 0.0));
+ balanceDetailed.setMoveOutReplicas(totalRemoveReplicaCount.getOrDefault(i.id(), 0.0));
+ balanceDetailed.setMoveInDiskSize(totalAddDataSize.getOrDefault(i.id(), 0.0));
+ balanceDetailed.setMoveOutDiskSize(totalRemoveDataSize.getOrDefault(i.id(), 0.0));
+ for (String str : parameter.getGoals()) {
+ BalanceThreshold threshold = balanceThreshold.get(str);
+ if (!threshold.isInRange(i.utilizationFor(threshold.resource()))) {
+ balanceDetailed.setBalanceState(-1);
+ break;
+ }
+ }
+ });
+
+ return details;
+ }
+
+ /**
+ * Plan tasks (partition reassignments)
+ */
+ public List<BalanceTask> resultTask() {
+ List<BalanceTask> balanceTasks = new ArrayList<>();
+ _proposals.forEach(proposal -> {
+ BalanceTask task = new BalanceTask();
+ task.setTopic(proposal.tp().topic());
+ task.setPartition(proposal.tp().partition());
+ List<Integer> replicas = proposal.newReplicas().stream().map(ReplicaPlacementInfo::brokerId).collect(Collectors.toList());
+ task.setReplicas(replicas);
+ balanceTasks.add(task);
+ });
+ return balanceTasks;
+ }
+
+ public Map> resultBalanceActionHistory() {
+ return Collections.unmodifiableMap(balanceActionHistory);
+ }
+
+ public String resultJsonOverview() {
+ try {
+ return new ObjectMapper().writeValueAsString(resultOverview());
+ } catch (Exception e) {
+ logger.error("result overview json process error", e);
+ }
+ return "{}";
+ }
+
+ public String resultJsonDetailed() {
+ try {
+ return new ObjectMapper().writeValueAsString(resultDetailed());
+ } catch (Exception e) {
+ logger.error("result detailed json process error", e);
+ }
+ return "{}";
+ }
+
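+ /**
+ * Serializes the task list as {"version": 1, "partitions": [...]}, the JSON layout used by
+ * Kafka's partition reassignment tooling.
+ */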
+ public String resultJsonTask() {
+ try {
+ Map<String, Object> reassign = new HashMap<>();
+ reassign.put("partitions", resultTask());
+ reassign.put("version", 1);
+ return new ObjectMapper().writeValueAsString(reassign);
+ } catch (Exception e) {
+ logger.error("result task json process error", e);
+ }
+ return "{}";
+ }
+
+ public List<TopicChangeHistory> resultTopicChangeHistory() {
+ List<TopicChangeHistory> topicChangeHistoryList = new ArrayList<>();
+ for (ExecutionProposal proposal : _proposals) {
+ TopicChangeHistory changeHistory = new TopicChangeHistory();
+ changeHistory.setTopic(proposal.tp().topic());
+ changeHistory.setPartition(proposal.tp().partition());
+ changeHistory.setOldLeader(proposal.oldLeader().brokerId());
+ changeHistory.setNewLeader(proposal.newReplicas().get(0).brokerId());
+ List<Integer> balanceBefore = proposal.oldReplicas().stream().map(ReplicaPlacementInfo::brokerId).collect(Collectors.toList());
+ List<Integer> balanceAfter = proposal.newReplicas().stream().map(ReplicaPlacementInfo::brokerId).collect(Collectors.toList());
+ changeHistory.setBalanceBefore(balanceBefore);
+ changeHistory.setBalanceAfter(balanceAfter);
+ topicChangeHistoryList.add(changeHistory);
+ }
+ return topicChangeHistoryList;
+ }
+
+ public String resultJsonTopicChangeHistory() {
+ try {
+ return new ObjectMapper().writeValueAsString(resultTopicChangeHistory());
+ } catch (Exception e) {
+ logger.error("result balance topic change history json process error", e);
+ }
+ return "{}";
+ }
+
+ public String resultJsonBalanceActionHistory() {
+ try {
+ return new ObjectMapper().writeValueAsString(balanceActionHistory);
+ } catch (Exception e) {
+ logger.error("result balance action history json process error", e);
+ }
+ return "{}";
+ }
+
+ public void setBalanceBrokersFormBefore(Set<Broker> balanceBrokersBefore) {
+ _balanceBrokersBefore = new HashSet<>();
+ balanceBrokersBefore.forEach(i -> {
+ Broker broker = new Broker(i.rack(), i.id(), i.host(), false, i.capacity());
+ broker.load().addLoad(i.load());
+ _balanceBrokersBefore.add(broker);
+ });
+ }
+
+ public void setBalanceBrokersFormAfter(Set<Broker> balanceBrokersAfter) {
+ _balanceBrokersAfter = balanceBrokersAfter;
+ }
+
+ public void setExecutionProposal(Set<ExecutionProposal> proposals) {
+ _proposals = proposals;
+ }
+
+ // test
+ public ClusterModel clusterModel() {
+ return clusterModel;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/TopicChangeHistory.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/TopicChangeHistory.java
new file mode 100644
index 000000000..83fb43c99
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/TopicChangeHistory.java
@@ -0,0 +1,78 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common;
+
+import java.util.List;
+
+public class TopicChangeHistory {
+ // Topic being balanced
+ private String topic;
+ // Partition being balanced
+ private int partition;
+ // Broker ID of the old leader
+ private int oldLeader;
+ // Replica distribution before balancing
+ private List<Integer> balanceBefore;
+ // Broker ID of the new leader
+ private int newLeader;
+ // Replica distribution after balancing
+ private List<Integer> balanceAfter;
+
+ public String getTopic() {
+ return topic;
+ }
+
+ public void setTopic(String topic) {
+ this.topic = topic;
+ }
+
+ public int getPartition() {
+ return partition;
+ }
+
+ public void setPartition(int partition) {
+ this.partition = partition;
+ }
+
+ public int getOldLeader() {
+ return oldLeader;
+ }
+
+ public void setOldLeader(int oldLeader) {
+ this.oldLeader = oldLeader;
+ }
+
+ public List<Integer> getBalanceBefore() {
+ return balanceBefore;
+ }
+
+ public void setBalanceBefore(List<Integer> balanceBefore) {
+ this.balanceBefore = balanceBefore;
+ }
+
+ public int getNewLeader() {
+ return newLeader;
+ }
+
+ public void setNewLeader(int newLeader) {
+ this.newLeader = newLeader;
+ }
+
+ public List<Integer> getBalanceAfter() {
+ return balanceAfter;
+ }
+
+ public void setBalanceAfter(List<Integer> balanceAfter) {
+ this.balanceAfter = balanceAfter;
+ }
+
+ @Override
+ public String toString() {
+ return "TopicChangeHistory{" +
+ "topic='" + topic + '\'' +
+ ", partition='" + partition + '\'' +
+ ", oldLeader=" + oldLeader +
+ ", balanceBefore=" + balanceBefore +
+ ", newLeader=" + newLeader +
+ ", balanceAfter=" + balanceAfter +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/Metric.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/Metric.java
new file mode 100644
index 000000000..548efe000
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/Metric.java
@@ -0,0 +1,51 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric;
+
+/**
+ * @author leewei
+ * @date 2022/5/12
+ */
+public class Metric {
+ private String topic;
+ private int partition;
+ private double cpu;
+ private double bytesIn;
+ private double bytesOut;
+ private double disk;
+
+ public Metric() {
+
+ }
+
+ public Metric(String topic, int partition, double cpu, double bytesIn, double bytesOut, double disk) {
+ this.topic = topic;
+ this.partition = partition;
+ this.cpu = cpu;
+ this.bytesIn = bytesIn;
+ this.bytesOut = bytesOut;
+ this.disk = disk;
+ }
+
+ public String topic() {
+ return topic;
+ }
+
+ public int partition() {
+ return partition;
+ }
+
+ public double cpu() {
+ return cpu;
+ }
+
+ public double bytesIn() {
+ return bytesIn;
+ }
+
+ public double bytesOut() {
+ return bytesOut;
+ }
+
+ public double disk() {
+ return disk;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/MetricStore.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/MetricStore.java
new file mode 100644
index 000000000..be9c2fcc1
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/MetricStore.java
@@ -0,0 +1,9 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric;
+
+/**
+ * @author leewei
+ * @date 2022/4/29
+ */
+public interface MetricStore {
+ Metrics getMetrics(String clusterName, int beforeSeconds);
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/Metrics.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/Metrics.java
new file mode 100644
index 000000000..7e09667e0
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/Metrics.java
@@ -0,0 +1,46 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Load;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.*;
+
+/**
+ * @author leewei
+ * @date 2022/4/29
+ */
+public class Metrics {
+ private final Map<TopicPartition, Metric> metricByTopicPartition;
+
+ public Metrics() {
+ this.metricByTopicPartition = new HashMap<>();
+ }
+
+ public void addMetrics(Metric metric) {
+ TopicPartition topicPartition = new TopicPartition(metric.topic(), metric.partition());
+ this.metricByTopicPartition.put(topicPartition, metric);
+ }
+
+ public List<Metric> values() {
+ return Collections.unmodifiableList(new ArrayList<>(this.metricByTopicPartition.values()));
+ }
+
+ public Metric metric(TopicPartition topicPartition) {
+ return this.metricByTopicPartition.get(topicPartition);
+ }
+
+ public Load load(TopicPartition topicPartition) {
+ Metric metric = this.metricByTopicPartition.get(topicPartition);
+ if (metric == null) {
+ return null;
+ }
+ Load load = new Load();
+ load.setLoad(Resource.CPU, metric.cpu());
+ load.setLoad(Resource.NW_IN, metric.bytesIn());
+ load.setLoad(Resource.NW_OUT, metric.bytesOut());
+ load.setLoad(Resource.DISK, metric.disk());
+
+ return load;
+ }
+}
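
Reviewer note: a minimal usage sketch (not part of this patch) of how Metric, Metrics, and the Load conversion are expected to fit together; the topic name and metric values below are made up for illustration.

    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.Metric;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.Metrics;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Load;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
    import org.apache.kafka.common.TopicPartition;

    public class MetricsSketch {
        public static void main(String[] args) {
            Metrics metrics = new Metrics();
            // topic, partition, cpu, bytesIn, bytesOut, disk (illustrative values)
            metrics.addMetrics(new Metric("orders", 0, 0.0, 1_048_576d, 2_097_152d, 5_368_709_120d));

            // Convert a partition's raw metric into a Load keyed by Resource.
            Load load = metrics.load(new TopicPartition("orders", 0));
            if (load != null) {
                System.out.println("NW_IN load: " + load.loadFor(Resource.NW_IN));
            }
        }
    }
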
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/elasticsearch/ElasticsearchMetricStore.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/elasticsearch/ElasticsearchMetricStore.java
new file mode 100644
index 000000000..3a8009977
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/elasticsearch/ElasticsearchMetricStore.java
@@ -0,0 +1,124 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.elasticsearch;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.Metric;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.MetricStore;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.Metrics;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpHost;
+import org.apache.http.message.BasicHeader;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.*;
+
+/**
+ * @author leewei
+ * @date 2022/4/29
+ */
+public class ElasticsearchMetricStore implements MetricStore {
+ private final Logger logger = LoggerFactory.getLogger(ElasticsearchMetricStore.class);
+ private final ObjectMapper objectMapper = new ObjectMapper();
+
+ private final String hosts;
+
+ private final String password;
+
+ private final String indexPrefix;
+ private final String format;
+
+ public ElasticsearchMetricStore(String hosts, String password, String indexPrefix) {
+ this(hosts, password, indexPrefix, "yyyy-MM-dd");
+ }
+
+ public ElasticsearchMetricStore(String hosts, String password, String indexPrefix, String format) {
+ this.hosts = hosts;
+ this.password = password;
+ this.indexPrefix = indexPrefix;
+ this.format = format;
+ }
+
+ @Override
+ public Metrics getMetrics(String clusterName, int beforeSeconds) {
+ Metrics metrics = new Metrics();
+ try {
+ String metricsQueryJson = IOUtils.resourceToString("/MetricsQuery.json", StandardCharsets.UTF_8);
+ metricsQueryJson = metricsQueryJson.replaceAll("<beforeSeconds>", Integer.toString(beforeSeconds))
+ .replaceAll("<clusterName>", clusterName);
+
+ List<Header> defaultHeaders = new ArrayList<>();
+ if (StringUtils.isNotBlank(password)) {
+ String encode = Base64.getEncoder().encodeToString(String.format("%s", this.password).getBytes(StandardCharsets.UTF_8));
+ Header header = new BasicHeader("Authorization", "Basic " + encode);
+ defaultHeaders.add(header);
+ }
+
+ Header[] headers = new Header[defaultHeaders.size()];
+ defaultHeaders.toArray(headers);
+ try (RestClient restClient = RestClient.builder(toHttpHosts(this.hosts)).setDefaultHeaders(headers).build()) {
+ Request request = new Request(
+ "GET",
+ "/" + indices(beforeSeconds) + "/_search");
+ request.setJsonEntity(metricsQueryJson);
+ logger.debug("Es metrics query for cluster: {} request: {} dsl: {}", clusterName, request, metricsQueryJson);
+ Response response = restClient.performRequest(request);
+ if (response.getStatusLine().getStatusCode() == 200) {
+ JsonNode rootNode = objectMapper.readTree(response.getEntity().getContent());
+ JsonNode topics = rootNode.at("/aggregations/by_topic/buckets");
+ for (JsonNode topic : topics) {
+ String topicName = topic.path("key").asText();
+ JsonNode partitions = topic.at("/by_partition/buckets");
+ for (JsonNode partition : partitions) {
+ int partitionId = partition.path("key").asInt();
+ // double cpu = partition.at("/avg_cpu/value").asDouble();
+ double cpu = 0D;
+ double bytesIn = partition.at("/avg_bytes_in/value").asDouble();
+ double bytesOut = partition.at("/avg_bytes_out/value").asDouble();
+ double disk = partition.at("/lastest_disk/hits/hits/0/_source/metrics/LogSize").asDouble();
+ // add
+ metrics.addMetrics(new Metric(topicName, partitionId, cpu, bytesIn, bytesOut, disk));
+ }
+ }
+ }
+ }
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Cannot get metrics of cluster: " + clusterName, e);
+ }
+ logger.debug("Es metrics query for cluster: {} result count: {}", clusterName, metrics.values().size());
+ return metrics;
+ }
+
+ private String indices(long beforeSeconds) {
+ Set<String> indices = new TreeSet<>();
+ DateFormat df = new SimpleDateFormat(this.format);
+ long endTime = System.currentTimeMillis();
+ long time = endTime - (beforeSeconds * 1000);
+ while (time < endTime) {
+ indices.add(this.indexPrefix + df.format(new Date(time)));
+ time += 24 * 60 * 60 * 1000; // add 24h
+ }
+ indices.add(this.indexPrefix + df.format(new Date(endTime)));
+ return String.join(",", indices);
+ }
+
+ private static HttpHost[] toHttpHosts(String url) {
+ String[] nodes = url.split(",");
+ HttpHost[] hosts = new HttpHost[nodes.length];
+ for (int i = 0; i < nodes.length; i++) {
+ String [] ipAndPort = nodes[i].split(":");
+ hosts[i] = new HttpHost(ipAndPort[0], ipAndPort.length > 1 ? Integer.parseInt(ipAndPort[1]) : 9200);
+ }
+ return hosts;
+ }
+
+}
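
Reviewer note: a sketch (not part of this patch) of how the store is constructed and queried. The ES endpoints, password, and index prefix are hypothetical; the query only succeeds against a reachable Elasticsearch with the index layout that MetricsQuery.json expects.

    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.MetricStore;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.Metrics;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.elasticsearch.ElasticsearchMetricStore;

    public class MetricStoreSketch {
        public static void main(String[] args) {
            // Hypothetical endpoints and credentials.
            MetricStore store = new ElasticsearchMetricStore(
                    "es-1:9200,es-2:9200",          // comma-separated host:port list, port defaults to 9200
                    "es-password",                   // sent as a Basic Authorization header when non-blank
                    "ks_kafka_partition_metric_");   // daily index prefix, expanded with the yyyy-MM-dd format

            Metrics metrics = store.getMetrics("cluster-1", 3600); // partition metrics from the last hour
            System.out.println("fetched partition metrics: " + metrics.values().size());
        }
    }
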
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Broker.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Broker.java
new file mode 100644
index 000000000..a2b3f2694
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Broker.java
@@ -0,0 +1,222 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.*;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+/**
+ * @author leewei
+ * @date 2022/4/29
+ */
+public class Broker implements Comparable<Broker> {
+ public static final Broker NONE = new Broker(new Rack("-1"), -1, "localhost", true, new Capacity());
+
+ private final Rack rack;
+ private final int id;
+ private final String host;
+ private final boolean isOffline;
+
+ private final Set<Replica> replicas;
+ private final Set<Replica> leaderReplicas;
+ private final Map<String, Map<Integer, Replica>> topicReplicas;
+
+ private final Load load;
+
+ private final Capacity capacity;
+
+ public Broker(Rack rack, int id, String host, boolean isOffline, Capacity capacity) {
+ this.rack = rack;
+ this.id = id;
+ this.host = host;
+ this.isOffline = isOffline;
+ this.replicas = new HashSet<>();
+ this.leaderReplicas = new HashSet<>();
+ this.topicReplicas = new HashMap<>();
+ this.load = new Load();
+ this.capacity = capacity;
+ }
+
+ public Rack rack() {
+ return rack;
+ }
+
+ public int id() {
+ return id;
+ }
+
+ public String host() {
+ return host;
+ }
+
+ public boolean isOffline() {
+ return isOffline;
+ }
+
+ public Set<Replica> replicas() {
+ return Collections.unmodifiableSet(this.replicas);
+ }
+
+ public SortedSet<Replica> sortedReplicasFor(Resource resource, boolean reverse) {
+ return sortedReplicasFor(null, resource, reverse);
+ }
+
+ public SortedSet<Replica> sortedReplicasFor(Predicate<? super Replica> filter, Resource resource, boolean reverse) {
+ Comparator<Replica> comparator =
+ Comparator.<Replica>comparingDouble(r -> r.load().loadFor(resource))
+ .thenComparingInt(Replica::hashCode);
+ if (reverse)
+ comparator = comparator.reversed();
+ SortedSet<Replica> sortedReplicas = new TreeSet<>(comparator);
+ if (filter == null) {
+ sortedReplicas.addAll(this.replicas);
+ } else {
+ sortedReplicas.addAll(this.replicas.stream()
+ .filter(filter).collect(Collectors.toList()));
+ }
+
+ return sortedReplicas;
+ }
+
+ public Set<Replica> leaderReplicas() {
+ return Collections.unmodifiableSet(this.leaderReplicas);
+ }
+
+ public Load load() {
+ return load;
+ }
+
+ public Capacity capacity() {
+ return capacity;
+ }
+
+ public double utilizationFor(Resource resource) {
+ return this.load.loadFor(resource) / this.capacity.capacityFor(resource);
+ }
+
+ public double expectedUtilizationAfterAdd(Resource resource, Load loadToChange) {
+ return (this.load.loadFor(resource) + ((loadToChange == null) ? 0 : loadToChange.loadFor(resource)))
+ / this.capacity.capacityFor(resource);
+ }
+
+ public double expectedUtilizationAfterRemove(Resource resource, Load loadToChange) {
+ return (this.load.loadFor(resource) - ((loadToChange == null) ? 0 : loadToChange.loadFor(resource)))
+ / this.capacity.capacityFor(resource);
+ }
+
+ public Replica replica(TopicPartition topicPartition) {
+ Map<Integer, Replica> replicas = this.topicReplicas.get(topicPartition.topic());
+ if (replicas == null) {
+ return null;
+ }
+ return replicas.get(topicPartition.partition());
+ }
+
+ void addReplica(Replica replica) {
+ // Add replica to list of all replicas in the broker.
+ if (this.replicas.contains(replica)) {
+ throw new IllegalStateException(String.format("Broker %d already has replica %s", this.id,
+ replica.topicPartition()));
+ }
+ this.replicas.add(replica);
+ // Add topic replica.
+ this.topicReplicas.computeIfAbsent(replica.topicPartition().topic(), t -> new HashMap<>())
+ .put(replica.topicPartition().partition(), replica);
+
+ // Add leader replica.
+ if (replica.isLeader()) {
+ this.leaderReplicas.add(replica);
+ }
+
+ // Add replica load to the broker load.
+ this.load.addLoad(replica.load());
+ }
+
+ Replica removeReplica(TopicPartition topicPartition) {
+ Replica replica = replica(topicPartition);
+ if (replica != null) {
+ this.replicas.remove(replica);
+ Map<Integer, Replica> replicas = this.topicReplicas.get(topicPartition.topic());
+ if (replicas != null) {
+ replicas.remove(topicPartition.partition());
+ }
+ if (replica.isLeader()) {
+ this.leaderReplicas.remove(replica);
+ }
+ this.load.subtractLoad(replica.load());
+ }
+ return replica;
+ }
+
+ Load makeFollower(TopicPartition topicPartition) {
+ Replica replica = replica(topicPartition);
+ Load leaderLoadDelta = replica.makeFollower();
+ // Remove leadership load from load.
+ this.load.subtractLoad(leaderLoadDelta);
+ this.leaderReplicas.remove(replica);
+ return leaderLoadDelta;
+ }
+
+ void makeLeader(TopicPartition topicPartition, Load leaderLoadDelta) {
+ Replica replica = replica(topicPartition);
+ replica.makeLeader(leaderLoadDelta);
+ // Add leadership load to load.
+ this.load.addLoad(leaderLoadDelta);
+ this.leaderReplicas.add(replica);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Broker broker = (Broker) o;
+ return id == broker.id;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(id);
+ }
+
+ @Override
+ public int compareTo(Broker o) {
+ return Integer.compare(id, o.id());
+ }
+
+ @Override
+ public String toString() {
+ return "Broker{" +
+ "id=" + id +
+ ", host='" + host + '\'' +
+ ", rack=" + rack.id() +
+ ", replicas=" + replicas +
+ ", leaderReplicas=" + leaderReplicas +
+ ", topicReplicas=" + topicReplicas +
+ ", load=" + load +
+ ", capacity=" + capacity +
+ '}';
+ }
+
+ public int numLeadersFor(String topicName) {
+ return (int) replicasOfTopicInBroker(topicName).stream().filter(Replica::isLeader).count();
+ }
+
+ public Set<String> topics() {
+ return topicReplicas.keySet();
+ }
+
+ public int numReplicasOfTopicInBroker(String topic) {
+ Map<Integer, Replica> replicaMap = topicReplicas.get(topic);
+ return replicaMap == null ? 0 : replicaMap.size();
+ }
+
+ public Collection<Replica> replicasOfTopicInBroker(String topic) {
+ Map<Integer, Replica> replicaMap = topicReplicas.get(topic);
+ return replicaMap == null ? Collections.emptySet() : replicaMap.values();
+ }
+
+ public Set<Replica> currentOfflineReplicas() {
+ return replicas.stream().filter(Replica::isCurrentOffline).collect(Collectors.toSet());
+ }
+}
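
Reviewer note: a small sketch (not part of this patch) of the utilization math the balancing goals rely on; the rack, broker id, host, and capacity numbers are invented.

    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Broker;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Capacity;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Load;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Rack;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;

    public class BrokerUtilizationSketch {
        public static void main(String[] args) {
            Capacity capacity = new Capacity();
            capacity.setCapacity(Resource.DISK, 1_000d);
            Broker broker = new Broker(new Rack("rack-a"), 1, "10.0.0.1", false, capacity);

            Load incoming = new Load();
            incoming.setLoad(Resource.DISK, 250d);

            // 0 current disk load plus 250 incoming over a 1000-unit capacity -> 0.25
            double projected = broker.expectedUtilizationAfterAdd(Resource.DISK, incoming);
            System.out.println("projected disk utilization: " + projected);
        }
    }
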
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Capacity.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Capacity.java
new file mode 100644
index 000000000..b6da2b738
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Capacity.java
@@ -0,0 +1,36 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import java.util.Arrays;
+
+/**
+ * @author leewei
+ * @date 2022/5/9
+ */
+public class Capacity {
+ private final double[] values;
+
+ public Capacity() {
+ this.values = new double[Resource.values().length];
+ }
+
+ public void setCapacity(Resource resource, double capacity) {
+ this.values[resource.id()] = capacity;
+ }
+
+ public double capacityFor(Resource resource) {
+ return this.values[resource.id()];
+ }
+
+ public void addCapacity(Capacity capacityToAdd) {
+ for (Resource resource : Resource.values()) {
+ this.setCapacity(resource, this.capacityFor(resource) + capacityToAdd.capacityFor(resource));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "Capacity{" +
+ "values=" + Arrays.toString(values) +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/ClusterModel.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/ClusterModel.java
new file mode 100644
index 000000000..57ef98a1a
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/ClusterModel.java
@@ -0,0 +1,236 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceActionHistory;
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.*;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+/**
+ * @author leewei
+ * @date 2022/4/29
+ */
+public class ClusterModel {
+ private final Map<String, Rack> racksById;
+ private final Map<Integer, Broker> brokersById;
+ private final Map<String, Map<TopicPartition, Partition>> partitionsByTopic;
+ private Map<TopicPartition, List<BalanceActionHistory>> balanceActionHistory;
+
+ public ClusterModel() {
+ this.racksById = new HashMap<>();
+ this.brokersById = new HashMap<>();
+ this.partitionsByTopic = new HashMap<>();
+ this.balanceActionHistory = new HashMap<>();
+ }
+
+ public Rack rack(String rackId) {
+ return this.racksById.get(rackId);
+ }
+
+ public Rack addRack(String rackId) {
+ Rack rack = new Rack(rackId);
+ this.racksById.putIfAbsent(rackId, rack);
+ return this.racksById.get(rackId);
+ }
+
+ public SortedSet<Broker> brokers() {
+ return new TreeSet<>(this.brokersById.values());
+ }
+
+ public Set<String> topics() {
+ return this.partitionsByTopic.keySet();
+ }
+
+ public SortedSet<Partition> topic(String name) {
+ return new TreeSet<>(this.partitionsByTopic.get(name).values());
+ }
+
+ public SortedSet<Broker> sortedBrokersFor(Resource resource, boolean reverse) {
+ return sortedBrokersFor(null, resource, reverse);
+ }
+
+ public SortedSet<Broker> sortedBrokersFor(Predicate<? super Broker> filter, Resource resource, boolean reverse) {
+ Comparator<Broker> comparator =
+ Comparator.<Broker>comparingDouble(b -> b.utilizationFor(resource))
+ .thenComparingInt(Broker::id);
+ if (reverse)
+ comparator = comparator.reversed();
+ SortedSet<Broker> sortedBrokers = new TreeSet<>(comparator);
+ if (filter == null) {
+ sortedBrokers.addAll(this.brokersById.values());
+ } else {
+ sortedBrokers.addAll(this.brokersById.values().stream()
+ .filter(filter).collect(Collectors.toList()));
+ }
+
+ return sortedBrokers;
+ }
+
+ public Load load() {
+ Load load = new Load();
+ for (Broker broker : this.brokersById.values()) {
+ load.addLoad(broker.load());
+ }
+ return load;
+ }
+
+ public Capacity capacity() {
+ Capacity capacity = new Capacity();
+ for (Broker broker : this.brokersById.values()) {
+ capacity.addCapacity(broker.capacity());
+ }
+ return capacity;
+ }
+
+ public double utilizationFor(Resource resource) {
+ return load().loadFor(resource) / capacity().capacityFor(resource);
+ }
+
+ public double[] avgOfUtilization() {
+ Load load = load();
+ Capacity capacity = capacity();
+ double[] unils = new double[Resource.values().length];
+ for (Resource resource : Resource.values()) {
+ unils[resource.id()] = load.loadFor(resource) / capacity.capacityFor(resource);
+ }
+ return unils;
+ }
+
+ public Broker broker(int brokerId) {
+ return this.brokersById.get(brokerId);
+ }
+
+ public Broker addBroker(String rackId, int brokerId, String host, boolean isOffline, Capacity capacity) {
+ Rack rack = rack(rackId);
+ if (rack == null)
+ throw new IllegalArgumentException("Rack: " + rackId + "is not exists.");
+ Broker broker = new Broker(rack, brokerId, host, isOffline, capacity);
+ rack.addBroker(broker);
+ this.brokersById.put(brokerId, broker);
+ return broker;
+ }
+
+ public Replica addReplica(int brokerId, TopicPartition topicPartition, boolean isLeader, Load load) {
+ return addReplica(brokerId, topicPartition, isLeader, false, load);
+ }
+
+ public Replica addReplica(int brokerId, TopicPartition topicPartition, boolean isLeader, boolean isOffline, Load load) {
+ Broker broker = broker(brokerId);
+ if (broker == null) {
+ throw new IllegalArgumentException("Broker: " + brokerId + "is not exists.");
+ }
+
+ Replica replica = new Replica(broker, topicPartition, isLeader, isOffline);
+ replica.setLoad(load);
+ // add to broker
+ broker.addReplica(replica);
+
+ Map<TopicPartition, Partition> partitions = this.partitionsByTopic
+ .computeIfAbsent(topicPartition.topic(), k -> new HashMap<>());
+
+ Partition partition = partitions.computeIfAbsent(topicPartition, Partition::new);
+ if (isLeader) {
+ partition.addLeader(replica, 0);
+ } else {
+ partition.addFollower(replica, partition.replicas().size());
+ }
+
+ return replica;
+ }
+
+ public Replica removeReplica(int brokerId, TopicPartition topicPartition) {
+ Broker broker = broker(brokerId);
+ return broker.removeReplica(topicPartition);
+ }
+
+ public void relocateLeadership(String goal, String actionType, TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
+ relocateLeadership(topicPartition, sourceBrokerId, destinationBrokerId);
+ addBalanceActionHistory(goal, actionType, topicPartition, sourceBrokerId, destinationBrokerId);
+ }
+
+ public void relocateLeadership(TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
+ Broker sourceBroker = broker(sourceBrokerId);
+ Replica sourceReplica = sourceBroker.replica(topicPartition);
+ if (!sourceReplica.isLeader()) {
+ throw new IllegalArgumentException("Cannot relocate leadership of partition " + topicPartition + "from broker "
+ + sourceBrokerId + " to broker " + destinationBrokerId
+ + " because the source replica isn't leader.");
+ }
+ Broker destinationBroker = broker(destinationBrokerId);
+ Replica destinationReplica = destinationBroker.replica(topicPartition);
+ if (destinationReplica.isLeader()) {
+ throw new IllegalArgumentException("Cannot relocate leadership of partition " + topicPartition + "from broker "
+ + sourceBrokerId + " to broker " + destinationBrokerId
+ + " because the destination replica is a leader.");
+ }
+ Load leaderLoadDelta = sourceBroker.makeFollower(topicPartition);
+ destinationBroker.makeLeader(topicPartition, leaderLoadDelta);
+
+ Partition partition = this.partitionsByTopic.get(topicPartition.topic()).get(topicPartition);
+ partition.relocateLeadership(destinationReplica);
+ }
+
+ public void relocateReplica(String goal, String actionType, TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
+ relocateReplica(topicPartition, sourceBrokerId, destinationBrokerId);
+ addBalanceActionHistory(goal, actionType, topicPartition, sourceBrokerId, destinationBrokerId);
+ }
+
+ public void relocateReplica(TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
+ Replica replica = removeReplica(sourceBrokerId, topicPartition);
+ if (replica == null) {
+ throw new IllegalArgumentException("Replica is not in the cluster.");
+ }
+ Broker destinationBroker = broker(destinationBrokerId);
+ replica.setBroker(destinationBroker);
+ destinationBroker.addReplica(replica);
+ }
+
+ private void addBalanceActionHistory(String goal, String actionType, TopicPartition topicPartition, int sourceBrokerId, int destinationBrokerId) {
+ BalanceActionHistory history = new BalanceActionHistory();
+ history.setActionType(actionType);
+ history.setGoal(goal);
+ history.setTopic(topicPartition.topic());
+ history.setPartition(topicPartition.partition());
+ history.setSourceBrokerId(sourceBrokerId);
+ history.setDestinationBrokerId(destinationBrokerId);
+ this.balanceActionHistory.computeIfAbsent(topicPartition, k -> new ArrayList<>()).add(history);
+ }
+
+ public Map<String, Integer> numLeadersPerTopic(Set<String> topics) {
+ Map<String, Integer> leaderCountByTopicNames = new HashMap<>();
+ topics.forEach(topic -> leaderCountByTopicNames.put(topic, partitionsByTopic.get(topic).size()));
+ return leaderCountByTopicNames;
+ }
+
+ public Map<TopicPartition, List<ReplicaPlacementInfo>> getReplicaDistribution() {
+ Map<TopicPartition, List<ReplicaPlacementInfo>> replicaDistribution = new HashMap<>();
+ for (Map<TopicPartition, Partition> tp : partitionsByTopic.values()) {
+ tp.values().forEach(i -> {
+ i.replicas().forEach(j -> replicaDistribution.computeIfAbsent(j.topicPartition(), k -> new ArrayList<>())
+ .add(new ReplicaPlacementInfo(j.broker().id(), "")));
+ });
+ }
+ return replicaDistribution;
+ }
+
+ public Replica partition(TopicPartition tp) {
+ return partitionsByTopic.get(tp.topic()).get(tp).leader();
+ }
+
+ public Map<TopicPartition, ReplicaPlacementInfo> getLeaderDistribution() {
+ Map<TopicPartition, ReplicaPlacementInfo> leaderDistribution = new HashMap<>();
+ for (Broker broker : brokersById.values()) {
+ broker.leaderReplicas().forEach(i -> leaderDistribution.put(i.topicPartition(), new ReplicaPlacementInfo(broker.id(), "")));
+ }
+ return leaderDistribution;
+ }
+
+ public int numTopicReplicas(String topic) {
+ return partitionsByTopic.get(topic).size();
+ }
+
+ public Map<TopicPartition, List<BalanceActionHistory>> balanceActionHistory() {
+ return this.balanceActionHistory;
+ }
+}
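
Reviewer note: a sketch (not part of this patch) of building a tiny ClusterModel by hand and moving leadership; the racks, brokers, loads, and the goal label passed to relocateLeadership are illustrative only.

    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Capacity;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Load;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
    import org.apache.kafka.common.TopicPartition;

    public class ClusterModelSketch {
        public static void main(String[] args) {
            ClusterModel model = new ClusterModel();
            model.addRack("rack-a");
            model.addRack("rack-b");

            Capacity capacity = new Capacity();
            capacity.setCapacity(Resource.DISK, 10_000d);
            model.addBroker("rack-a", 1, "10.0.0.1", false, capacity);
            model.addBroker("rack-b", 2, "10.0.0.2", false, capacity);

            TopicPartition tp = new TopicPartition("orders", 0);
            Load load = new Load();
            load.setLoad(Resource.DISK, 100d);
            model.addReplica(1, tp, true, load);   // leader on broker 1
            model.addReplica(2, tp, false, load);  // follower on broker 2

            // Move leadership to broker 2; the goal/action labels are only recorded in the action history.
            model.relocateLeadership("illustrative-goal", "LEADER", tp, 1, 2);
            System.out.println(model.getLeaderDistribution().get(tp));
        }
    }
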
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Load.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Load.java
new file mode 100644
index 000000000..ec8b2c174
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Load.java
@@ -0,0 +1,42 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import java.util.Arrays;
+
+/**
+ * @author leewei
+ * @date 2022/5/9
+ */
+public class Load {
+ private final double[] values;
+
+ public Load() {
+ this.values = new double[Resource.values().length];
+ }
+
+ public void setLoad(Resource resource, double load) {
+ this.values[resource.id()] = load;
+ }
+
+ public double loadFor(Resource resource) {
+ return this.values[resource.id()];
+ }
+
+ public void addLoad(Load loadToAdd) {
+ for (Resource resource : Resource.values()) {
+ this.setLoad(resource, this.loadFor(resource) + loadToAdd.loadFor(resource));
+ }
+ }
+
+ public void subtractLoad(Load loadToSubtract) {
+ for (Resource resource : Resource.values()) {
+ this.setLoad(resource, this.loadFor(resource) - loadToSubtract.loadFor(resource));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "Load{" +
+ "values=" + Arrays.toString(values) +
+ '}';
+ }
+}
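
Reviewer note: a short sketch (not part of this patch) of the per-resource vector arithmetic behind Load; the numbers are arbitrary.

    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Load;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;

    public class LoadArithmeticSketch {
        public static void main(String[] args) {
            Load brokerLoad = new Load();

            Load replicaLoad = new Load();
            replicaLoad.setLoad(Resource.NW_IN, 512d);
            replicaLoad.setLoad(Resource.NW_OUT, 1_024d);

            // Adding or removing a replica adjusts every resource dimension in one call.
            brokerLoad.addLoad(replicaLoad);
            System.out.println(brokerLoad);        // NW_IN is 512, NW_OUT is 1024, CPU/DISK stay 0
            brokerLoad.subtractLoad(replicaLoad);  // back to all zeros
        }
    }
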
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Partition.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Partition.java
new file mode 100644
index 000000000..6783e6a41
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Partition.java
@@ -0,0 +1,148 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+/**
+ * @author leewei
+ * @date 2022/5/11
+ */
+public class Partition implements Comparable<Partition> {
+ private final TopicPartition topicPartition;
+ private final List<Replica> replicas;
+
+ public Partition(TopicPartition topicPartition) {
+ this.topicPartition = topicPartition;
+ this.replicas = new ArrayList<>();
+ }
+
+ public TopicPartition topicPartition() {
+ return topicPartition;
+ }
+
+ public List<Replica> replicas() {
+ return replicas;
+ }
+
+ public Broker originalLeaderBroker() {
+ return replicas.stream().filter(r -> r.original().isLeader())
+ .findFirst().orElseThrow(IllegalStateException::new).broker();
+ }
+
+ public Replica leader() {
+ return replicas.stream()
+ .filter(Replica::isLeader)
+ .findFirst()
+ .orElseThrow(() ->
+ new IllegalArgumentException("Not found leader of partition " + topicPartition)
+ );
+ }
+
+ public Replica leaderOrNull() {
+ return replicas.stream()
+ .filter(Replica::isLeader)
+ .findFirst()
+ .orElse(null);
+ }
+
+ public List<Replica> followers() {
+ return replicas.stream()
+ .filter(r -> !r.isLeader())
+ .collect(Collectors.toList());
+ }
+
+ Replica replica(long brokerId) {
+ return replicas.stream()
+ .filter(r -> r.broker().id() == brokerId)
+ .findFirst()
+ .orElseThrow(() ->
+ new IllegalArgumentException("Requested replica " + brokerId + " is not a replica of partition " + topicPartition)
+ );
+ }
+
+ public boolean isLeaderChanged() {
+ // return originalLeaderBroker() != this.leader().broker();
+ return replicas.stream().anyMatch(Replica::isLeaderChanged);
+ }
+
+ public boolean isChanged() {
+ return replicas.stream().anyMatch(Replica::isChanged);
+ }
+
+ void addLeader(Replica leader, int index) {
+ if (leaderOrNull() != null) {
+ throw new IllegalArgumentException(String.format("Partition %s already has a leader replica %s. Cannot "
+ + "add a new leader replica %s", this.topicPartition, leaderOrNull(), leader));
+ }
+ if (!leader.isLeader()) {
+ throw new IllegalArgumentException("Inconsistent leadership information. Trying to set " + leader.broker()
+ + " as the leader for partition " + this.topicPartition + " while the replica is not marked "
+ + "as a leader.");
+ }
+ this.replicas.add(index, leader);
+ }
+
+ void addFollower(Replica follower, int index) {
+ if (follower.isLeader()) {
+ throw new IllegalArgumentException("Inconsistent leadership information. Trying to add follower replica "
+ + follower + " while it is a leader.");
+ }
+ if (!follower.topicPartition().equals(this.topicPartition)) {
+ throw new IllegalArgumentException("Inconsistent topic partition. Trying to add follower replica " + follower
+ + " to partition " + this.topicPartition + ".");
+ }
+ this.replicas.add(index, follower);
+ }
+
+ void relocateLeadership(Replica newLeader) {
+ if (!newLeader.isLeader()) {
+ throw new IllegalArgumentException("Inconsistent leadership information. Trying to set " + newLeader.broker()
+ + " as the leader for partition " + this.topicPartition + " while the replica is not marked "
+ + "as a leader.");
+ }
+ int leaderPos = this.replicas.indexOf(newLeader);
+ swapReplicaPositions(0, leaderPos);
+ }
+
+ void swapReplicaPositions(int index1, int index2) {
+ Replica replica1 = this.replicas.get(index1);
+ Replica replica2 = this.replicas.get(index2);
+
+ this.replicas.set(index2, replica1);
+ this.replicas.set(index1, replica2);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Partition partition = (Partition) o;
+ return topicPartition.equals(partition.topicPartition);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(topicPartition);
+ }
+
+ @Override
+ public String toString() {
+ return "Partition{" +
+ "topicPartition=" + topicPartition +
+ ", replicas=" + replicas +
+ ", originalLeaderBroker=" + originalLeaderBroker().id() +
+ ", leader=" + leaderOrNull() +
+ '}';
+ }
+
+
+
+ @Override
+ public int compareTo(Partition o) {
+ return Integer.compare(topicPartition.partition(), o.topicPartition.partition());
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Rack.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Rack.java
new file mode 100644
index 000000000..028c87477
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Rack.java
@@ -0,0 +1,67 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import java.util.*;
+
+/**
+ * @author leewei
+ * @date 2022/5/9
+ */
+public class Rack {
+ private final String id;
+ private final SortedSet<Broker> brokers;
+
+ public Rack(String id) {
+ this.id = id;
+ this.brokers = new TreeSet<>();
+ }
+
+ public String id() {
+ return id;
+ }
+
+ public SortedSet<Broker> brokers() {
+ return Collections.unmodifiableSortedSet(this.brokers);
+ }
+
+ public Load load() {
+ Load load = new Load();
+ for (Broker broker : this.brokers) {
+ load.addLoad(broker.load());
+ }
+ return load;
+ }
+
+ public List<Replica> replicas() {
+ List<Replica> replicas = new ArrayList<>();
+ for (Broker broker : this.brokers) {
+ replicas.addAll(broker.replicas());
+ }
+ return replicas;
+ }
+
+ Broker addBroker(Broker broker) {
+ this.brokers.add(broker);
+ return broker;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Rack rack = (Rack) o;
+ return Objects.equals(id, rack.id);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(id);
+ }
+
+ @Override
+ public String toString() {
+ return "Rack{" +
+ "id='" + id + '\'' +
+ ", brokers=" + brokers +
+ '}';
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Replica.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Replica.java
new file mode 100644
index 000000000..37b1156ff
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Replica.java
@@ -0,0 +1,129 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.Objects;
+
+/**
+ * @author leewei
+ * @date 2022/4/29
+ */
+public class Replica {
+ private final Load load;
+ private final Replica original;
+ private final TopicPartition topicPartition;
+ private Broker broker;
+ private boolean isLeader;
+ private boolean isOffline;
+
+ public Replica(Broker broker, TopicPartition topicPartition, boolean isLeader, boolean isOffline) {
+ this(broker, topicPartition, isLeader, isOffline, false);
+ }
+
+ private Replica(Broker broker, TopicPartition topicPartition, boolean isLeader, boolean isOffline, boolean isOriginal) {
+ if (isOriginal) {
+ this.original = null;
+ } else {
+ this.original = new Replica(broker, topicPartition, isLeader, isOffline, true);
+ }
+ this.load = new Load();
+ this.topicPartition = topicPartition;
+ this.broker = broker;
+ this.isLeader = isLeader;
+ this.isOffline = isOffline;
+ }
+
+ public TopicPartition topicPartition() {
+ return topicPartition;
+ }
+
+ public Replica original() {
+ return original;
+ }
+
+ public Broker broker() {
+ return broker;
+ }
+
+ public void setBroker(Broker broker) {
+ checkOriginal();
+ this.broker = broker;
+ }
+
+ public boolean isLeader() {
+ return isLeader;
+ }
+
+ public Load load() {
+ return load;
+ }
+
+ void setLoad(Load load) {
+ checkOriginal();
+ this.load.addLoad(load);
+ }
+
+ Load makeFollower() {
+ checkOriginal();
+ this.isLeader = false;
+ // TODO: recalculate the CPU load
+ Load leaderLoadDelta = new Load();
+ leaderLoadDelta.setLoad(Resource.NW_OUT, this.load.loadFor(Resource.NW_OUT));
+ this.load.subtractLoad(leaderLoadDelta);
+ return leaderLoadDelta;
+ }
+
+ void makeLeader(Load leaderLoadDelta) {
+ checkOriginal();
+ this.isLeader = true;
+ this.load.addLoad(leaderLoadDelta);
+ }
+
+ public boolean isLeaderChanged() {
+ checkOriginal();
+ return this.original.isLeader != this.isLeader;
+ }
+
+ public boolean isChanged() {
+ checkOriginal();
+ return this.original.broker != this.broker || this.original.isLeader != this.isLeader;
+ }
+
+ private void checkOriginal() {
+ if (this.original == null) {
+ throw new IllegalStateException("This is a original replica, this operation is not supported.");
+ }
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ checkOriginal();
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Replica replica = (Replica) o;
+ return topicPartition.equals(replica.topicPartition) && this.original.broker.equals(replica.original.broker);
+ }
+
+ @Override
+ public int hashCode() {
+ checkOriginal();
+ return Objects.hash(topicPartition, this.original.broker);
+ }
+
+ @Override
+ public String toString() {
+ checkOriginal();
+ return "Replica{" +
+ "topicPartition=" + topicPartition +
+ ", originalBroker=" + this.original.broker.id() +
+ ", broker=" + broker.id() +
+ ", originalIsLeader=" + this.original.isLeader +
+ ", isLeader=" + isLeader +
+ ", load=" + load +
+ '}';
+ }
+ // TODO: replica state, still to be decided
+ public boolean isCurrentOffline() {
+ return isOffline;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/ReplicaPlacementInfo.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/ReplicaPlacementInfo.java
new file mode 100644
index 000000000..0364c3928
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/ReplicaPlacementInfo.java
@@ -0,0 +1,48 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import java.util.Objects;
+
+public class ReplicaPlacementInfo {
+ private final int _brokerId;
+ private final String _logdir;
+
+ public ReplicaPlacementInfo(int brokerId, String logdir) {
+ _brokerId = brokerId;
+ _logdir = logdir;
+ }
+
+ public ReplicaPlacementInfo(Integer brokerId) {
+ this(brokerId, null);
+ }
+
+ public Integer brokerId() {
+ return _brokerId;
+ }
+
+ public String logdir() {
+ return _logdir;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof ReplicaPlacementInfo)) {
+ return false;
+ }
+ ReplicaPlacementInfo info = (ReplicaPlacementInfo) o;
+ return _brokerId == info._brokerId && Objects.equals(_logdir, info._logdir);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(_brokerId, _logdir);
+ }
+
+ @Override
+ public String toString() {
+ if (_logdir == null) {
+ return String.format("{Broker: %d}", _brokerId);
+ } else {
+ return String.format("{Broker: %d, Logdir: %s}", _brokerId, _logdir);
+ }
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Resource.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Resource.java
new file mode 100644
index 000000000..ff31a0c17
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Resource.java
@@ -0,0 +1,29 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+/**
+ * @author leewei
+ * @date 2022/5/10
+ */
+public enum Resource {
+ CPU("cpu", 0),
+ NW_IN("bytesIn", 1),
+ NW_OUT("bytesOut", 2),
+ DISK("disk", 3);
+
+ private final String resource;
+ private final int id;
+
+ Resource(String resource, int id) {
+ this.resource = resource;
+ this.id = id;
+ }
+
+ public String resource() {
+ return this.resource;
+ }
+
+ public int id() {
+ return this.id;
+ }
+
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Supplier.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Supplier.java
new file mode 100644
index 000000000..70db965c0
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Supplier.java
@@ -0,0 +1,112 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.model;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.MetricStore;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.Metrics;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.elasticsearch.ElasticsearchMetricStore;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.utils.MetadataUtils;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.Cluster;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.PartitionInfo;
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+/**
+ * @author leewei
+ * @date 2022/5/12
+ */
+public class Supplier {
+ public static Map<String, Object> subConfig(Map<String, Object> config, String prefix, boolean stripPrefix) {
+ return config.entrySet().stream()
+ .filter(e -> e.getKey().startsWith(prefix))
+ .collect(Collectors.toMap(e -> stripPrefix ? e.getKey().substring(prefix.length()) : e.getKey(),
+ Map.Entry::getValue));
+ }
+
+ public static ClusterModel load(String clusterName, int beforeSeconds, String kafkaBootstrapServer, String esUrls, String esPassword, String esIndexPrefix, Map<Integer, Capacity> capacitiesById, Set<String> ignoredTopics) {
+ Properties kafkaProperties = new Properties();
+ kafkaProperties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServer);
+ return load(clusterName, beforeSeconds, kafkaProperties, esUrls, esPassword, esIndexPrefix, capacitiesById, ignoredTopics);
+ }
+
+ public static ClusterModel load(String clusterName, int beforeSeconds, Properties kafkaProperties, String esUrls, String esPassword, String esIndexPrefix, Map<Integer, Capacity> capacitiesById, Set<String> ignoredTopics) {
+ MetricStore store = new ElasticsearchMetricStore(esUrls, esPassword, esIndexPrefix);
+ Metrics metrics = store.getMetrics(clusterName, beforeSeconds);
+ return load(kafkaProperties, capacitiesById, metrics, ignoredTopics);
+ }
+
+ public static ClusterModel load(Properties kafkaProperties, Map<Integer, Capacity> capacitiesById, Metrics metrics, Set<String> ignoredTopics) {
+ ClusterModel model = new ClusterModel();
+ Cluster cluster = MetadataUtils.metadata(kafkaProperties);
+
+ // nodes
+ for (Node node: cluster.nodes()) {
+ addBroker(node, false, model, capacitiesById);
+ }
+
+ // replicas
+ cluster.topics()
+ .stream()
+ .filter(topic -> !ignoredTopics.contains(topic))
+ .forEach(topic -> {
+ List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
+ for (PartitionInfo partition : partitions) {
+ // TODO fix ignore no partition leader
+ if (partition.leader() == null) {
+ continue;
+ }
+
+ TopicPartition topicPartition = new TopicPartition(partition.topic(), partition.partition());
+ Load leaderLoad = metrics.load(topicPartition);
+ if (leaderLoad == null) {
+ if (partition.leader() == null) {
+ // set empty load
+ leaderLoad = new Load();
+ } else {
+ throw new IllegalArgumentException("Cannot get leader load of topic partiton: " + topicPartition);
+ }
+ }
+
+ // leader nw out + follower nw out
+ leaderLoad.setLoad(Resource.NW_OUT,
+ leaderLoad.loadFor(Resource.NW_OUT) +
+ leaderLoad.loadFor(Resource.NW_IN) * (partition.replicas().length - 1));
+
+ Load followerLoad = new Load();
+ followerLoad.addLoad(leaderLoad);
+ followerLoad.setLoad(Resource.NW_OUT, 0);
+ List<Node> offlineReplicas = Arrays.asList(partition.offlineReplicas());
+ for (Node n : partition.replicas()) {
+ boolean isLeader = partition.leader() != null && partition.leader().equals(n);
+ boolean isOffline = offlineReplicas.contains(n);
+ if (isOffline) {
+ if (model.broker(n.id()) == null) {
+ // add offline broker
+ addBroker(n, true, model, capacitiesById);
+ }
+ }
+ model.addReplica(n.id(), topicPartition, isLeader, isOffline, isLeader ? leaderLoad : followerLoad);
+ }
+ }
+ });
+ return model;
+ }
+
+ private static String rack(Node node) {
+ return (node.rack() == null || "".equals(node.rack())) ? node.host() : node.rack();
+ }
+
+ private static void addBroker(Node node, boolean isOffline, ClusterModel model, Map<Integer, Capacity> capacitiesById) {
+ // rack
+ Rack rack = model.addRack(rack(node));
+ // broker
+ Capacity capacity = capacitiesById.get(node.id());
+ if (capacity == null)
+ throw new IllegalArgumentException("Cannot get capacity of node: " + node);
+
+ model.addBroker(rack.id(), node.id(), node.host(), isOffline, capacity);
+ }
+
+}
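
Reviewer note: a sketch (not part of this patch) of assembling a ClusterModel from live metadata and metrics via Supplier.load. The bootstrap servers, ES endpoint, password, index prefix, and capacities are placeholders, and the call only succeeds against a reachable Kafka cluster and Elasticsearch.

    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Capacity;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
    import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Supplier;

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class SupplierSketch {
        public static void main(String[] args) {
            // One Capacity entry per broker id; the values are placeholders.
            Capacity capacity = new Capacity();
            capacity.setCapacity(Resource.CPU, 100d);
            capacity.setCapacity(Resource.NW_IN, 1_000d);
            capacity.setCapacity(Resource.NW_OUT, 1_000d);
            capacity.setCapacity(Resource.DISK, 10_000d);
            Map<Integer, Capacity> capacitiesById = new HashMap<>();
            capacitiesById.put(1, capacity);
            capacitiesById.put(2, capacity);

            ClusterModel model = Supplier.load(
                    "cluster-1",                   // cluster name used in the metrics query
                    3_600,                         // look back one hour of metrics
                    "kafka-1:9092",                // Kafka bootstrap servers
                    "es-1:9200",                   // Elasticsearch hosts
                    "es-password",
                    "ks_kafka_partition_metric_",  // hypothetical index prefix
                    capacitiesById,
                    Collections.emptySet());       // no ignored topics
            System.out.println("brokers in model: " + model.brokers().size());
        }
    }
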
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ActionAcceptance.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ActionAcceptance.java
new file mode 100644
index 000000000..34105557c
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ActionAcceptance.java
@@ -0,0 +1,5 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer;
+
+public enum ActionAcceptance {
+ ACCEPT, REJECT;
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ActionType.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ActionType.java
new file mode 100644
index 000000000..91ddb58f9
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ActionType.java
@@ -0,0 +1,18 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer;
+
+public enum ActionType {
+ REPLICA_MOVEMENT("REPLICA"),
+ LEADERSHIP_MOVEMENT("LEADER");
+// REPLICA_SWAP("SWAP");
+
+ private final String _balancingAction;
+
+ ActionType(String balancingAction) {
+ _balancingAction = balancingAction;
+ }
+
+ @Override
+ public String toString() {
+ return _balancingAction;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/AnalyzerUtils.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/AnalyzerUtils.java
new file mode 100644
index 000000000..f327bfb99
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/AnalyzerUtils.java
@@ -0,0 +1,73 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Replica;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ReplicaPlacementInfo;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals.Goal;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+import static com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance.ACCEPT;
+
+public class AnalyzerUtils {
+
+ public static Set<String> getSplitTopics(String value) {
+ if (StringUtils.isBlank(value)) {
+ return new HashSet<>();
+ }
+ String[] arr = value.split(",");
+ return Arrays.stream(arr).collect(Collectors.toSet());
+ }
+
+ public static Set<Integer> getSplitBrokers(String value) {
+ if (StringUtils.isBlank(value)) {
+ return new HashSet<>();
+ }
+ String[] arr = value.split(",");
+ return Arrays.stream(arr).map(Integer::valueOf).collect(Collectors.toSet());
+ }
+
+ public static Set<ExecutionProposal> getDiff(Map<TopicPartition, List<ReplicaPlacementInfo>> initialReplicaDistribution,
+ Map<TopicPartition, ReplicaPlacementInfo> initialLeaderDistribution,
+ ClusterModel optimizedClusterModel) {
+ Map<TopicPartition, List<ReplicaPlacementInfo>> finalReplicaDistribution = optimizedClusterModel.getReplicaDistribution();
+ if (!initialReplicaDistribution.keySet().equals(finalReplicaDistribution.keySet())) {
+ throw new IllegalArgumentException("diff distributions with different partitions.");
+ }
+ Set<ExecutionProposal> diff = new HashSet<>();
+ for (Map.Entry<TopicPartition, List<ReplicaPlacementInfo>> entry : initialReplicaDistribution.entrySet()) {
+ TopicPartition tp = entry.getKey();
+ List<ReplicaPlacementInfo> initialReplicas = entry.getValue();
+ List<ReplicaPlacementInfo> finalReplicas = finalReplicaDistribution.get(tp);
+ Replica finalLeader = optimizedClusterModel.partition(tp);
+ ReplicaPlacementInfo finalLeaderPlacementInfo = new ReplicaPlacementInfo(finalLeader.broker().id(), "");
+ if (finalReplicas.equals(initialReplicas) && initialLeaderDistribution.get(tp).equals(finalLeaderPlacementInfo)) {
+ continue;
+ }
+ if (!finalLeaderPlacementInfo.equals(finalReplicas.get(0))) {
+ int leaderPos = finalReplicas.indexOf(finalLeaderPlacementInfo);
+ finalReplicas.set(leaderPos, finalReplicas.get(0));
+ finalReplicas.set(0, finalLeaderPlacementInfo);
+ }
+ double partitionSize = optimizedClusterModel.partition(tp).load().loadFor(Resource.DISK);
+ diff.add(new ExecutionProposal(tp, partitionSize, initialLeaderDistribution.get(tp), initialReplicas, finalReplicas));
+ }
+ return diff;
+ }
+
+ public static ActionAcceptance isProposalAcceptableForOptimizedGoals(Set<Goal> optimizedGoals,
+ BalancingAction proposal,
+ ClusterModel clusterModel) {
+ for (Goal optimizedGoal : optimizedGoals) {
+ ActionAcceptance actionAcceptance = optimizedGoal.actionAcceptance(proposal, clusterModel);
+ if (actionAcceptance != ACCEPT) {
+ return actionAcceptance;
+ }
+ }
+ return ACCEPT;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/BalancingAction.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/BalancingAction.java
new file mode 100644
index 000000000..feafda152
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/BalancingAction.java
@@ -0,0 +1,40 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer;
+
+import org.apache.kafka.common.TopicPartition;
+
+public class BalancingAction {
+ private final TopicPartition _tp;
+ private final Integer _sourceBrokerId;
+ private final Integer _destinationBrokerId;
+ private final ActionType _actionType;
+
+ public BalancingAction(TopicPartition tp,
+ Integer sourceBrokerId,
+ Integer destinationBrokerId,
+ ActionType actionType) {
+ _tp = tp;
+ _sourceBrokerId = sourceBrokerId;
+ _destinationBrokerId = destinationBrokerId;
+ _actionType = actionType;
+ }
+
+ public Integer sourceBrokerId() {
+ return _sourceBrokerId;
+ }
+
+ public Integer destinationBrokerId() {
+ return _destinationBrokerId;
+ }
+
+ public ActionType balancingAction() {
+ return _actionType;
+ }
+
+ public TopicPartition topicPartition() {
+ return _tp;
+ }
+
+ public String topic() {
+ return _tp.topic();
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ExecutionProposal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ExecutionProposal.java
new file mode 100644
index 000000000..90e66e6cf
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/ExecutionProposal.java
@@ -0,0 +1,72 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ReplicaPlacementInfo;
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+public class ExecutionProposal {
+
+ private final TopicPartition _tp;
+ private final double _partitionSize;
+ private final ReplicaPlacementInfo _oldLeader;
+ private final List<ReplicaPlacementInfo> _oldReplicas;
+ private final List<ReplicaPlacementInfo> _newReplicas;
+ private final Set<ReplicaPlacementInfo> _replicasToAdd;
+ private final Set<ReplicaPlacementInfo> _replicasToRemove;
+
+ public ExecutionProposal(TopicPartition tp,
+ double partitionSize,
+ ReplicaPlacementInfo oldLeader,
+ List<ReplicaPlacementInfo> oldReplicas,
+ List<ReplicaPlacementInfo> newReplicas) {
+ _tp = tp;
+ _partitionSize = partitionSize;
+ _oldLeader = oldLeader;
+ _oldReplicas = oldReplicas == null ? Collections.emptyList() : oldReplicas;
+ _newReplicas = newReplicas;
+ Set<Integer> newBrokerList = _newReplicas.stream().mapToInt(ReplicaPlacementInfo::brokerId).boxed().collect(Collectors.toSet());
+ Set<Integer> oldBrokerList = _oldReplicas.stream().mapToInt(ReplicaPlacementInfo::brokerId).boxed().collect(Collectors.toSet());
+ _replicasToAdd = _newReplicas.stream().filter(r -> !oldBrokerList.contains(r.brokerId())).collect(Collectors.toSet());
+ _replicasToRemove = _oldReplicas.stream().filter(r -> !newBrokerList.contains(r.brokerId())).collect(Collectors.toSet());
+ }
+
+ public TopicPartition tp() {
+ return _tp;
+ }
+
+ public double partitionSize() {
+ return _partitionSize;
+ }
+
+ public ReplicaPlacementInfo oldLeader() {
+ return _oldLeader;
+ }
+
+ public List<ReplicaPlacementInfo> oldReplicas() {
+ return _oldReplicas;
+ }
+
+ public List<ReplicaPlacementInfo> newReplicas() {
+ return _newReplicas;
+ }
+
+ public Map<Integer, Double[]> replicasToAdd() {
+ Map<Integer, Double[]> addData = new HashMap<>();
+ _replicasToAdd.forEach(i -> {
+ Double[] total = {1d, _partitionSize};
+ addData.put(i.brokerId(), total);
+ });
+ return Collections.unmodifiableMap(addData);
+ }
+
+ public Map<Integer, Double[]> replicasToRemove() {
+ Map<Integer, Double[]> removeData = new HashMap<>();
+ _replicasToRemove.forEach(i -> {
+ Double[] total = {1d, _partitionSize};
+ removeData.put(i.brokerId(), total);
+ });
+ return Collections.unmodifiableMap(removeData);
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/GoalOptimizer.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/GoalOptimizer.java
new file mode 100644
index 000000000..2f37ca7ae
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/GoalOptimizer.java
@@ -0,0 +1,48 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.OptimizerResult;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ReplicaPlacementInfo;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals.Goal;
+import org.apache.kafka.common.TopicPartition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+
+/**
+ * @author leewei
+ * @date 2022/4/29
+ */
+public class GoalOptimizer {
+ private static final Logger logger = LoggerFactory.getLogger(GoalOptimizer.class);
+
+ public OptimizerResult optimizations(ClusterModel clusterModel, OptimizationOptions optimizationOptions) {
+ Set<Goal> optimizedGoals = new HashSet<>();
+ OptimizerResult optimizerResult = new OptimizerResult(clusterModel, optimizationOptions);
+ optimizerResult.setBalanceBrokersFormBefore(clusterModel.brokers());
+ Map<TopicPartition, List<ReplicaPlacementInfo>> initReplicaDistribution = clusterModel.getReplicaDistribution();
+ Map<TopicPartition, ReplicaPlacementInfo> initLeaderDistribution = clusterModel.getLeaderDistribution();
+ try {
+ Map<String, Goal> goalMap = new HashMap<>();
+ ServiceLoader<Goal> serviceLoader = ServiceLoader.load(Goal.class);
+ for (Goal goal : serviceLoader) {
+ goalMap.put(goal.name(), goal);
+ }
+ for (String g : optimizationOptions.goals()) {
+ Goal goal = goalMap.get(g);
+ if (goal != null) {
+ logger.info("Start {} balancing", goal.name());
+ goal.optimize(clusterModel, optimizedGoals, optimizationOptions);
+ optimizedGoals.add(goal);
+ }
+ }
+ } catch (Exception e) {
+ logger.error("Cluster balancing goal error", e);
+ }
+ Set<ExecutionProposal> proposals = AnalyzerUtils.getDiff(initReplicaDistribution, initLeaderDistribution, clusterModel);
+ optimizerResult.setBalanceBrokersFormAfter(clusterModel.brokers());
+ optimizerResult.setExecutionProposal(proposals);
+ return optimizerResult;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/OptimizationOptions.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/OptimizationOptions.java
new file mode 100644
index 000000000..53befbeea
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/OptimizationOptions.java
@@ -0,0 +1,60 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceParameter;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+
+import java.util.*;
+
+public class OptimizationOptions {
+ private final Set<String> _excludedTopics;
+ private final Set<Integer> _offlineBrokers;
+ private final Set<Integer> _balanceBrokers;
+ private final Map<Resource, Double> _resourceBalancePercentage;
+ private final List<String> _goals;
+ private final BalanceParameter _parameter;
+
+ public OptimizationOptions(BalanceParameter parameter) {
+ _parameter = parameter;
+ _goals = parameter.getGoals();
+ _excludedTopics = AnalyzerUtils.getSplitTopics(parameter.getExcludedTopics());
+ _offlineBrokers = AnalyzerUtils.getSplitBrokers(parameter.getOfflineBrokers());
+ _balanceBrokers = AnalyzerUtils.getSplitBrokers(parameter.getBalanceBrokers());
+ _resourceBalancePercentage = new HashMap<>();
+ _resourceBalancePercentage.put(Resource.CPU, parameter.getCpuThreshold());
+ _resourceBalancePercentage.put(Resource.DISK, parameter.getDiskThreshold());
+ _resourceBalancePercentage.put(Resource.NW_IN, parameter.getNetworkInThreshold());
+ _resourceBalancePercentage.put(Resource.NW_OUT, parameter.getNetworkOutThreshold());
+ }
+
+ public Set<String> excludedTopics() {
+ return Collections.unmodifiableSet(_excludedTopics);
+ }
+
+ public Set<Integer> offlineBrokers() {
+ return Collections.unmodifiableSet(_offlineBrokers);
+ }
+
+ public Set<Integer> balanceBrokers() {
+ return Collections.unmodifiableSet(_balanceBrokers);
+ }
+
+ public double resourceBalancePercentageFor(Resource resource) {
+ return _resourceBalancePercentage.get(resource);
+ }
+
+ public List<String> goals() {
+ return Collections.unmodifiableList(_goals);
+ }
+
+ public double topicReplicaThreshold() {
+ return _parameter.getTopicReplicaThreshold();
+ }
+
+ public BalanceParameter parameter() {
+ return _parameter;
+ }
+
+ public double topicLeaderThreshold() {
+ return _parameter.getTopicLeaderThreshold();
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/AbstractGoal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/AbstractGoal.java
new file mode 100644
index 000000000..0916a228c
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/AbstractGoal.java
@@ -0,0 +1,129 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Broker;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Replica;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.*;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+public abstract class AbstractGoal implements Goal {
+
+    /**
+     * Core balancing logic for a single broker.
+     */
+    protected abstract void rebalanceForBroker(Broker broker, ClusterModel clusterModel, Set<Goal> optimizedGoals, OptimizationOptions optimizationOptions);
+
+    /**
+     * Runs the balancing algorithm over every broker in the cluster (or only over the configured balance brokers).
+     */
+    @Override
+    public void optimize(ClusterModel clusterModel, Set<Goal> optimizedGoals, OptimizationOptions optimizationOptions) {
+ initGoalState(clusterModel, optimizationOptions);
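+        //An empty balance-broker list means every broker in the cluster participates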
+        SortedSet<Broker> brokenBrokers = clusterModel.brokers().stream()
+ .filter(b -> optimizationOptions.balanceBrokers().isEmpty()
+ || optimizationOptions.balanceBrokers().contains(b.id()))
+ .collect(Collectors.toCollection(TreeSet::new));
+
+        // SortedSet<Broker> brokenBrokers = clusterModel.brokers();
+
+ for (Broker broker : brokenBrokers) {
+ rebalanceForBroker(broker, clusterModel, optimizedGoals, optimizationOptions);
+ }
+ }
+
+ protected abstract void initGoalState(ClusterModel clusterModel, OptimizationOptions optimizationOptions);
+
+    /**
+     * Based on the replica selected for balancing, the candidate destination brokers and the action type,
+     * applies the corresponding change to the cluster model and returns the broker that accepted the action (or null).
+     */
+    protected Broker maybeApplyBalancingAction(ClusterModel clusterModel,
+                                               Replica replica,
+                                               Collection<Broker> candidateBrokers,
+                                               ActionType action,
+                                               Set<Goal> optimizedGoals,
+                                               OptimizationOptions optimizationOptions) {
+        List<Broker> eligibleBrokers = eligibleBrokers(replica, candidateBrokers, action, optimizationOptions);
+ for (Broker broker : eligibleBrokers) {
+ BalancingAction proposal = new BalancingAction(replica.topicPartition(), replica.broker().id(), broker.id(), action);
+            //Skip this candidate broker if the move is not legal (e.g. the destination already holds the replica)
+ if (!legitMove(replica, broker, action)) {
+ continue;
+ }
+            //Skip this candidate if the proposed action does not satisfy this goal's own balance constraints
+ if (!selfSatisfied(clusterModel, proposal)) {
+ continue;
+ }
+            //Reject the action if it conflicts with any goal that has already been optimized
+ ActionAcceptance acceptance = AnalyzerUtils.isProposalAcceptableForOptimizedGoals(optimizedGoals, proposal, clusterModel);
+ if (acceptance == ActionAcceptance.ACCEPT) {
+ if (action == ActionType.LEADERSHIP_MOVEMENT) {
+ clusterModel.relocateLeadership(name(), action.toString(), replica.topicPartition(), replica.broker().id(), broker.id());
+ } else if (action == ActionType.REPLICA_MOVEMENT) {
+ clusterModel.relocateReplica(name(), action.toString(), replica.topicPartition(), replica.broker().id(), broker.id());
+ }
+ return broker;
+ }
+ }
+ return null;
+ }
+
+    /**
+     * Checks whether a replica action is legal:
+     * 1. Replica movement: the destination broker must not already hold the replica
+     * 2. Leadership movement: the source replica must be the leader and the destination broker must hold a replica of the partition
+     */
+ private static boolean legitMove(Replica replica,
+ Broker destinationBroker, ActionType actionType) {
+ switch (actionType) {
+ case REPLICA_MOVEMENT:
+ return destinationBroker.replica(replica.topicPartition()) == null;
+ case LEADERSHIP_MOVEMENT:
+ return replica.isLeader() && destinationBroker.replica(replica.topicPartition()) != null;
+ default:
+ return false;
+ }
+ }
+
+ protected abstract boolean selfSatisfied(ClusterModel clusterModel, BalancingAction action);
+
+    /**
+     * Filters the candidate broker list for the given replica and action type.
+     */
+    public static List<Broker> eligibleBrokers(Replica replica,
+                                       Collection<Broker> candidates,
+ ActionType action,
+ OptimizationOptions optimizationOptions) {
+        List<Broker> eligibleBrokers = new ArrayList<>(candidates);
+ filterOutBrokersExcludedForLeadership(eligibleBrokers, optimizationOptions, replica, action);
+ filterOutBrokersExcludedForReplicaMove(eligibleBrokers, optimizationOptions, action);
+ return eligibleBrokers;
+ }
+
+    /**
+     * For leadership movement (or when the replica is a leader), removes the excluded brokers from the candidate list.
+     */
+    public static void filterOutBrokersExcludedForLeadership(List<Broker> eligibleBrokers,
+ OptimizationOptions optimizationOptions,
+ Replica replica,
+ ActionType action) {
+        Set<Integer> excludedBrokers = optimizationOptions.offlineBrokers();
+ if (!excludedBrokers.isEmpty() && (action == ActionType.LEADERSHIP_MOVEMENT || replica.isLeader())) {
+ eligibleBrokers.removeIf(broker -> excludedBrokers.contains(broker.id()));
+ }
+ }
+
+    /**
+     * For replica movement, removes the excluded brokers from the candidate list.
+     */
+    public static void filterOutBrokersExcludedForReplicaMove(List<Broker> eligibleBrokers,
+ OptimizationOptions optimizationOptions,
+ ActionType action) {
+        Set<Integer> excludedBrokers = optimizationOptions.offlineBrokers();
+ if (!excludedBrokers.isEmpty() && action == ActionType.REPLICA_MOVEMENT) {
+ eligibleBrokers.removeIf(broker -> excludedBrokers.contains(broker.id()));
+ }
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/DiskDistributionGoal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/DiskDistributionGoal.java
new file mode 100644
index 000000000..7b1eb8197
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/DiskDistributionGoal.java
@@ -0,0 +1,31 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionType;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.BalancingAction;
+
+/**
+ * @author leewei
+ * @date 2022/5/24
+ */
+public class DiskDistributionGoal extends ResourceDistributionGoal {
+
+ @Override
+ protected Resource resource() {
+ return Resource.DISK;
+ }
+
+ @Override
+ public String name() {
+ return DiskDistributionGoal.class.getSimpleName();
+ }
+
+ @Override
+ public ActionAcceptance actionAcceptance(BalancingAction action, ClusterModel clusterModel) {
+ // Leadership movement won't cause disk utilization change.
+ return action.balancingAction() == ActionType.LEADERSHIP_MOVEMENT ? ActionAcceptance.ACCEPT : super.actionAcceptance(action, clusterModel);
+ }
+
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/Goal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/Goal.java
new file mode 100644
index 000000000..2c1665f04
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/Goal.java
@@ -0,0 +1,17 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.BalancingAction;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.OptimizationOptions;
+
+import java.util.Set;
+
+public interface Goal {
+
+    void optimize(ClusterModel clusterModel, Set<Goal> optimizedGoals, OptimizationOptions optimizationOptions);
+
+ String name();
+
+ ActionAcceptance actionAcceptance(BalancingAction action, ClusterModel clusterModel);
+}
\ No newline at end of file
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/NetworkInboundDistributionGoal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/NetworkInboundDistributionGoal.java
new file mode 100644
index 000000000..2d1657c23
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/NetworkInboundDistributionGoal.java
@@ -0,0 +1,30 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionType;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.BalancingAction;
+
+/**
+ * @author leewei
+ * @date 2022/5/20
+ */
+public class NetworkInboundDistributionGoal extends ResourceDistributionGoal {
+
+ @Override
+ protected Resource resource() {
+ return Resource.NW_IN;
+ }
+
+ @Override
+ public String name() {
+ return NetworkInboundDistributionGoal.class.getSimpleName();
+ }
+
+ @Override
+ public ActionAcceptance actionAcceptance(BalancingAction action, ClusterModel clusterModel) {
+ // Leadership movement won't cause inbound network utilization change.
+ return action.balancingAction() == ActionType.LEADERSHIP_MOVEMENT ? ActionAcceptance.ACCEPT : super.actionAcceptance(action, clusterModel);
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/NetworkOutboundDistributionGoal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/NetworkOutboundDistributionGoal.java
new file mode 100644
index 000000000..fb193f4c0
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/NetworkOutboundDistributionGoal.java
@@ -0,0 +1,22 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Resource;
+
+/**
+ * @author leewei
+ * @date 2022/5/24
+ */
+public class NetworkOutboundDistributionGoal extends ResourceDistributionGoal {
+
+ @Override
+ protected Resource resource() {
+ return Resource.NW_OUT;
+ }
+
+ @Override
+ public String name() {
+ return NetworkOutboundDistributionGoal.class.getSimpleName();
+ }
+
+}
+
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/ResourceDistributionGoal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/ResourceDistributionGoal.java
new file mode 100644
index 000000000..8109859a8
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/ResourceDistributionGoal.java
@@ -0,0 +1,227 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.*;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionType;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.BalancingAction;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.OptimizationOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.SortedSet;
+
+/**
+ * @author leewei
+ * @date 2022/5/20
+ */
+public abstract class ResourceDistributionGoal extends AbstractGoal {
+ private static final Logger logger = LoggerFactory.getLogger(ResourceDistributionGoal.class);
+ private double balanceUpperThreshold;
+ private double balanceLowerThreshold;
+
+ @Override
+ protected void initGoalState(ClusterModel clusterModel, OptimizationOptions optimizationOptions) {
+ double avgUtilization = clusterModel.utilizationFor(resource());
+ double balancePercentage = optimizationOptions.resourceBalancePercentageFor(resource());
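+        //A broker counts as balanced when its utilization stays within ±balancePercentage of the cluster average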
+ this.balanceUpperThreshold = avgUtilization * (1 + balancePercentage);
+ this.balanceLowerThreshold = avgUtilization * (1 - balancePercentage);
+ }
+
+ @Override
+ protected void rebalanceForBroker(Broker broker,
+ ClusterModel clusterModel,
+                                      Set<Goal> optimizedGoals,
+ OptimizationOptions optimizationOptions) {
+ double utilization = broker.utilizationFor(resource());
+
+ boolean requireLessLoad = utilization > this.balanceUpperThreshold;
+ boolean requireMoreLoad = utilization < this.balanceLowerThreshold;
+ if (!requireMoreLoad && !requireLessLoad) {
+ return;
+ }
+
+ // First try leadership movement
+ if (resource() == Resource.NW_OUT || resource() == Resource.CPU) {
+ if (requireLessLoad && rebalanceByMovingLoadOut(broker, clusterModel, optimizedGoals,
+ ActionType.LEADERSHIP_MOVEMENT, optimizationOptions)) {
+ logger.debug("Successfully balanced {} for broker {} by moving out leaders.", resource(), broker.id());
+ requireLessLoad = false;
+ }
+ if (requireMoreLoad && rebalanceByMovingLoadIn(broker, clusterModel, optimizedGoals,
+ ActionType.LEADERSHIP_MOVEMENT, optimizationOptions)) {
+ logger.debug("Successfully balanced {} for broker {} by moving in leaders.", resource(), broker.id());
+ requireMoreLoad = false;
+ }
+ }
+
+ boolean balanced = true;
+ if (requireLessLoad) {
+ if (!rebalanceByMovingLoadOut(broker, clusterModel, optimizedGoals,
+ ActionType.REPLICA_MOVEMENT, optimizationOptions)) {
+ balanced = rebalanceBySwappingLoadOut(broker, clusterModel, optimizedGoals, optimizationOptions);
+ }
+ } else if (requireMoreLoad) {
+ if (!rebalanceByMovingLoadIn(broker, clusterModel, optimizedGoals,
+ ActionType.REPLICA_MOVEMENT, optimizationOptions)) {
+ balanced = rebalanceBySwappingLoadIn(broker, clusterModel, optimizedGoals, optimizationOptions);
+ }
+ }
+ if (balanced) {
+ logger.debug("Successfully balanced {} for broker {} by moving leaders and replicas.", resource(), broker.id());
+ }
+ }
+
+ private boolean rebalanceByMovingLoadOut(Broker broker,
+ ClusterModel clusterModel,
+                                             Set<Goal> optimizedGoals,
+ ActionType actionType,
+ OptimizationOptions optimizationOptions) {
+
+        SortedSet<Broker> candidateBrokers = sortedCandidateBrokersUnderThreshold(clusterModel, this.balanceUpperThreshold, optimizationOptions, broker, false);
+        SortedSet<Replica> replicasToMove = sortedCandidateReplicas(broker, actionType, optimizationOptions, true);
+
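+        //Move this broker's replicas (or leaders) onto candidate brokers that are still below the upper threshold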
+ for (Replica replica : replicasToMove) {
+ Broker acceptedBroker = maybeApplyBalancingAction(clusterModel, replica, candidateBrokers, actionType, optimizedGoals, optimizationOptions);
+
+ if (acceptedBroker != null) {
+ if (broker.utilizationFor(resource()) < this.balanceUpperThreshold) {
+ return true;
+ }
+ // Remove and reinsert the broker so the order is correct.
+ // candidateBrokers.remove(acceptedBroker);
+ candidateBrokers.removeIf(b -> b.id() == acceptedBroker.id());
+ if (acceptedBroker.utilizationFor(resource()) < this.balanceUpperThreshold) {
+ candidateBrokers.add(acceptedBroker);
+ }
+ }
+ }
+
+ return false;
+ }
+
+ private boolean rebalanceByMovingLoadIn(Broker broker,
+ ClusterModel clusterModel,
+                                            Set<Goal> optimizedGoals,
+ ActionType actionType,
+ OptimizationOptions optimizationOptions) {
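+        //Pull load from brokers sitting above the lower threshold onto this under-loaded broker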
+        SortedSet<Broker> candidateBrokers = sortedCandidateBrokersOverThreshold(clusterModel, this.balanceLowerThreshold, optimizationOptions, broker, true);
+        Iterator<Broker> candidateBrokersIt = candidateBrokers.iterator();
+ Broker nextCandidateBroker = null;
+ while (true) {
+ Broker candidateBroker;
+ if (nextCandidateBroker != null) {
+ candidateBroker = nextCandidateBroker;
+ nextCandidateBroker = null;
+ } else if (candidateBrokersIt.hasNext()) {
+ candidateBroker = candidateBrokersIt.next();
+ } else {
+ break;
+ }
+            SortedSet<Replica> replicasToMove = sortedCandidateReplicas(candidateBroker, actionType, optimizationOptions, true);
+
+ for (Replica replica : replicasToMove) {
+ Broker acceptedBroker = maybeApplyBalancingAction(clusterModel, replica, Collections.singletonList(broker), actionType, optimizedGoals, optimizationOptions);
+ if (acceptedBroker != null) {
+ if (broker.utilizationFor(resource()) > this.balanceLowerThreshold) {
+ return true;
+ }
+ if (candidateBrokersIt.hasNext() || nextCandidateBroker != null) {
+ if (nextCandidateBroker == null) {
+ nextCandidateBroker = candidateBrokersIt.next();
+ }
+ if (candidateBroker.utilizationFor(resource()) < nextCandidateBroker.utilizationFor(resource())) {
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+    private boolean rebalanceBySwappingLoadOut(Broker broker,
+                                               ClusterModel clusterModel,
+                                               Set<Goal> optimizedGoals,
+                                               OptimizationOptions optimizationOptions) {
+        // Swap-based rebalancing is not implemented yet, so the broker is reported as still unbalanced.
+        return false;
+    }
+
+    private boolean rebalanceBySwappingLoadIn(Broker broker,
+                                              ClusterModel clusterModel,
+                                              Set<Goal> optimizedGoals,
+                                              OptimizationOptions optimizationOptions) {
+        // Swap-based rebalancing is not implemented yet, so the broker is reported as still unbalanced.
+        return false;
+    }
+
+    private SortedSet<Broker> sortedCandidateBrokersUnderThreshold(ClusterModel clusterModel,
+ double utilizationThreshold,
+ OptimizationOptions optimizationOptions,
+ Broker excludedBroker,
+ boolean reverse) {
+ return clusterModel.sortedBrokersFor(
+ b -> b.utilizationFor(resource()) < utilizationThreshold
+ && !excludedBroker.equals(b)
+ // filter brokers
+ && (optimizationOptions.balanceBrokers().isEmpty() || optimizationOptions.balanceBrokers().contains(b.id()))
+ , resource(), reverse);
+ }
+
+    private SortedSet<Broker> sortedCandidateBrokersOverThreshold(ClusterModel clusterModel,
+ double utilizationThreshold,
+ OptimizationOptions optimizationOptions,
+ Broker excludedBroker,
+ boolean reverse) {
+ return clusterModel.sortedBrokersFor(
+ b -> b.utilizationFor(resource()) > utilizationThreshold
+ && !excludedBroker.equals(b)
+ // filter brokers
+ && (optimizationOptions.balanceBrokers().isEmpty() || optimizationOptions.balanceBrokers().contains(b.id()))
+ , resource(), reverse);
+ }
+
+    private SortedSet<Replica> sortedCandidateReplicas(Broker broker,
+ ActionType actionType,
+ OptimizationOptions optimizationOptions,
+ boolean reverse) {
+ return broker.sortedReplicasFor(
+ // exclude topic
+ r -> !optimizationOptions.excludedTopics().contains(r.topicPartition().topic())
+ && r.load().loadFor(resource()) > 0.0
+                // LEADERSHIP_MOVEMENT or NW_OUT requires a leader replica
+ && (actionType != ActionType.LEADERSHIP_MOVEMENT && resource() != Resource.NW_OUT || r.isLeader())
+ , resource(), reverse);
+ }
+
+ protected abstract Resource resource();
+
+ @Override
+ protected boolean selfSatisfied(ClusterModel clusterModel, BalancingAction action) {
+ Broker destinationBroker = clusterModel.broker(action.destinationBrokerId());
+ Broker sourceBroker = clusterModel.broker(action.sourceBrokerId());
+ Replica sourceReplica = sourceBroker.replica(action.topicPartition());
+
+ Load loadToChange;
+ if (action.balancingAction() == ActionType.LEADERSHIP_MOVEMENT) {
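+            // For a leadership switch, only the difference between the leader's and the follower's load moves between brokers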
+ Replica destinationReplica = destinationBroker.replica(action.topicPartition());
+ Load delta = new Load();
+ delta.addLoad(sourceReplica.load());
+ delta.subtractLoad(destinationReplica.load());
+ loadToChange = delta;
+ } else {
+ loadToChange = sourceReplica.load();
+ }
+ double sourceUtilization = sourceBroker.expectedUtilizationAfterRemove(resource(), loadToChange);
+ double destinationUtilization = destinationBroker.expectedUtilizationAfterAdd(resource(), loadToChange);
+
+ return sourceUtilization >= this.balanceLowerThreshold && destinationUtilization <= this.balanceUpperThreshold;
+ }
+
+ @Override
+ public ActionAcceptance actionAcceptance(BalancingAction action, ClusterModel clusterModel) {
+ return this.selfSatisfied(clusterModel, action) ? ActionAcceptance.ACCEPT : ActionAcceptance.REJECT;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/TopicLeadersDistributionGoal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/TopicLeadersDistributionGoal.java
new file mode 100644
index 000000000..454504807
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/TopicLeadersDistributionGoal.java
@@ -0,0 +1,222 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Broker;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Replica;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.BalancingAction;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.OptimizationOptions;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.utils.GoalUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+import static com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance.ACCEPT;
+import static com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance.REJECT;
+import static com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionType.REPLICA_MOVEMENT;
+import static com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionType.LEADERSHIP_MOVEMENT;
+
+public class TopicLeadersDistributionGoal extends AbstractGoal {
+ private static final Logger logger = LoggerFactory.getLogger(TopicLeadersDistributionGoal.class);
+    private Map<String, Integer> _mustHaveTopicMinLeadersPerBroker;
+
+    /**
+     * Performs topic-leader balancing for a single broker.
+     */
+    @Override
+    protected void rebalanceForBroker(Broker broker, ClusterModel clusterModel, Set<Goal> optimizedGoals, OptimizationOptions optimizationOptions) {
+ moveAwayOfflineReplicas(broker, clusterModel, optimizedGoals, optimizationOptions);
+ if (_mustHaveTopicMinLeadersPerBroker.isEmpty()) {
+ return;
+ }
+ if (optimizationOptions.offlineBrokers().contains(broker.id())) {
+ return;
+ }
+
+ for (String topicName : _mustHaveTopicMinLeadersPerBroker.keySet()) {
+ maybeMoveLeaderOfTopicToBroker(topicName, broker, clusterModel, optimizedGoals, optimizationOptions);
+ }
+ }
+
+    /**
+     * Initializes the balancing state:
+     * 1. Excludes the configured brokers and topics
+     * 2. Computes, for each topic, the minimum number of leaders every broker should host
+     */
+ @Override
+ protected void initGoalState(ClusterModel clusterModel, OptimizationOptions optimizationOptions) {
+ _mustHaveTopicMinLeadersPerBroker = new HashMap<>();
+        Set<String> excludedTopics = optimizationOptions.excludedTopics();
+        Set<Integer> excludedBrokers = optimizationOptions.offlineBrokers();
+        Set<String> mustHaveTopicLeadersPerBroker = GoalUtils.getNotExcludeTopics(clusterModel, excludedTopics);
+        Map<String, Integer> numLeadersByTopicNames = clusterModel.numLeadersPerTopic(mustHaveTopicLeadersPerBroker);
+        Set<Broker> allBrokers = GoalUtils.getNotExcludeBrokers(clusterModel, excludedBrokers);
+ for (String topicName : mustHaveTopicLeadersPerBroker) {
+ int topicNumLeaders = numLeadersByTopicNames.get(topicName);
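+            //Minimum leaders per broker: ceil(average leaders per eligible broker * (1 + leader threshold)), or 0 when no brokers are eligible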
+ int avgLeaders = allBrokers.size() == 0 ? 0 : (int) Math.ceil(topicNumLeaders / (double) allBrokers.size() * (1 + optimizationOptions.topicLeaderThreshold()));
+ _mustHaveTopicMinLeadersPerBroker.put(topicName, avgLeaders);
+ }
+ }
+
+    /**
+     * The proposed action is acceptable for this goal when:
+     * 1. the replica on the source broker is currently offline, or
+     * 2. the source broker holds more leaders of the topic than the per-broker minimum
+     */
+ @Override
+ protected boolean selfSatisfied(ClusterModel clusterModel, BalancingAction action) {
+ Broker sourceBroker = clusterModel.broker(action.sourceBrokerId());
+ Replica replicaToBeMoved = sourceBroker.replica(action.topicPartition());
+ if (replicaToBeMoved.broker().replica(action.topicPartition()).isCurrentOffline()) {
+ return action.balancingAction() == REPLICA_MOVEMENT;
+ }
+ String topicName = replicaToBeMoved.topicPartition().topic();
+ return sourceBroker.numLeadersFor(topicName) > minTopicLeadersPerBroker(topicName);
+ }
+
+    /**
+     * Minimum number of leaders of the topic that each broker should host.
+     */
+ private int minTopicLeadersPerBroker(String topicName) {
+ return _mustHaveTopicMinLeadersPerBroker.get(topicName);
+ }
+
+ @Override
+ public String name() {
+ return TopicLeadersDistributionGoal.class.getSimpleName();
+ }
+
+    /**
+     * Decides whether the proposed balancing action may be executed for this goal.
+     */
+ @Override
+ public ActionAcceptance actionAcceptance(BalancingAction action, ClusterModel clusterModel) {
+ if (_mustHaveTopicMinLeadersPerBroker.containsKey(action.topic())) {
+ return ACCEPT;
+ }
+ switch (action.balancingAction()) {
+ case LEADERSHIP_MOVEMENT:
+ case REPLICA_MOVEMENT:
+ Replica replicaToBeRemoved = clusterModel.broker(action.sourceBrokerId()).replica(action.topicPartition());
+ return doesLeaderRemoveViolateOptimizedGoal(replicaToBeRemoved) ? REJECT : ACCEPT;
+ default:
+ throw new IllegalArgumentException("Unsupported balancing action " + action.balancingAction() + " is provided.");
+ }
+ }
+
+    /**
+     * Checks whether removing the given leader replica from its broker would violate this goal.
+     */
+ private boolean doesLeaderRemoveViolateOptimizedGoal(Replica replicaToBeRemoved) {
+ if (!replicaToBeRemoved.isLeader()) {
+ return false;
+ }
+ String topic = replicaToBeRemoved.topicPartition().topic();
+ if (!_mustHaveTopicMinLeadersPerBroker.containsKey(topic)) {
+ return false;
+ }
+ int topicLeaderCountOnSourceBroker = replicaToBeRemoved.broker().numLeadersFor(topic);
+ return topicLeaderCountOnSourceBroker <= minTopicLeadersPerBroker(topic);
+ }
+
+    /**
+     * Balancing logic for one topic on one broker:
+     * leadership movement is tried first; if the minimum is still not met, replicas are migrated in.
+     */
+ private void maybeMoveLeaderOfTopicToBroker(String topicName,
+ Broker broker,
+ ClusterModel clusterModel,
+                                             Set<Goal> optimizedGoals,
+ OptimizationOptions optimizationOptions) {
+ int topicLeaderCount = broker.numLeadersFor(topicName);
+        //Nothing to do if this broker already hosts at least the minimum number of leaders for the topic
+ if (topicLeaderCount >= minTopicLeadersPerBroker(topicName)) {
+ return;
+ }
+        //Collect all follower replicas of the topic on this broker
+        List<Replica> followerReplicas = broker.replicas().stream().filter(i -> !i.isLeader() && i.topicPartition().topic().equals(topicName)).collect(Collectors.toList());
+ for (Replica followerReplica : followerReplicas) {
+            //Look up the partition's current leader replica in the cluster model
+            Replica leader = clusterModel.partition(followerReplica.topicPartition());
+            //If the leader's broker has more leaders of this topic than the minimum, switch leadership onto this broker
+ if (leader.broker().numLeadersFor(topicName) > minTopicLeadersPerBroker(topicName)) {
+ if (maybeApplyBalancingAction(clusterModel, leader, Collections.singleton(broker),
+ LEADERSHIP_MOVEMENT, optimizedGoals, optimizationOptions) != null) {
+ topicLeaderCount++;
+                    //Stop once this broker reaches the minimum leader count for the topic
+ if (topicLeaderCount >= minTopicLeadersPerBroker(topicName)) {
+ return;
+ }
+ }
+ }
+ }
+        //Collect the brokers that host more leaders of this topic than the minimum, as migration sources
+        PriorityQueue<Broker> brokersWithExcessiveLeaderToMove = getBrokersWithExcessiveLeaderToMove(topicName, clusterModel);
+ while (!brokersWithExcessiveLeaderToMove.isEmpty()) {
+ Broker brokerWithExcessiveLeaderToMove = brokersWithExcessiveLeaderToMove.poll();
+            List<Replica> leadersOfTopic = brokerWithExcessiveLeaderToMove.leaderReplicas().stream()
+ .filter(i -> i.topicPartition().topic().equals(topicName)).collect(Collectors.toList());
+ boolean leaderMoved = false;
+ int leaderMoveCount = leadersOfTopic.size();
+ for (Replica leaderOfTopic : leadersOfTopic) {
+ Broker destinationBroker = maybeApplyBalancingAction(clusterModel, leaderOfTopic, Collections.singleton(broker),
+ REPLICA_MOVEMENT, optimizedGoals, optimizationOptions);
+ if (destinationBroker != null) {
+ leaderMoved = true;
+ break;
+ }
+ }
+ if (leaderMoved) {
+                //Stop once this broker reaches the minimum leader count for the topic
+ topicLeaderCount++;
+ if (topicLeaderCount >= minTopicLeadersPerBroker(topicName)) {
+ return;
+ }
+                //If the source broker still exceeds the minimum leader count after the move, keep it as a migration source
+ leaderMoveCount--;
+ if (leaderMoveCount > minTopicLeadersPerBroker(topicName)) {
+ brokersWithExcessiveLeaderToMove.add(brokerWithExcessiveLeaderToMove);
+ }
+ }
+ }
+ }
+
+    /**
+     * Returns all brokers that host more leaders of the given topic than the per-broker minimum, ordered by leader count descending.
+     */
+    private PriorityQueue<Broker> getBrokersWithExcessiveLeaderToMove(String topicName, ClusterModel clusterModel) {
+        PriorityQueue<Broker> brokersWithExcessiveLeaderToMove = new PriorityQueue<>((broker1, broker2) -> {
+ int broker1LeaderCount = broker1.numLeadersFor(topicName);
+ int broker2LeaderCount = broker2.numLeadersFor(topicName);
+ int leaderCountCompareResult = Integer.compare(broker2LeaderCount, broker1LeaderCount);
+ return leaderCountCompareResult == 0 ? Integer.compare(broker1.id(), broker2.id()) : leaderCountCompareResult;
+ });
+ clusterModel.brokers().stream().filter(broker -> broker.numLeadersFor(topicName) > minTopicLeadersPerBroker(topicName))
+ .forEach(brokersWithExcessiveLeaderToMove::add);
+ return brokersWithExcessiveLeaderToMove;
+ }
+
+    /**
+     * Offline replicas are migrated away first.
+     */
+ private void moveAwayOfflineReplicas(Broker srcBroker,
+ ClusterModel clusterModel,
+                                         Set<Goal> optimizedGoals,
+ OptimizationOptions optimizationOptions) {
+ if (srcBroker.currentOfflineReplicas().isEmpty()) {
+ return;
+ }
+        SortedSet<Broker> eligibleBrokersToMoveOfflineReplicasTo = new TreeSet<>(
+                Comparator.comparingInt((Broker broker) -> broker.replicas().size()).thenComparingInt(Broker::id));
+        Set<Replica> offlineReplicas = new HashSet<>(srcBroker.currentOfflineReplicas());
+ for (Replica offlineReplica : offlineReplicas) {
+ if (maybeApplyBalancingAction(clusterModel, offlineReplica, eligibleBrokersToMoveOfflineReplicasTo,
+ REPLICA_MOVEMENT, optimizedGoals, optimizationOptions) == null) {
+ logger.error(String.format("[%s] offline replica %s from broker %d (has %d replicas) move error", name(),
+ offlineReplica, srcBroker.id(), srcBroker.replicas().size()));
+ }
+ }
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/TopicReplicaDistributionGoal.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/TopicReplicaDistributionGoal.java
new file mode 100644
index 000000000..a7ff050e7
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/optimizer/goals/TopicReplicaDistributionGoal.java
@@ -0,0 +1,287 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.goals;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Broker;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.ClusterModel;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.Replica;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionAcceptance;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.ActionType;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.BalancingAction;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.OptimizationOptions;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.utils.GoalUtils;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+public class TopicReplicaDistributionGoal extends AbstractGoal {
+    private final Map<String, Integer> _balanceUpperLimitByTopic;
+    private final Map<String, Integer> _balanceLowerLimitByTopic;
+    private Set<Broker> _brokersAllowedReplicaMove;
+    private final Map<String, Double> _avgTopicReplicasOnBroker;
+
+ public TopicReplicaDistributionGoal() {
+ _balanceUpperLimitByTopic = new HashMap<>();
+ _balanceLowerLimitByTopic = new HashMap<>();
+ _avgTopicReplicasOnBroker = new HashMap<>();
+ }
+
+ @Override
+    protected void rebalanceForBroker(Broker broker, ClusterModel clusterModel, Set<Goal> optimizedGoals, OptimizationOptions optimizationOptions) {
+ for (String topic : broker.topics()) {
+ if (isTopicExcludedFromRebalance(topic)) {
+ continue;
+ }
+            Collection<Replica> replicas = broker.replicasOfTopicInBroker(topic);
+ int numTopicReplicas = replicas.size();
+ boolean isExcludedForReplicaMove = isExcludedForReplicaMove(broker);
+ int numOfflineTopicReplicas = GoalUtils.retainCurrentOfflineBrokerReplicas(broker, replicas).size();
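+            //Move replicas out when the broker holds offline replicas of the topic or exceeds the upper limit; move replicas in when it is below the lower limit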
+ boolean requireLessReplicas = numOfflineTopicReplicas > 0 || numTopicReplicas > _balanceUpperLimitByTopic.get(topic) && !isExcludedForReplicaMove;
+ boolean requireMoreReplicas = !isExcludedForReplicaMove && numTopicReplicas - numOfflineTopicReplicas < _balanceLowerLimitByTopic.get(topic);
+
+ if (requireLessReplicas) {
+ rebalanceByMovingReplicasOut(broker, topic, clusterModel, optimizedGoals, optimizationOptions);
+ }
+ if (requireMoreReplicas) {
+ rebalanceByMovingReplicasIn(broker, topic, clusterModel, optimizedGoals, optimizationOptions);
+ }
+ }
+ }
+
+    /**
+     * Initializes the balancing limits:
+     * 1. Average number of replicas of each topic per eligible broker
+     * 2. Per-topic upper limit: the average shifted up by the replica threshold
+     * 3. Per-topic lower limit: the average shifted down by the replica threshold
+     */
+ @Override
+ protected void initGoalState(ClusterModel clusterModel, OptimizationOptions optimizationOptions) {
+        Set<String> excludedTopics = optimizationOptions.excludedTopics();
+        Set<Integer> excludedBrokers = optimizationOptions.offlineBrokers();
+        Set<String> topicsAllowedRebalance = GoalUtils.getNotExcludeTopics(clusterModel, excludedTopics);
+ _brokersAllowedReplicaMove = GoalUtils.getNotExcludeBrokers(clusterModel, excludedBrokers);
+ if (_brokersAllowedReplicaMove.isEmpty()) {
+ return;
+ }
+ for (String topic : topicsAllowedRebalance) {
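+            //Average replicas of this topic per eligible broker, plus the derived upper and lower limits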
+ int numTopicReplicas = clusterModel.numTopicReplicas(topic);
+ _avgTopicReplicasOnBroker.put(topic, numTopicReplicas / (double) _brokersAllowedReplicaMove.size());
+ _balanceUpperLimitByTopic.put(topic, balanceUpperLimit(topic, optimizationOptions));
+ _balanceLowerLimitByTopic.put(topic, balanceLowerLimit(topic, optimizationOptions));
+ }
+ }
+
+    /**
+     * Lower limit: the topic's average replicas per broker shifted down by the replica threshold (10% by default).
+     */
+ private Integer balanceLowerLimit(String topic, OptimizationOptions optimizationOptions) {
+ return (int) Math.floor(_avgTopicReplicasOnBroker.get(topic)
+ * Math.max(0, (1 - optimizationOptions.topicReplicaThreshold())));
+ }
+
+    /**
+     * Upper limit: the topic's average replicas per broker shifted up by the replica threshold (10% by default).
+     */
+ private Integer balanceUpperLimit(String topic, OptimizationOptions optimizationOptions) {
+ return (int) Math.ceil(_avgTopicReplicasOnBroker.get(topic)
+ * (1 + optimizationOptions.topicReplicaThreshold()));
+ }
+
+ @Override
+ protected boolean selfSatisfied(ClusterModel clusterModel, BalancingAction action) {
+ Broker sourceBroker = clusterModel.broker(action.sourceBrokerId());
+ if (sourceBroker.replica(action.topicPartition()).isCurrentOffline()) {
+ return action.balancingAction() == ActionType.REPLICA_MOVEMENT;
+ }
+ Broker destinationBroker = clusterModel.broker(action.destinationBrokerId());
+ String sourceTopic = action.topic();
+ return isReplicaCountAddUpperLimit(sourceTopic, destinationBroker)
+ && (isExcludedForReplicaMove(sourceBroker) || isReplicaCountRemoveLowerLimit(sourceTopic, sourceBroker));
+
+ }
+
+ @Override
+ public String name() {
+ return TopicReplicaDistributionGoal.class.getSimpleName();
+ }
+
+ @Override
+ public ActionAcceptance actionAcceptance(BalancingAction action, ClusterModel clusterModel) {
+ Broker sourceBroker = clusterModel.broker(action.sourceBrokerId());
+ Broker destinationBroker = clusterModel.broker(action.destinationBrokerId());
+ String sourceTopic = action.topic();
+ switch (action.balancingAction()) {
+ case LEADERSHIP_MOVEMENT:
+ return ActionAcceptance.ACCEPT;
+ case REPLICA_MOVEMENT:
+ return (isReplicaCountAddUpperLimit(sourceTopic, destinationBroker)
+ && (isExcludedForReplicaMove(sourceBroker)
+ || isReplicaCountRemoveLowerLimit(sourceTopic, sourceBroker))) ? ActionAcceptance.ACCEPT : ActionAcceptance.REJECT;
+ default:
+ throw new IllegalArgumentException("Unsupported balancing action " + action.balancingAction() + " is provided.");
+ }
+ }
+
+    /**
+     * Moves replicas of the topic out of the given broker when it exceeds the upper limit or holds offline replicas.
+     */
+ private boolean rebalanceByMovingReplicasOut(Broker broker,
+ String topic,
+ ClusterModel clusterModel,
+                                                 Set<Goal> optimizedGoals,
+ OptimizationOptions optimizationOptions) {
+        //Use every broker that is still below the per-topic upper limit as a candidate destination
+        SortedSet<Broker> candidateBrokers = new TreeSet<>(
+                Comparator.comparingInt((Broker b) -> b.numReplicasOfTopicInBroker(topic)).thenComparingInt(Broker::id));
+        Set<Broker> filterUpperLimitBroker = clusterModel.brokers().stream().filter(b -> b.numReplicasOfTopicInBroker(topic) < _balanceUpperLimitByTopic.get(topic)).collect(Collectors.toSet());
+ candidateBrokers.addAll(filterUpperLimitBroker);
+        Collection<Replica> replicasOfTopicInBroker = broker.replicasOfTopicInBroker(topic);
+ int numReplicasOfTopicInBroker = replicasOfTopicInBroker.size();
+ int numOfflineTopicReplicas = GoalUtils.retainCurrentOfflineBrokerReplicas(broker, replicasOfTopicInBroker).size();
+ int balanceUpperLimitForSourceBroker = isExcludedForReplicaMove(broker) ? 0 : _balanceUpperLimitByTopic.get(topic);
+ boolean wasUnableToMoveOfflineReplica = false;
+
+ for (Replica replica : replicasToMoveOut(broker, topic)) {
+            //Stop if an offline replica could not be moved, the current replica is online, and the broker is already within the upper limit
+ if (wasUnableToMoveOfflineReplica && !replica.isCurrentOffline() && numReplicasOfTopicInBroker <= balanceUpperLimitForSourceBroker) {
+ return false;
+ }
+ boolean wasOffline = replica.isCurrentOffline();
+ Broker b = maybeApplyBalancingAction(clusterModel, replica, candidateBrokers, ActionType.REPLICA_MOVEMENT,
+ optimizedGoals, optimizationOptions);
+ // Only check if we successfully moved something.
+ if (b != null) {
+ if (wasOffline) {
+ numOfflineTopicReplicas--;
+ }
+ if (--numReplicasOfTopicInBroker <= (numOfflineTopicReplicas == 0 ? balanceUpperLimitForSourceBroker : 0)) {
+ return false;
+ }
+
+ // Remove and reinsert the broker so the order is correct.
+ candidateBrokers.remove(b);
+ if (b.numReplicasOfTopicInBroker(topic) < _balanceUpperLimitByTopic.get(topic)) {
+ candidateBrokers.add(b);
+ }
+ } else if (wasOffline) {
+ wasUnableToMoveOfflineReplica = true;
+ }
+ }
+ return !broker.replicasOfTopicInBroker(topic).isEmpty();
+ }
+
+    /**
+     * Ordering of replicas to move out:
+     * 1. Offline replicas first
+     * 2. Then lower partition numbers first
+     */
+    private SortedSet<Replica> replicasToMoveOut(Broker broker, String topic) {
+        SortedSet<Replica> replicasToMoveOut = new TreeSet<>((r1, r2) -> {
+ boolean r1Offline = broker.currentOfflineReplicas().contains(r1);
+ boolean r2Offline = broker.currentOfflineReplicas().contains(r2);
+ if (r1Offline && !r2Offline) {
+ return -1;
+ } else if (!r1Offline && r2Offline) {
+ return 1;
+ }
+ if (r1.topicPartition().partition() > r2.topicPartition().partition()) {
+ return 1;
+ } else if (r1.topicPartition().partition() < r2.topicPartition().partition()) {
+ return -1;
+ }
+ return 0;
+ });
+ replicasToMoveOut.addAll(broker.replicasOfTopicInBroker(topic));
+ return replicasToMoveOut;
+ }
+
+    /**
+     * Moves replicas of the topic from overloaded or offline-holding brokers onto the given broker until it reaches the lower limit.
+     */
+ private boolean rebalanceByMovingReplicasIn(Broker broker,
+ String topic,
+ ClusterModel clusterModel,
+                                                Set<Goal> optimizedGoals,
+ OptimizationOptions optimizationOptions) {
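+        //Source brokers ordered by offline replicas of the topic (desc), then by topic replica count (desc), then by broker id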
+        PriorityQueue<Broker> eligibleBrokers = new PriorityQueue<>((b1, b2) -> {
+            Collection<Replica> replicasOfTopicInB2 = b2.replicasOfTopicInBroker(topic);
+            int numReplicasOfTopicInB2 = replicasOfTopicInB2.size();
+            int numOfflineTopicReplicasInB2 = GoalUtils.retainCurrentOfflineBrokerReplicas(b2, replicasOfTopicInB2).size();
+            Collection<Replica> replicasOfTopicInB1 = b1.replicasOfTopicInBroker(topic);
+ int numReplicasOfTopicInB1 = replicasOfTopicInB1.size();
+ int numOfflineTopicReplicasInB1 = GoalUtils.retainCurrentOfflineBrokerReplicas(b1, replicasOfTopicInB1).size();
+
+ int resultByOfflineReplicas = Integer.compare(numOfflineTopicReplicasInB2, numOfflineTopicReplicasInB1);
+ if (resultByOfflineReplicas == 0) {
+ int resultByAllReplicas = Integer.compare(numReplicasOfTopicInB2, numReplicasOfTopicInB1);
+ return resultByAllReplicas == 0 ? Integer.compare(b1.id(), b2.id()) : resultByAllReplicas;
+ }
+ return resultByOfflineReplicas;
+ });
+        //Select as migration sources all brokers that exceed the per-topic lower limit, hold offline replicas, or are excluded from replica moves
+ for (Broker sourceBroker : clusterModel.brokers()) {
+ if (sourceBroker.numReplicasOfTopicInBroker(topic) > _balanceLowerLimitByTopic.get(topic)
+ || !sourceBroker.currentOfflineReplicas().isEmpty() || isExcludedForReplicaMove(sourceBroker)) {
+ eligibleBrokers.add(sourceBroker);
+ }
+ }
+        Collection<Replica> replicasOfTopicInBroker = broker.replicasOfTopicInBroker(topic);
+        int numReplicasOfTopicInBroker = replicasOfTopicInBroker.size();
+        //The broker being rebalanced is the only candidate destination
+        Set<Broker> candidateBrokers = Collections.singleton(broker);
+ while (!eligibleBrokers.isEmpty()) {
+ Broker sourceBroker = eligibleBrokers.poll();
+            SortedSet<Replica> replicasToMove = replicasToMoveOut(sourceBroker, topic);
+ int numOfflineTopicReplicas = GoalUtils.retainCurrentOfflineBrokerReplicas(sourceBroker, replicasToMove).size();
+
+ for (Replica replica : replicasToMove) {
+ boolean wasOffline = replica.isCurrentOffline();
+ Broker b = maybeApplyBalancingAction(clusterModel, replica, candidateBrokers, ActionType.REPLICA_MOVEMENT,
+ optimizedGoals, optimizationOptions);
+ if (b != null) {
+ if (wasOffline) {
+ numOfflineTopicReplicas--;
+ }
+ if (++numReplicasOfTopicInBroker >= _balanceLowerLimitByTopic.get(topic)) {
+ return false;
+ }
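+                    //Once the source has no offline replicas left and fewer topic replicas than the next candidate, re-queue it and switch sources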
+ if (!eligibleBrokers.isEmpty() && numOfflineTopicReplicas == 0
+ && sourceBroker.numReplicasOfTopicInBroker(topic) < eligibleBrokers.peek().numReplicasOfTopicInBroker(topic)) {
+ eligibleBrokers.add(sourceBroker);
+ break;
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+    /**
+     * After adding one replica, the destination broker's replica count for the topic must stay at or below the upper limit.
+     */
+ private boolean isReplicaCountAddUpperLimit(String topic, Broker destinationBroker) {
+ int numTopicReplicas = destinationBroker.numReplicasOfTopicInBroker(topic);
+ int brokerBalanceUpperLimit = _balanceUpperLimitByTopic.get(topic);
+ return numTopicReplicas + 1 <= brokerBalanceUpperLimit;
+ }
+
+    /**
+     * After removing one replica, the source broker's replica count for the topic must stay at or above the lower limit.
+     */
+ private boolean isReplicaCountRemoveLowerLimit(String topic, Broker sourceBroker) {
+ int numTopicReplicas = sourceBroker.numReplicasOfTopicInBroker(topic);
+ int brokerBalanceLowerLimit = _balanceLowerLimitByTopic.get(topic);
+ return numTopicReplicas - 1 >= brokerBalanceLowerLimit;
+ }
+
+    /**
+     * Whether the given broker is excluded from replica-movement operations.
+     */
+ private boolean isExcludedForReplicaMove(Broker broker) {
+ return !_brokersAllowedReplicaMove.contains(broker);
+ }
+
+    /**
+     * Whether the given topic is excluded from rebalancing (i.e. not in the set of balanceable topics).
+     */
+ private boolean isTopicExcludedFromRebalance(String topic) {
+ return _avgTopicReplicasOnBroker.get(topic) == null;
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/package-info.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/package-info.java
new file mode 100644
index 000000000..f96cee6d8
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/package-info.java
@@ -0,0 +1,7 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm;
+
+/**
+ *
+ * Rebalance algorithm code.
+ *
+ * */
\ No newline at end of file
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/CommandLineUtils.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/CommandLineUtils.java
new file mode 100644
index 000000000..fe6c8204d
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/CommandLineUtils.java
@@ -0,0 +1,21 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.utils;
+
+import joptsimple.OptionParser;
+
+import java.io.IOException;
+
+public class CommandLineUtils {
+
+ /**
+ * Print usage and exit
+ */
+ public static void printUsageAndDie(OptionParser parser, String message) {
+ try {
+ System.err.println(message);
+ parser.printHelpOn(System.err);
+ System.exit(1);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/GoalUtils.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/GoalUtils.java
new file mode 100644
index 000000000..9d517fd7c
--- /dev/null
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/GoalUtils.java
@@ -0,0 +1,67 @@
+package com.xiaojukeji.know.streaming.km.rebalance.algorithm.utils;
+
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceGoal;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceParameter;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.BalanceThreshold;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.executor.common.HostEnv;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.model.*;
+import com.xiaojukeji.know.streaming.km.rebalance.algorithm.optimizer.AnalyzerUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+public class GoalUtils {
+ private static final Logger logger = LoggerFactory.getLogger(GoalUtils.class);
+
+    public static Set<String> getNotExcludeTopics(ClusterModel clusterModel, Set<String> excludedTopics) {
+ return clusterModel.topics().stream().filter(topicName -> !excludedTopics.contains(topicName)).collect(Collectors.toSet());
+ }
+
+    public static Set<Broker> getNotExcludeBrokers(ClusterModel clusterModel, Set<Integer> excludedBrokers) {
+ return clusterModel.brokers().stream().filter(broker -> !excludedBrokers.contains(broker.id())).collect(Collectors.toSet());
+ }
+
+    /**
+     * Returns, among the given replicas, those that are currently offline on the broker.
+     */
+ public static Set retainCurrentOfflineBrokerReplicas(Broker broker, Collection