@@ -45,6 +45,7 @@
 import com.xiaojukeji.know.streaming.km.core.service.topic.TopicMetricService;
 import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService;
 import com.xiaojukeji.know.streaming.km.core.service.version.metrics.kafka.TopicMetricVersionItems;
+import com.xiaojukeji.know.streaming.km.core.utils.ApiCallThreadPoolService;
 import org.apache.commons.lang3.ObjectUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.kafka.clients.consumer.*;
@@ -89,6 +90,9 @@ public class TopicStateManagerImpl implements TopicStateManager {
     @Autowired
     private GroupManager groupManager;
 
+    @Autowired
+    private ApiCallThreadPoolService apiCallThreadPoolService;
+
     @Override
     public TopicBrokerAllVO getTopicBrokerAll(Long clusterPhyId, String topicName, String searchBrokerHost) throws NotExistException {
         Topic topic = topicService.getTopic(clusterPhyId, topicName);
@@ -303,25 +307,38 @@ public Result<List<TopicPartitionVO>> getTopicPartitions(Long clusterPhyId, Stri
             return Result.buildSuc();
         }
 
-        Result<List<PartitionMetrics>> metricsResult = partitionMetricService.collectPartitionsMetricsFromKafka(clusterPhyId, topicName, metricsNames);
-        if (metricsResult.failed()) {
-            // Only log the error; do not return a failure directly
-            log.error(
-                    "method=getTopicPartitions||clusterPhyId={}||topicName={}||result={}||msg=get metrics from es failed",
-                    clusterPhyId, topicName, metricsResult
-            );
+        List<PartitionMetrics> partitionMetricsList = new ArrayList<>();
+        for (String metricName : metricsNames) {
+            apiCallThreadPoolService.runnableTask("task", 3000, () -> {
+                Result<List<PartitionMetrics>> metricsResult = partitionMetricService.collectPartitionsMetricsFromKafka(clusterPhyId, topicName, Arrays.asList(metricName));
+                if (metricsResult.failed()) {
+                    // Only log the error; do not return a failure directly
+                    log.error(
+                            "method=getTopicPartitions||clusterPhyId={}||topicName={}||result={}||msg=get metrics from es failed",
+                            clusterPhyId, topicName, metricsResult
+                    );
+                }
+                if (metricsResult.hasData()) {
+                    partitionMetricsList.addAll(metricsResult.getData());
+                }
+                return null;
+            });
         }
 
+        apiCallThreadPoolService.waitResult(400);
+
+        List<PartitionMetrics> metricsList = combinePartitionMetrics(partitionMetricsList);
+
         // Convert to a map keyed by partition id
         Map<Integer, PartitionMetrics> metricsMap = new HashMap<>();
-        if (metricsResult.hasData()) {
-            for (PartitionMetrics metrics: metricsResult.getData()) {
+        if (!metricsList.isEmpty()) {
+            for (PartitionMetrics metrics : metricsList) {
                 metricsMap.put(metrics.getPartitionId(), metrics);
             }
         }
 
         List<TopicPartitionVO> voList = new ArrayList<>();
-        for (Partition partition: partitionList) {
+        for (Partition partition : partitionList) {
             voList.add(TopicVOConverter.convert2TopicPartitionVO(partition, metricsMap.get(partition.getPartitionId())));
         }
         return Result.buildSuc(voList);
@@ -450,4 +467,17 @@ private Properties generateClientProperties(ClusterPhy clusterPhy, Integer maxPo
         props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Math.max(2, Math.min(5, maxPollRecords)));
         return props;
     }
-}
+
+    private List<PartitionMetrics> combinePartitionMetrics(List<PartitionMetrics> partitionMetricsList) {
+        Map<String, PartitionMetrics> metricsMap = new HashMap<>();
+        for (PartitionMetrics partitionMetrics : partitionMetricsList) {
+            PartitionMetrics metrics = metricsMap.get(partitionMetrics.getClusterPhyId() + "@" + partitionMetrics.getPartitionId());
+            if (metrics == null) {
+                metricsMap.put(partitionMetrics.getClusterPhyId() + "@" + partitionMetrics.getPartitionId(), partitionMetrics);
+            } else {
+                metrics.putMetric(partitionMetrics.getMetrics());
+            }
+        }
+        return metricsMap.values().stream().collect(Collectors.toList());
+    }
+}
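
Note: `ApiCallThreadPoolService` is referenced above but its source is not part of this diff. Below is a minimal sketch of what such a service could look like, inferred purely from the two call sites (`runnableTask("task", 3000, ...)` and `waitResult(400)`); every name, signature, and timeout semantic here is an assumption, not the project's actual implementation.

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

// Hypothetical sketch of the ApiCallThreadPoolService used in this diff.
public class ApiCallThreadPoolService {
    private final ExecutorService executor = Executors.newFixedThreadPool(8);

    // Futures for tasks submitted so far. A real implementation would likely
    // scope these per request (e.g. via ThreadLocal) rather than per bean.
    private final List<Future<?>> futures = new CopyOnWriteArrayList<>();

    // Submit a named task; taskName and timeoutMs mirror the call site
    // runnableTask("task", 3000, ...), but their exact meaning is assumed.
    public <T> void runnableTask(String taskName, long timeoutMs, Supplier<T> task) {
        futures.add(executor.submit(task::get));
    }

    // Block until every submitted task completes, waiting up to timeoutMs
    // per task; mirrors waitResult(400) in the diff.
    public void waitResult(long timeoutMs) {
        for (Future<?> future : futures) {
            try {
                future.get(timeoutMs, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } catch (ExecutionException | TimeoutException e) {
                // Swallow so one slow or failed metric fetch does not fail the request.
            }
        }
        futures.clear();
    }
}
```

Under this reading, `runnableTask` fans the per-metric Kafka fetches out to a shared pool and `waitResult` is the join point, so `getTopicPartitions` gathers all per-metric results before merging them with `combinePartitionMetrics`.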