From b2754cc498d53fba3508791b06aaacc8a5062c96 Mon Sep 17 00:00:00 2001 From: sheli00 Date: Tue, 14 May 2024 16:16:32 +0800 Subject: [PATCH 1/6] git add hugegraph-store/hg-store-node --- hugegraph-store/hg-store-node/banner.txt | 5 + hugegraph-store/hg-store-node/pom.xml | 172 ++++++ .../hugegraph/store/node/AppConfig.java | 228 ++++++++ .../hugegraph/store/node/AppShutdownHook.java | 59 ++ .../store/node/StoreNodeApplication.java | 65 +++ .../controller/HgStoreMetricsController.java | 66 +++ .../controller/HgStoreStatusController.java | 94 +++ .../node/controller/HgTestController.java | 116 ++++ .../store/node/controller/IndexAPI.java | 105 ++++ .../store/node/controller/PartitionAPI.java | 248 ++++++++ .../store/node/entry/RestResult.java | 31 + .../store/node/grpc/BatchGrpcClosure.java | 212 +++++++ .../store/node/grpc/BatchScanIterator.java | 160 +++++ .../store/node/grpc/EmptyIterator.java | 56 ++ .../store/node/grpc/FusingScanIterator.java | 120 ++++ .../store/node/grpc/GRpcServerConfig.java | 46 ++ .../store/node/grpc/GrpcClosure.java | 72 +++ .../store/node/grpc/HgStoreNodeService.java | 237 ++++++++ .../store/node/grpc/HgStoreNodeState.java | 57 ++ .../store/node/grpc/HgStoreSessionImpl.java | 551 ++++++++++++++++++ .../store/node/grpc/HgStoreStateService.java | 59 ++ .../store/node/grpc/HgStoreStateSubject.java | 73 +++ .../store/node/grpc/HgStoreStreamImpl.java | 121 ++++ .../store/node/grpc/HgStoreWrapperEx.java | 123 ++++ .../store/node/grpc/ParallelScanIterator.java | 386 ++++++++++++ .../store/node/grpc/QueryCondition.java | 39 ++ .../node/grpc/ScanBatchOneShotResponse.java | 105 ++++ .../store/node/grpc/ScanBatchResponse.java | 278 +++++++++ .../store/node/grpc/ScanBatchResponse3.java | 417 +++++++++++++ .../node/grpc/ScanBatchResponseFactory.java | 63 ++ .../store/node/grpc/ScanOneShotResponse.java | 96 +++ .../hugegraph/store/node/grpc/ScanQuery.java | 104 ++++ .../store/node/grpc/ScanQueryProducer.java | 262 +++++++++ 
.../store/node/grpc/ScanStreamResponse.java | 261 +++++++++ .../hugegraph/store/node/grpc/ScanUtil.java | 331 +++++++++++ .../store/node/grpc/scan/GraphStoreImpl.java | 84 +++ .../node/grpc/scan/ScanResponseObserver.java | 267 +++++++++ .../node/listener/ContextClosedListener.java | 53 ++ .../node/listener/PdConfigureListener.java | 212 +++++++ .../store/node/metrics/DriveMetrics.java | 58 ++ .../store/node/metrics/GRpcExMetrics.java | 94 +++ .../store/node/metrics/JRaftMetrics.java | 314 ++++++++++ .../store/node/metrics/MetricsConfig.java | 48 ++ .../store/node/metrics/MetricsUtil.java | 52 ++ .../store/node/metrics/ProcfsEntry.java | 64 ++ .../store/node/metrics/ProcfsMetrics.java | 67 +++ .../store/node/metrics/ProcfsReader.java | 142 +++++ .../store/node/metrics/ProcfsSmaps.java | 114 ++++ .../store/node/metrics/RocksDBMetrics.java | 419 +++++++++++++ .../node/metrics/RocksDBMetricsConst.java | 165 ++++++ .../store/node/metrics/StoreMetrics.java | 112 ++++ .../store/node/metrics/SystemMetrics.java | 140 +++++ .../store/node/model/HgNodeStatus.java | 76 +++ .../hugegraph/store/node/util/Base58.java | 172 ++++++ .../apache/hugegraph/store/node/util/Err.java | 38 ++ .../hugegraph/store/node/util/HgAssert.java | 145 +++++ .../store/node/util/HgBufferProxy.java | 175 ++++++ .../hugegraph/store/node/util/HgChannel.java | 151 +++++ .../store/node/util/HgExecutorUtil.java | 98 ++++ .../hugegraph/store/node/util/HgGrpc.java | 122 ++++ .../store/node/util/HgRegexUtil.java | 92 +++ .../store/node/util/HgStoreConst.java | 39 ++ .../store/node/util/HgStoreNodeUtil.java | 83 +++ .../store/node/util/PropertyUtil.java | 96 +++ .../hugegraph/store/node/util/Result.java | 43 ++ .../hugegraph/store/node/util/TkEntry.java | 69 +++ .../src/main/resources/application-pd.yml | 38 ++ .../src/main/resources/application.yml | 51 ++ .../src/main/resources/banner.txt | 6 + .../src/main/resources/log4j2-dev.xml | 143 +++++ .../src/main/resources/version.txt | 1 + 
.../store/boot/StoreNodeServer00.java | 57 ++ .../store/boot/StoreNodeServer01.java | 59 ++ .../store/boot/StoreNodeServer02.java | 57 ++ .../store/boot/StoreNodeServer03.java | 57 ++ .../store/boot/StoreNodeServer04.java | 41 ++ .../store/boot/StoreNodeServer05.java | 41 ++ .../store/boot/StoreNodeServer06.java | 41 ++ .../store/node/HgStoreNodeServiceTest.java | 73 +++ .../store/node/metrics/JraftMetricsTest.java | 59 ++ .../src/test/resources/application-pd.yml | 28 + .../test/resources/application-server00.yml | 73 +++ .../test/resources/application-server01.yml | 70 +++ .../test/resources/application-server02.yml | 69 +++ .../test/resources/application-server03.yml | 71 +++ .../test/resources/application-server04.yml | 68 +++ .../test/resources/application-server05.yml | 70 +++ .../test/resources/application-server06.yml | 70 +++ .../src/test/resources/log4j2-dev.xml | 139 +++++ 89 files changed, 10604 insertions(+) create mode 100644 hugegraph-store/hg-store-node/banner.txt create mode 100644 hugegraph-store/hg-store-node/pom.xml create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppShutdownHook.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/StoreNodeApplication.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreMetricsController.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreStatusController.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/IndexAPI.java create mode 100644 
hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/entry/RestResult.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchScanIterator.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/EmptyIterator.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeState.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStreamImpl.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java create mode 100644 
hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/ContextClosedListener.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/DriveMetrics.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java create mode 100644 
hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsConfig.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsUtil.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsReader.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsSmaps.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/SystemMetrics.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Err.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgBufferProxy.java create mode 100644 
hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgChannel.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgExecutorUtil.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgRegexUtil.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreConst.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreNodeUtil.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/PropertyUtil.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Result.java create mode 100644 hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/TkEntry.java create mode 100644 hugegraph-store/hg-store-node/src/main/resources/application-pd.yml create mode 100644 hugegraph-store/hg-store-node/src/main/resources/application.yml create mode 100644 hugegraph-store/hg-store-node/src/main/resources/banner.txt create mode 100644 hugegraph-store/hg-store-node/src/main/resources/log4j2-dev.xml create mode 100644 hugegraph-store/hg-store-node/src/main/resources/version.txt create mode 100644 hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer00.java create mode 100644 hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer01.java create mode 100644 hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer02.java create mode 100644 hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer03.java create mode 100644 
hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer04.java create mode 100644 hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer05.java create mode 100644 hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer06.java create mode 100644 hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/HgStoreNodeServiceTest.java create mode 100644 hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/metrics/JraftMetricsTest.java create mode 100644 hugegraph-store/hg-store-node/src/test/resources/application-pd.yml create mode 100644 hugegraph-store/hg-store-node/src/test/resources/application-server00.yml create mode 100644 hugegraph-store/hg-store-node/src/test/resources/application-server01.yml create mode 100644 hugegraph-store/hg-store-node/src/test/resources/application-server02.yml create mode 100644 hugegraph-store/hg-store-node/src/test/resources/application-server03.yml create mode 100644 hugegraph-store/hg-store-node/src/test/resources/application-server04.yml create mode 100644 hugegraph-store/hg-store-node/src/test/resources/application-server05.yml create mode 100644 hugegraph-store/hg-store-node/src/test/resources/application-server06.yml create mode 100644 hugegraph-store/hg-store-node/src/test/resources/log4j2-dev.xml diff --git a/hugegraph-store/hg-store-node/banner.txt b/hugegraph-store/hg-store-node/banner.txt new file mode 100644 index 0000000000..13f0501ce9 --- /dev/null +++ b/hugegraph-store/hg-store-node/banner.txt @@ -0,0 +1,5 @@ + _ _ ____ ____ _____ ___ ____ _____ _ _ ___ ____ _____ + | | | |/ ___| / ___|_ _/ _ \| _ \| ____| | \ | |/ _ \| _ \| ____| + | |_| | | _ ____\___ \ | || | | | |_) | _| _____| \| | | | | | | | _| + | _ | |_| |_____|__) || || |_| | _ <| |__|_____| |\ | |_| | |_| | |___ + |_| |_|\____| |____/ |_| \___/|_| \_\_____| |_| \_|\___/|____/|_____| \ No newline at end of file 
diff --git a/hugegraph-store/hg-store-node/pom.xml b/hugegraph-store/hg-store-node/pom.xml new file mode 100644 index 0000000000..90ba166ab8 --- /dev/null +++ b/hugegraph-store/hg-store-node/pom.xml @@ -0,0 +1,172 @@ + + + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-store + ${revision} + ../pom.xml + + + hg-store-node + + + + + org.springframework.boot + spring-boot-starter + 2.5.14 + + + org.springframework.boot + spring-boot-starter-logging + + + + + org.springframework.boot + spring-boot-starter-actuator + 2.5.14 + + + io.micrometer + micrometer-registry-prometheus + 1.7.12 + + + io.dropwizard.metrics + metrics-core + 4.2.4 + + + org.springframework.boot + spring-boot-starter-web + 2.5.14 + + + + org.springframework.boot + spring-boot-starter-tomcat + + + + + org.springframework.boot + spring-boot-starter-jetty + 2.5.14 + + + io.github.lognet + grpc-spring-boot-starter + 4.5.5 + + + org.projectlombok + lombok + 1.18.24 + + + org.springframework.boot + spring-boot-starter-test + 2.5.14 + test + + + + + org.apache.hugegraph + hg-store-common + + + com.google.protobuf + protobuf-java-util + 3.17.2 + + + org.apache.hugegraph + hg-store-grpc + + + org.apache.hugegraph + hg-pd-client + ${revision} + + + io.grpc + * + + + + + org.apache.hugegraph + hg-store-core + + + + com.taobao.arthas + arthas-agent-attach + 3.6.4 + + + com.taobao.arthas + arthas-packaging + 3.6.4 + + + + org.apache.logging.log4j + log4j-core + 2.17.2 + + + org.apache.logging.log4j + log4j-api + 2.17.2 + + + + + + + + org.springframework.boot + spring-boot-maven-plugin + 2.5.14 + + + + repackage + + + + org.apache.hugegraph.store.node.StoreNodeApplication + + true + + + + + + + + diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java new file mode 100644 index 0000000000..214e196f1c --- /dev/null +++ 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node; + +import java.util.HashMap; +import java.util.Map; + +import javax.annotation.PostConstruct; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; +import org.springframework.stereotype.Component; + +import lombok.Data; + +@Data +@Component +public class AppConfig { + @Value("${pdserver.address}") + private String pdServerAddress; + + @Value("${grpc.host}") + private String host; + + @Value("${grpc.port}") + private int grpcPort; + + @Value("${grpc.server.wait-time: 3600}") + private int serverWaitTime; + + @Value("${server.port}") + private int restPort; + + + //内置pd模式,用于单机部署 + @Value("${app.data-path: store}") + private String dataPath; + + @Value("${app.raft-path:}") + private String raftPath; + + //内置pd模式,用于单机部署 + @Value("${app.fake-pd: false}") + private boolean fakePd; + @Autowired + private Raft raft; 
+ @Autowired + private ArthasConfig arthasConfig; + @Autowired + private FakePdConfig fakePdConfig; + @Autowired + private LabelConfig labelConfig; + @Autowired + private RocksdbConfig rocksdbConfig; + @Autowired + private ThreadPoolGrpc threadPoolGrpc; + @Autowired + private ThreadPoolScan threadPoolScan; + + public String getRaftPath() { + if (raftPath == null || raftPath.length() == 0) { + return dataPath; + } + return raftPath; + } + + @PostConstruct + public void init() { + Runtime rt = Runtime.getRuntime(); + if (threadPoolScan.core == 0) { + threadPoolScan.core = rt.availableProcessors() * 4; + } + + Map rocksdb = rocksdbConfig.rocksdb; + if (!rocksdb.containsKey("total_memory_size") + || "0".equals(rocksdb.get("total_memory_size"))) { + rocksdb.put("total_memory_size", Long.toString(rt.maxMemory())); + } + long totalMemory = Long.parseLong(rocksdbConfig.rocksdb.get("total_memory_size")); + if (raft.getDisruptorBufferSize() == 0) { + int size = (int) (totalMemory / 1000 / 1000 / 1000); + size = (int) Math.pow(2, Math.round(Math.log(size) / Math.log(2))) * 32; + raft.setDisruptorBufferSize(size); // 每32M增加一个buffer + } + + if (!rocksdb.containsKey("write_buffer_size") || + "0".equals(rocksdb.get("write_buffer_size"))) { + rocksdb.put("write_buffer_size", Long.toString(totalMemory / 1000)); + } + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("AppConfig \n") + .append("rocksdb:\n"); + rocksdbConfig.rocksdb.forEach((k, v) -> builder.append("\t" + k + ":") + .append(v) + .append("\n")); + builder.append("raft:\n"); + builder.append("\tdisruptorBufferSize: " + raft.disruptorBufferSize); + return builder.toString(); + } + + public String getStoreServerAddress() { + return String.format("%s:%d", host, grpcPort); + } + + public Map getRocksdbConfig() { + Map config = new HashMap<>(); + rocksdbConfig.rocksdb.forEach((k, v) -> { + config.put("rocksdb." 
+ k, v); + }); + return config; + } + + @Data + @Configuration + public class ThreadPoolGrpc { + @Value("${thread.pool.grpc.core:600}") + private int core; + @Value("${thread.pool.grpc.max:1000}") + private int max; + @Value("${thread.pool.grpc.queue:" + Integer.MAX_VALUE + "}") + private int queue; + } + + @Data + @Configuration + public class ThreadPoolScan { + @Value("${thread.pool.scan.core: 128}") + private int core; + @Value("${thread.pool.scan.max: 1000}") + private int max; + @Value("${thread.pool.scan.queue: 0}") + private int queue; + } + + @Data + @Configuration + public class Raft { + @Value("${raft.address}") + private String address; + + @Value("${raft.rpc-timeout:10000}") + private int rpcTimeOut; + @Value("${raft.metrics:true}") + private boolean metrics; + @Value("${raft.snapshotLogIndexMargin:0}") + private int snapshotLogIndexMargin; + @Value("${raft.snapshotInterval:300}") + private int snapshotInterval; + @Value("${raft.disruptorBufferSize:0}") + private int disruptorBufferSize; + @Value("${raft.max-log-file-size: 50000000000}") + private long maxLogFileSize; + @Value("${ave-logEntry-size-ratio : 0.95}") + private double aveLogEntrySizeRation; + @Value("${raft.useRocksDBSegmentLogStorage: true}") + private boolean useRocksDBSegmentLogStorage; + @Value("${raft.maxSegmentFileSize:67108864}") + private int maxSegmentFileSize; + @Value("${raft.maxReplicatorInflightMsgs:256}") + private int maxReplicatorInflightMsgs; + + } + + @Data + @Configuration + public class ArthasConfig { + @Value("${arthas.telnetPort:8566}") + private String telnetPort; + + @Value("${arthas.httpPort:8565}") + private String httpPort; + + @Value("${arthas.ip:0.0.0.0}") + private String arthasip; + + @Value("${arthas.disabledCommands:jad}") + private String disCmd; + } + + @Data + @Configuration + public class FakePdConfig { + @Value("${fake-pd.store-list:''}") + private String storeList; + @Value("${fake-pd.peers-list:''}") + private String peersList; //fakePd模式下,raft集群初始配置 + 
@Value("${fake-pd.partition-count:3}") + private int partitionCount; + @Value("${fake-pd.shard-count:3}") + private int shardCount; + } + + @Data + @Configuration + @ConfigurationProperties(prefix = "app") + public class LabelConfig { + private final Map label = new HashMap<>(); + } + + @Data + @Configuration + @ConfigurationProperties(prefix = "") + public class RocksdbConfig { + private final Map rocksdb = new HashMap<>(); + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppShutdownHook.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppShutdownHook.java new file mode 100644 index 0000000000..4b02e4e498 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppShutdownHook.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node; + +import org.apache.hugegraph.rocksdb.access.RocksDBFactory; + +/** + * copy from web + */ +public class AppShutdownHook extends Thread { + + private final Thread mainThread; + private boolean shutDownSignalReceived; + + public AppShutdownHook(Thread mainThread) { + super(); + this.mainThread = mainThread; + this.shutDownSignalReceived = false; + Runtime.getRuntime().addShutdownHook(this); + } + + @Override + public void run() { + System.out.println("Shut down signal received."); + this.shutDownSignalReceived = true; + mainThread.interrupt(); + + doSomethingForShutdown(); + + try { + mainThread.join(); //当收到停止信号时,等待mainThread的执行完成 + } catch (InterruptedException ignored) { + } + System.out.println("Shut down complete."); + } + + public boolean shouldShutDown() { + return shutDownSignalReceived; + } + + private void doSomethingForShutdown() { + RocksDBFactory.getInstance().releaseAllGraphDB(); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/StoreNodeApplication.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/StoreNodeApplication.java new file mode 100644 index 0000000000..c74ccc3295 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/StoreNodeApplication.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node; + +import org.apache.hugegraph.store.node.listener.ContextClosedListener; +import org.apache.hugegraph.store.node.listener.PdConfigureListener; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.context.ConfigurableApplicationContext; + +import com.alipay.remoting.util.StringUtils; + +/** + * + */ +@SpringBootApplication +public class StoreNodeApplication { + + //TODO Is this OK? + private final AppShutdownHook shutdownHook = new AppShutdownHook(Thread.currentThread()); + + public static void main(String[] args) { + start(); + } + + public static void start() { + // 设置solt用到的日志位置 + String logPath = System.getProperty("logging.path"); + if (StringUtils.isBlank(logPath)) { + System.setProperty("logging.path", "logs"); + } + System.setProperty("com.alipay.remoting.client.log.level", "WARN"); + if (System.getProperty("bolt.channel_write_buf_low_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_low_water_mark", + Integer.toString(4 * 1024 * 1024)); + } + if (System.getProperty("bolt.channel_write_buf_high_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_high_water_mark", + Integer.toString(8 * 1024 * 1024)); + } + SpringApplication application = new SpringApplication(StoreNodeApplication.class); + PdConfigureListener listener = new PdConfigureListener(); + ContextClosedListener closedListener = new ContextClosedListener(); + application.addListeners(listener); + 
application.addListeners(closedListener); + ConfigurableApplicationContext context = application.run(); + listener.setContext(context); + System.out.println("StoreNodeApplication started."); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreMetricsController.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreMetricsController.java new file mode 100644 index 0000000000..aa6e175196 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreMetricsController.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.controller; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hugegraph.store.node.grpc.HgStoreNodeService; +import org.apache.hugegraph.store.node.metrics.DriveMetrics; +import org.apache.hugegraph.store.node.metrics.SystemMetrics; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.RestController; + +import com.alipay.sofa.jraft.core.NodeMetrics; + +/** + * 2021/11/23 + */ +@RestController +@RequestMapping(value = "/metrics", method = RequestMethod.GET) +public class HgStoreMetricsController { + private final SystemMetrics systemMetrics = new SystemMetrics(); + private final DriveMetrics driveMetrics = new DriveMetrics(); + @Autowired + HgStoreNodeService nodeService; + + @GetMapping + public Map index() { + return new HashMap<>(); + } + + @GetMapping("system") + public Map> system() { + return this.systemMetrics.metrics(); + } + + @GetMapping("drive") + public Map> drive() { + return this.driveMetrics.metrics(); + } + + @GetMapping("raft") + public Map getRaftMetrics() { + return nodeService.getNodeMetrics(); + } + + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreStatusController.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreStatusController.java new file mode 100644 index 0000000000..e02315623c --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreStatusController.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.controller; + +import java.io.Serializable; + +import org.apache.hugegraph.store.grpc.state.ScanState; +import org.apache.hugegraph.store.node.entry.RestResult; +import org.apache.hugegraph.store.node.grpc.HgStoreNodeState; +import org.apache.hugegraph.store.node.grpc.HgStoreStreamImpl; +import org.apache.hugegraph.store.node.model.HgNodeStatus; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import com.google.protobuf.util.JsonFormat; + +/** + * created on 2021/11/1 + */ +@RestController +public class HgStoreStatusController { + + @Autowired + HgStoreStreamImpl streamImpl; + + @GetMapping("/-/echo") + public HgNodeStatus greeting( + @RequestParam(value = "name", defaultValue = "World") String name) { + return new HgNodeStatus(0, name + " is ok."); + } + + @GetMapping("/-/state") + public HgNodeStatus getState() { + return new HgNodeStatus(0, 
HgStoreNodeState.getState().name()); + } + + @PutMapping("/-/state") + public HgNodeStatus setState(@RequestParam(value = "name") String name) { + + switch (name) { + case "starting": + HgStoreNodeState.goStarting(); + break; + case "online": + HgStoreNodeState.goOnline(); + break; + case "stopping": + HgStoreNodeState.goStopping(); + break; + default: + return new HgNodeStatus(1000, "invalid parameter: " + name); + } + + return new HgNodeStatus(0, name); + } + + @GetMapping(value = "/-/scan", + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public Serializable getScanState() { + RestResult result = new RestResult(); + try { + ScanState state = streamImpl.getState(); + JsonFormat.Printer printer = JsonFormat.printer(); + printer = printer.includingDefaultValueFields().preservingProtoFieldNames(); + return printer.print(state); + } catch (Exception e) { + result.setState(RestResult.ERR); + result.setMessage(e.getMessage()); + return result; + } + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java new file mode 100644 index 0000000000..fac0ccc610 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.controller; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hugegraph.store.PartitionEngine; +import org.apache.hugegraph.store.meta.Partition; +import org.apache.hugegraph.store.meta.Store; +import org.apache.hugegraph.store.node.grpc.HgStoreNodeService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import lombok.extern.slf4j.Slf4j; + +/** + * 仅用于测试 + */ +@RestController +@Slf4j +@RequestMapping("/test") +public class HgTestController { + @Autowired + HgStoreNodeService nodeService; + + @GetMapping(value = "/leaderStore", produces = MediaType.APPLICATION_JSON_VALUE) + public Store testGetStoreInfo() { + + Store store = null; + PartitionEngine engine = nodeService.getStoreEngine().getPartitionEngine(0); + + for (Partition partition : engine.getPartitions().values()) { + store = nodeService.getStoreEngine().getHgCmdClient() + .getStoreInfo(engine.getLeader().toString()); + } + return store; + } + + @GetMapping(value = "/raftRestart/{groupId}", produces = MediaType.APPLICATION_JSON_VALUE) + public String restartRaftNode(@PathVariable(value = "groupId") int groupId) { + PartitionEngine engine = nodeService.getStoreEngine().getPartitionEngine(groupId); + 
engine.restartRaftNode(); + return "OK"; + } + + @GetMapping(value = "/raftDelete/{groupId}", produces = MediaType.APPLICATION_JSON_VALUE) + public String deleteRaftNode(@PathVariable(value = "groupId") int groupId) { + List graphs = new ArrayList<>(); + PartitionEngine engine = nodeService.getStoreEngine().getPartitionEngine(groupId); + if (engine != null) { + engine.getPartitions().forEach((k, v) -> { + graphs.add(v.getGraphName()); + }); + nodeService.getStoreEngine().destroyPartitionEngine(groupId, graphs); + return "OK"; + } else { + return "未找到分区"; + } + + + } + + @GetMapping(value = "/gc", produces = MediaType.APPLICATION_JSON_VALUE) + public String doGc() { + System.gc(); + return "gc OK!"; + } + + @GetMapping(value = "/flush", produces = MediaType.APPLICATION_JSON_VALUE) + public String doFlush() { + nodeService.getStoreEngine().getBusinessHandler().flushAll(); + return "flush all!"; + } + + @GetMapping(value = "/close", produces = MediaType.APPLICATION_JSON_VALUE) + public String doCloseAll() { + nodeService.getStoreEngine().getBusinessHandler().closeAll(); + return "close all!"; + } + + @GetMapping(value = "/snapshot", produces = MediaType.APPLICATION_JSON_VALUE) + public String doSnapshot() { + nodeService.getStoreEngine().getPartitionEngines().forEach((k, v) -> { + v.snapshot(); + }); + return "snapshot OK!"; + } + + @GetMapping(value = "/compact", produces = MediaType.APPLICATION_JSON_VALUE) + public String dbCompaction() { + nodeService.getStoreEngine().getPartitionEngines().forEach((k, v) -> { + nodeService.getStoreEngine().getBusinessHandler().dbCompaction("", k); + }); + return "snapshot OK!"; + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/IndexAPI.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/IndexAPI.java new file mode 100644 index 0000000000..b3a542f794 --- /dev/null +++ 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/IndexAPI.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.controller; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.store.meta.Partition; +import org.apache.hugegraph.store.metric.HgStoreMetric; +import org.apache.hugegraph.store.node.grpc.HgStoreNodeService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import com.alipay.sofa.jraft.entity.PeerId; +import com.alipay.sofa.jraft.util.Endpoint; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/") +public class IndexAPI { + @Autowired + HgStoreNodeService nodeService; + + @GetMapping(value = "/", produces = "application/json") + public StoreInfo index() { + StoreInfo info = new StoreInfo(); + info.leaderCount = 
nodeService.getStoreEngine().getLeaderPartition().size(); + info.partitionCount = nodeService.getStoreEngine().getPartitionEngines().size(); + return info; + } + + public Map okMap(String k, Object v) { + Map map = new HashMap<>(); + map.put("status", 0); + map.put(k, v); + return map; + } + + @Data + class StoreInfo { + private int leaderCount; + private int partitionCount; + } + + @Data + public class Raft { + private final List partitions = new ArrayList<>(); + private int groupId; + private String role; + private String conf; + private Endpoint leader; + private long logIndex; + private List peers; + private List learners; + } + + @Data + public class PartitionInfo { + private final int id; // region id + private final String graphName; + // Region key range [startKey, endKey) + private final long startKey; + private final long endKey; + private final String version; + private final Metapb.PartitionState workState; + private HgStoreMetric.Partition metric; + private String leader; + + + public PartitionInfo(Partition pt) { + id = pt.getId(); + graphName = pt.getGraphName(); + startKey = pt.getStartKey(); + endKey = pt.getEndKey(); + + workState = pt.getWorkState(); + version = String.valueOf(pt.getVersion()); + + } + } +} + diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java new file mode 100644 index 0000000000..3b12dbf388 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java @@ -0,0 +1,248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.controller; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.rocksdb.access.RocksDBSession; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.HgStoreEngine; +import org.apache.hugegraph.store.PartitionEngine; +import org.apache.hugegraph.store.business.BusinessHandler; +import org.apache.hugegraph.store.business.InnerKeyCreator; +import org.apache.hugegraph.store.meta.Partition; +import org.apache.hugegraph.store.metric.HgStoreMetric; +import org.apache.hugegraph.store.node.AppConfig; +import org.apache.hugegraph.store.node.grpc.HgStoreNodeService; +import org.apache.hugegraph.util.Bytes; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +import com.alipay.sofa.jraft.entity.PeerId; +import com.alipay.sofa.jraft.util.Endpoint; 
+import com.taobao.arthas.agent.attach.ArthasAgent; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class PartitionAPI { + @Autowired + HgStoreNodeService nodeService; + + @Autowired + AppConfig appConfig; + + @GetMapping(value = "/partitions", produces = "application/json") + public Map getPartitions( + @RequestParam(required = false, defaultValue = "") String flags) { + + boolean accurate = false; + if (!flags.isEmpty()) { + List flagList = Arrays.asList(flags.split(",")); + if (flagList.contains("accurate")) { + accurate = true; + } + } + + List rafts = new ArrayList<>(); + HgStoreEngine storeEngine = nodeService.getStoreEngine(); + + BusinessHandler businessHandler = storeEngine.getBusinessHandler(); + Map partitionEngines = storeEngine.getPartitionEngines(); + + for (Map.Entry engineEntry : partitionEngines.entrySet()) { + PartitionEngine engine = engineEntry.getValue(); + Raft raft = new Raft(); + raft.setGroupId(engine.getGroupId()); + raft.setLeader(engine.getLeader()); + raft.setRole(engine.getRaftNode().getNodeState().name()); + raft.setConf(engine.getCurrentConf().toString()); + if (engine.isLeader()) { + raft.setPeers(engine.getRaftNode().listPeers()); + raft.setLearners(engine.getRaftNode().listLearners()); + } + raft.setTerm(engine.getLeaderTerm()); + raft.setLogIndex(engine.getCommittedIndex()); + raft.setPartitionCount(engine.getPartitions().size()); + for (Map.Entry partitionEntry : engine.getPartitions().entrySet()) { + String graphName = partitionEntry.getKey(); + Partition pt = partitionEntry.getValue(); + PartitionInfo partition = new PartitionInfo(pt); + // 此处为了打开所有的图,metric只返回已打开的图 + businessHandler.getLatestSequenceNumber(graphName, pt.getId()); + partition.setMetric( + businessHandler.getPartitionMetric(graphName, pt.getId(), accurate)); + partition.setLeader(pt.isLeader() == engine.isLeader() ? 
"OK" : "Error"); + raft.getPartitions().add(partition); + } + rafts.add(raft); + } + + return okMap("partitions", rafts); + } + + @GetMapping(value = "/partition/{id}", produces = "application/json") + public Raft getPartition(@PathVariable(value = "id") int id) { + + HgStoreEngine storeEngine = nodeService.getStoreEngine(); + + BusinessHandler businessHandler = storeEngine.getBusinessHandler(); + PartitionEngine engine = storeEngine.getPartitionEngine(id); + + Raft raft = new Raft(); + raft.setGroupId(engine.getGroupId()); + raft.setLeader(engine.getLeader()); + raft.setRole(engine.getRaftNode().getNodeState().name()); + if (engine.isLeader()) { + raft.setPeers(engine.getRaftNode().listPeers()); + raft.setLearners(engine.getRaftNode().listLearners()); + } + raft.setLogIndex(engine.getCommittedIndex()); + for (Map.Entry partitionEntry : engine.getPartitions().entrySet()) { + String graphName = partitionEntry.getKey(); + Partition pt = partitionEntry.getValue(); + PartitionInfo partition = new PartitionInfo(pt); + partition.setMetric(businessHandler.getPartitionMetric(graphName, pt.getId(), false)); + + raft.getPartitions().add(partition); + } + + return raft; + //return okMap("partition", rafts); + } + + /** + * 打印分区的所有key + */ + @GetMapping(value = "/partition/dump/{id}", produces = MediaType.APPLICATION_JSON_VALUE) + public Map dumpPartition(@PathVariable(value = "id") int id) throws + PDException { + HgStoreEngine storeEngine = nodeService.getStoreEngine(); + BusinessHandler handler = storeEngine.getBusinessHandler(); + InnerKeyCreator innerKeyCreator = new InnerKeyCreator(handler); + storeEngine.getPartitionEngine(id).getPartitions().forEach((graph, partition) -> { + log.info("{}----------------------------", graph); + ScanIterator cfIterator = handler.scanRaw(graph, partition.getId(), 0); + while (cfIterator.hasNext()) { + try (ScanIterator iterator = cfIterator.next()) { + byte[] cfName = cfIterator.position(); + log.info("\t{}", new String(cfName)); + while 
(iterator.hasNext()) { + RocksDBSession.BackendColumn col = iterator.next(); + int keyCode = innerKeyCreator.parseKeyCode(col.name); + log.info("\t\t{} --key={}, code={} ", new String(col.name), + Bytes.toHex(col.name), keyCode); + } + } + } + cfIterator.close(); + }); + return okMap("ok", null); + } + + /** + * 打印分区的所有key + */ + @GetMapping(value = "/partition/clean/{id}", produces = MediaType.APPLICATION_JSON_VALUE) + public Map cleanPartition(@PathVariable(value = "id") int id) throws + PDException { + HgStoreEngine storeEngine = nodeService.getStoreEngine(); + BusinessHandler handler = storeEngine.getBusinessHandler(); + + storeEngine.getPartitionEngine(id).getPartitions().forEach((graph, partition) -> { + handler.cleanPartition(graph, id); + }); + return okMap("ok", null); + } + + @GetMapping(value = "/arthasstart", produces = "application/json") + public Map arthasstart( + @RequestParam(required = false, defaultValue = "") String flags) { + HashMap configMap = new HashMap<>(); + configMap.put("arthas.telnetPort", appConfig.getArthasConfig().getTelnetPort()); + configMap.put("arthas.httpPort", appConfig.getArthasConfig().getHttpPort()); + configMap.put("arthas.ip", appConfig.getArthasConfig().getArthasip()); + configMap.put("arthas.disabledCommands", appConfig.getArthasConfig().getDisCmd()); + ArthasAgent.attach(configMap); +// DashResponse retPose = new DashResponse(); + List ret = new ArrayList<>(); + ret.add("Arthas 启动成功"); + return okMap("arthasstart", ret); + } + + public Map okMap(String k, Object v) { + Map map = new HashMap<>(); + map.put("status", 0); + map.put(k, v); + return map; + } + + @Data + public class Raft { + private final List partitions = new ArrayList<>(); + private int groupId; + private String role; + private String conf; + private Endpoint leader; + private long term; + private long logIndex; + private List peers; + private List learners; + private int partitionCount; + } + + @Data + public class PartitionInfo { + private final int id; 
// region id + private final String graphName; + // Region key range [startKey, endKey) + private final long startKey; + private final long endKey; + private final String version; + private final Metapb.PartitionState workState; + private HgStoreMetric.Partition metric; + private String leader; + + + public PartitionInfo(Partition pt) { + id = pt.getId(); + graphName = pt.getGraphName(); + startKey = pt.getStartKey(); + endKey = pt.getEndKey(); + + workState = pt.getWorkState(); + version = String.valueOf(pt.getVersion()); + + } + } +} + diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/entry/RestResult.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/entry/RestResult.java new file mode 100644 index 0000000000..14e7109bd4 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/entry/RestResult.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.entry; + +import java.io.Serializable; + +import lombok.Data; + +@Data +public class RestResult implements Serializable { + public static final String OK = "OK"; + public static final String ERR = "ERR"; + String state; + String message; + Serializable data; +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java new file mode 100644 index 0000000000..f7ad7ff75a --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Function; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.hugegraph.store.grpc.common.ResCode; +import org.apache.hugegraph.store.grpc.common.ResStatus; +import org.apache.hugegraph.store.grpc.session.FeedbackRes; +import org.apache.hugegraph.store.grpc.session.PartitionFaultResponse; +import org.apache.hugegraph.store.grpc.session.PartitionFaultType; +import org.apache.hugegraph.store.grpc.session.PartitionLeader; +import org.apache.hugegraph.store.raft.RaftClosure; +import org.apache.hugegraph.store.util.HgRaftError; + +import com.alipay.sofa.jraft.Status; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * 批量处理的grpc回调封装类 + * + * @param + */ +@Slf4j +class BatchGrpcClosure { + private final CountDownLatch countDownLatch; + private final List errorStatus; + private final List results; + private final Map leaderMap; + + public BatchGrpcClosure(int count) { + countDownLatch = new CountDownLatch(count); + errorStatus = Collections.synchronizedList(new ArrayList<>()); + results = Collections.synchronizedList(new ArrayList<>()); + leaderMap = new ConcurrentHashMap<>(); + } + + public RaftClosure newRaftClosure() { + return new GrpcClosure() { + @Override + public void run(Status status) { + countDownLatch.countDown(); + if (status.isOk()) { + results.add(this.getResult()); + } else { + leaderMap.putAll(this.getLeaderMap()); + errorStatus.add(status); + } + } + }; + } + + public RaftClosure newRaftClosure(Consumer ok) { + return new GrpcClosure() { + @Override + public void run(Status status) { + 
countDownLatch.countDown(); + if (status.isOk()) { + results.add(this.getResult()); + } else { + leaderMap.putAll(this.getLeaderMap()); + errorStatus.add(status); + } + ok.accept(status); + } + }; + } + + /** + * 不使用计数器latch + * + * @return + */ + public RaftClosure newClosureNoLatch() { + return new GrpcClosure() { + @Override + public void run(Status status) { + if (status.isOk()) { + results.add(this.getResult()); + } else { + leaderMap.putAll(this.getLeaderMap()); + errorStatus.add(status); + } + } + }; + } + + + public PartitionFaultResponse getErrorResponse() { + PartitionFaultResponse errorResponse; + + if (leaderMap.size() > 0) { + PartitionFaultResponse.Builder partitionFault = + PartitionFaultResponse.newBuilder().setFaultType( + PartitionFaultType.PARTITION_FAULT_TYPE_NOT_LEADER); + leaderMap.forEach((k, v) -> { + partitionFault.addPartitionLeaders(PartitionLeader.newBuilder() + .setPartitionId(k) + .setLeaderId(v).build()); + }); + errorResponse = partitionFault.build(); + } else { + PartitionFaultType faultType = PartitionFaultType.PARTITION_FAULT_TYPE_UNKNOWN; + switch (HgRaftError.forNumber(errorStatus.get(0).getCode())) { + case NOT_LEADER: + faultType = PartitionFaultType.PARTITION_FAULT_TYPE_NOT_LEADER; + break; + case WAIT_LEADER_TIMEOUT: + faultType = PartitionFaultType.PARTITION_FAULT_TYPE_WAIT_LEADER_TIMEOUT; + break; + case NOT_LOCAL: + faultType = PartitionFaultType.PARTITION_FAULT_TYPE_NOT_LOCAL; + break; + default: + log.error("Unmatchable errorStatus: " + errorStatus); + } + errorResponse = PartitionFaultResponse.newBuilder().setFaultType(faultType).build(); + } + return errorResponse; + } + + public String getErrorMsg() { + StringBuilder builder = new StringBuilder(); + errorStatus.forEach(status -> { + if (!status.isOk()) { + builder.append(status.getErrorMsg()); + builder.append("\n"); + } + }); + return builder.toString(); + } + + /** + * 等待raft执行结束,返回结果给grpc + */ + public void waitFinish(StreamObserver observer, Function, V> ok, long 
timeout) { + try { + countDownLatch.await(timeout, TimeUnit.MILLISECONDS); + + if (errorStatus.isEmpty()) { // 没有错误时,合并结果 + observer.onNext(ok.apply(results)); + } else { + observer.onNext((V) FeedbackRes.newBuilder() + .setStatus(ResStatus.newBuilder() + .setCode(ResCode.RES_CODE_FAIL) + .setMsg(getErrorMsg())) + .setPartitionFaultResponse(this.getErrorResponse()) + .build()); + } + } catch (InterruptedException e) { + log.error("waitFinish exception: ", e); + observer.onNext((V) FeedbackRes.newBuilder() + .setStatus(ResStatus.newBuilder() + .setCode(ResCode.RES_CODE_FAIL) + .setMsg(e.getLocalizedMessage()) + .build()).build()); + } + observer.onCompleted(); + } + + /** + * 从多个结果中选择一个错误的结果返回,如果没有错误,返回第一个 + */ + public FeedbackRes selectError(List results) { + if (!CollectionUtils.isEmpty(results)) { + AtomicReference res = new AtomicReference<>(results.get(0)); + results.forEach(e -> { + try { + if (e.getStatus().getCode() != ResCode.RES_CODE_OK) { + res.set(e); + } + } catch (Exception ex) { + log.error("{}", ex); + } + }); + return res.get(); + } else { + return FeedbackRes.newBuilder() + .setStatus(ResStatus.newBuilder() + .setCode(ResCode.RES_CODE_OK).build()) + .build(); + } + + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchScanIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchScanIterator.java new file mode 100644 index 0000000000..56851d8f9a --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchScanIterator.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.NoSuchElementException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.node.util.HgAssert; +import org.apache.hugegraph.store.node.util.HgStoreConst; + +/** + * 2022/2/28 + * + * @version 0.3.0 added limit support + */ +@NotThreadSafe +public final class BatchScanIterator implements ScanIterator { + private final Supplier> batchSupplier; + private final Supplier limitSupplier; + private final AtomicBoolean closed = new AtomicBoolean(); + private ScanIterator iterator; + private boolean hasNext = false; + private long curCount; + private long curLimit; + + private BatchScanIterator(Supplier> iteratorSupplier, + Supplier limitSupplier) { + this.batchSupplier = iteratorSupplier; + this.limitSupplier = limitSupplier; + } + + public static BatchScanIterator of( + Supplier> iteratorSupplier, + Supplier limitSupplier) { + HgAssert.isArgumentNotNull(iteratorSupplier, "iteratorSupplier"); + HgAssert.isArgumentNotNull(limitSupplier, "limitSupplier"); + return new BatchScanIterator(iteratorSupplier, limitSupplier); + } + + private ScanIterator getIterator() { + ScanIterator buf; + int count = 0; 
+ this.curCount = 0L; + + do { + buf = this.batchSupplier.get().getValue(); + + if (buf == null) { + break; + } + + if (!buf.hasNext()) { + buf.close(); + buf = null; + } + + if (++count == Integer.MAX_VALUE) { + throw new RuntimeException("Do loop times more than Integer.MAX_VALUE"); + } + + } while (buf == null); + + if (buf != null) { + Long limit = this.limitSupplier.get(); + + if (limit == null || limit <= 0) { + this.curLimit = Integer.MAX_VALUE; + } else { + this.curLimit = limit; + } + + } + + return buf; + + } + + @Override + public boolean hasNext() { + + if (this.iterator == null) { + this.iterator = this.getIterator(); + } else if (!this.iterator.hasNext()) { + this.iterator.close(); + this.iterator = this.getIterator(); + } else if (this.curCount == this.curLimit) { + this.iterator.close(); + this.iterator = this.getIterator(); + } + + if (this.iterator == null) { + return false; + } else { + this.hasNext = true; + return true; + } + } + + @Override + public T next() { + if (this.hasNext) { + this.hasNext = false; + } else { + if (!this.hasNext()) { + throw new NoSuchElementException(); + } + } + + this.curCount++; + + return this.iterator.next(); + } + + @Override + public void close() { + if (!this.closed.getAndSet(true)) { + if (this.iterator != null) { + this.iterator.close(); + } + } + } + + @Override + public boolean isValid() { + throw new UnsupportedOperationException(); + } + + @Override + public long count() { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] position() { + if (this.iterator != null) { + return this.iterator.position(); + } + return HgStoreConst.EMPTY_BYTES; + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/EmptyIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/EmptyIterator.java new file mode 100644 index 0000000000..a6d2b6283d --- /dev/null +++ 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/EmptyIterator.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import org.apache.hugegraph.rocksdb.access.ScanIterator; + +/** + * 2021/11/29 + */ +final class EmptyIterator implements ScanIterator { + + @Override + public boolean hasNext() { + return false; + } + + @Override + public boolean isValid() { + return false; + } + + @Override + public T next() { + return null; + } + + @Override + public long count() { + return 0; + } + + @Override + public byte[] position() { + return new byte[0]; + } + + @Override + public void close() { + + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java new file mode 100644 index 0000000000..01d5778684 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.NoSuchElementException; +import java.util.function.Supplier; + +import org.apache.hugegraph.rocksdb.access.ScanIterator; + +/** + * This is a wrapper of the ScanIterator that provides a mechanism + * to set a threshold value in order to abort the iterating operation. + *

+ * 2023/2/8 + */ +final class FusingScanIterator implements ScanIterator { + public static final byte[] EMPTY_BYTES = new byte[0]; + private long max; + private long accumulator; + private Supplier supplier; + private ScanIterator iterator; + private byte[] position = EMPTY_BYTES; + + private FusingScanIterator() { + } + + public static FusingScanIterator maxOf(long maxThreshold, + Supplier iteratorSupplier) { + FusingScanIterator res = new FusingScanIterator(); + res.max = maxThreshold; + res.supplier = iteratorSupplier; + return res; + } + + private ScanIterator getIterator() { + ScanIterator buf = this.supplier.get(); + if (buf == null) { + return null; + } + if (!buf.hasNext()) { + buf = null; + } + return buf; + } + + private void init() { + if (this.iterator == null) { + this.iterator = this.getIterator(); + } + } + + @Override + public boolean hasNext() { + if (this.isThresholdExceeded()) { + return false; + } + if (this.iterator == null) { + this.iterator = this.getIterator(); + } + return this.iterator != null; + } + + @Override + public boolean isValid() { + return hasNext(); + } + + @Override + public byte[] position() { + return this.position; + } + + /** + * @return true, when the threshold is exceeded. 
+ */ + private boolean isThresholdExceeded() { + return this.accumulator >= this.max; + } + + @Override + public T next() { + if (this.isThresholdExceeded()) { + throw new NoSuchElementException(); + } + this.init(); + if (this.iterator == null) { + throw new NoSuchElementException(); + } + T t = this.iterator.next(); + position = this.iterator.position(); + this.accumulator++; + if (!this.iterator.hasNext() || this.isThresholdExceeded()) { + this.iterator.close(); + this.iterator = null; + } + return t; + } + + @Override + public void close() { + if (this.iterator != null) { + this.iterator.close(); + } + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java new file mode 100644 index 0000000000..2e3b74c29b --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + +import org.apache.hugegraph.store.node.AppConfig; +import org.apache.hugegraph.store.node.util.HgExecutorUtil; +import org.lognet.springboot.grpc.GRpcServerBuilderConfigurer; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import io.grpc.ServerBuilder; + +/** + * 2022/3/4 + */ +@Component +public class GRpcServerConfig extends GRpcServerBuilderConfigurer { + public final static String EXECUTOR_NAME = "hg-grpc"; + @Autowired + private AppConfig appConfig; + + @Override + public void configure(ServerBuilder serverBuilder) { + AppConfig.ThreadPoolGrpc grpc = appConfig.getThreadPoolGrpc(); + serverBuilder.executor( + HgExecutorUtil.createExecutor(EXECUTOR_NAME, grpc.getCore(), grpc.getMax(), + grpc.getQueue()) + ); + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java new file mode 100644 index 0000000000..825a57ef90 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.store.grpc.session.FeedbackRes; +import org.apache.hugegraph.store.raft.RaftClosure; + +import io.grpc.stub.StreamObserver; + +/** + * 2022/1/27 + */ + +abstract class GrpcClosure implements RaftClosure { + private final Map leaderMap = new HashMap<>(); + private V result; + + /** + * 设置输出结果给raftClosure,对于Follower来说,raftClosure为空 + */ + public static void setResult(RaftClosure raftClosure, V result) { + GrpcClosure closure = (GrpcClosure) raftClosure; + if (closure != null) { + closure.setResult(result); + } + } + + public static RaftClosure newRaftClosure(StreamObserver observer) { + BatchGrpcClosure wrap = new BatchGrpcClosure<>(0); + return wrap.newRaftClosure(s -> { + wrap.waitFinish(observer, r -> { + return (V) wrap.selectError((List) r); + }, 0); + }); + } + + public V getResult() { + return result; + } + + public void setResult(V result) { + this.result = result; + } + + public Map getLeaderMap() { + return leaderMap; + } + + @Override + public void onLeaderChanged(Integer partId, Long storeId) { + leaderMap.put(partId, storeId); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java new file mode 100644 index 0000000000..86239bf3d2 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import static org.apache.hugegraph.store.grpc.common.GraphMethod.GRAPH_METHOD_DELETE; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import javax.annotation.PostConstruct; +import javax.annotation.PreDestroy; + +import org.apache.hugegraph.store.HgStoreEngine; +import org.apache.hugegraph.store.business.DefaultDataMover; +import org.apache.hugegraph.store.grpc.session.BatchReq; +import org.apache.hugegraph.store.grpc.session.CleanReq; +import org.apache.hugegraph.store.grpc.session.GraphReq; +import org.apache.hugegraph.store.grpc.session.TableReq; +import org.apache.hugegraph.store.node.AppConfig; +import org.apache.hugegraph.store.options.HgStoreEngineOptions; +import org.apache.hugegraph.store.options.RaftRocksdbOptions; +import org.apache.hugegraph.store.raft.RaftClosure; +import org.apache.hugegraph.store.raft.RaftOperation; +import org.apache.hugegraph.store.raft.RaftTaskHandler; +import org.apache.hugegraph.store.util.HgRaftError; +import org.apache.hugegraph.store.util.HgStoreException; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.core.NodeMetrics; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + +import 
lombok.extern.slf4j.Slf4j; + +/** + * @projectName: raft task executor + */ +@Slf4j +@Service +public class HgStoreNodeService implements RaftTaskHandler { + public static final byte BATCH_OP = 0x12; + public static final byte TABLE_OP = 0x13; + public static final byte GRAPH_OP = 0x14; + public static final byte CLEAN_OP = 0x15; + + + public static final byte MAX_OP = 0x59; + private final AppConfig appConfig; + @Autowired + HgStoreSessionImpl hgStoreSession; + private HgStoreEngine storeEngine; + + public HgStoreNodeService(@Autowired AppConfig appConfig) { + this.appConfig = appConfig; + } + + public HgStoreEngine getStoreEngine() { + return this.storeEngine; + } + + @PostConstruct + public void init() { + log.info("{}", appConfig.toString()); + HgStoreEngineOptions options = new HgStoreEngineOptions() {{ + setRaftAddress(appConfig.getRaft().getAddress()); + setDataPath(appConfig.getDataPath()); + setRaftPath(appConfig.getRaftPath()); + setPdAddress(appConfig.getPdServerAddress()); + setFakePD(appConfig.isFakePd()); + setRocksdbConfig(appConfig.getRocksdbConfig()); + setGrpcAddress(appConfig.getStoreServerAddress()); + setLabels(appConfig.getLabelConfig().getLabel()); + setRaftOptions(new RaftOptions() {{ + setMetrics(appConfig.getRaft().isMetrics()); + setRpcDefaultTimeout(appConfig.getRaft().getRpcTimeOut()); + setSnapshotLogIndexMargin(appConfig.getRaft().getSnapshotLogIndexMargin()); + setSnapshotIntervalSecs(appConfig.getRaft().getSnapshotInterval()); + setDisruptorBufferSize(appConfig.getRaft().getDisruptorBufferSize()); + setMaxLogSize(appConfig.getRaft().getMaxLogFileSize()); + setAveLogEntrySizeRatio(appConfig.getRaft().getAveLogEntrySizeRation()); + setUseRocksDBSegmentLogStorage(appConfig.getRaft() + .isUseRocksDBSegmentLogStorage()); + setMaxSegmentFileSize(appConfig.getRaft().getMaxSegmentFileSize()); + setMaxReplicatorInflightMsgs(appConfig.getRaft().getMaxReplicatorInflightMsgs()); + }}); + setFakePdOptions(new FakePdOptions() {{ + 
setStoreList(appConfig.getFakePdConfig().getStoreList()); + setPeersList(appConfig.getFakePdConfig().getPeersList()); + setPartitionCount(appConfig.getFakePdConfig().getPartitionCount()); + setShardCount(appConfig.getFakePdConfig().getShardCount()); + }}); + }}; + + RaftRocksdbOptions.initRocksdbGlobalConfig(options.getRocksdbConfig()); + + options.getLabels().put("rest.port", Integer.toString(appConfig.getRestPort())); + log.info("HgStoreEngine init {}", options); + options.setTaskHandler(this); + options.setDataTransfer(new DefaultDataMover()); + storeEngine = HgStoreEngine.getInstance(); + storeEngine.init(options); + + } + + public List getGraphLeaderPartitionIds(String graphName) { + return storeEngine.getPartitionManager().getLeaderPartitionIds(graphName); + } + + /** + * 添加raft 任务,转发数据给raft + * + * @return true 表示数据已被提交,false表示未提交,用于单副本入库减少批次拆分 + */ + public + void addRaftTask(byte methodId, String graphName, Integer partitionId, Req req, + RaftClosure closure) { + if (!storeEngine.isClusterReady()) { + closure.run(new Status(HgRaftError.CLUSTER_NOT_READY.getNumber(), + "The cluster is not ready, please check active stores number!")); + log.error("The cluster is not ready, please check active stores number!"); + return; + } + // + try { + // 序列化, + final byte[] buffer = new byte[req.getSerializedSize() + 1]; + final CodedOutputStream output = CodedOutputStream.newInstance(buffer); + output.write(methodId); + req.writeTo(output); + output.checkNoSpaceLeft(); + output.flush(); + // 传送给raft + storeEngine.addRaftTask(graphName, partitionId, + RaftOperation.create(methodId, buffer, req), closure); + + } catch (Exception e) { + closure.run(new Status(HgRaftError.UNKNOWN.getNumber(), e.getMessage())); + log.error("addRaftTask {}", e); + } + + } + + /** + * 来自日志的任务,一般是follower 或者 日志回滚的任务 + */ + @Override + public boolean invoke(int partId, byte[] request, RaftClosure response) throws + HgStoreException { + try { + CodedInputStream input = 
CodedInputStream.newInstance(request); + byte methodId = input.readRawByte(); + switch (methodId) { + case HgStoreNodeService.BATCH_OP: + invoke(partId, methodId, BatchReq.parseFrom(input), response); + break; + case HgStoreNodeService.TABLE_OP: + invoke(partId, methodId, TableReq.parseFrom(input), response); + break; + case HgStoreNodeService.GRAPH_OP: + invoke(partId, methodId, GraphReq.parseFrom(input), response); + break; + case HgStoreNodeService.CLEAN_OP: + invoke(partId, methodId, CleanReq.parseFrom(input), response); + break; + default: + return false; // 未处理 + } + } catch (IOException e) { + throw new HgStoreException(e.getMessage(), e); + } + return true; + } + + /** + * 处理raft传送过来的数据 + */ + @Override + public boolean invoke(int partId, byte methodId, Object req, RaftClosure response) throws + HgStoreException { + switch (methodId) { + case HgStoreNodeService.BATCH_OP: + hgStoreSession.doBatch(partId, (BatchReq) req, response); + break; + case HgStoreNodeService.TABLE_OP: + hgStoreSession.doTable(partId, (TableReq) req, response); + break; + case HgStoreNodeService.GRAPH_OP: + if (((GraphReq) req).getMethod() == GRAPH_METHOD_DELETE) { + storeEngine.deletePartition(partId, ((GraphReq) req).getGraphName()); + } + hgStoreSession.doGraph(partId, (GraphReq) req, response); + break; + case HgStoreNodeService.CLEAN_OP: + hgStoreSession.doClean(partId, (CleanReq) req, response); + break; + default: + return false; // 未处理 + } + return true; + } + + @PreDestroy + public void destroy() { + storeEngine.shutdown(); + } + + + private String getSerializingExceptionMessage(String target) { + return "Serializing " + + getClass().getName() + + " to a " + + target + + " threw an IOException (should never happen)."; + } + + public Map getNodeMetrics() { + return storeEngine.getNodeMetrics(); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeState.java 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeState.java new file mode 100644 index 0000000000..aecb176878 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeState.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.grpc.state.NodeStateType; + +/** + * created on 2021/11/3 + */ +@ThreadSafe +public final class HgStoreNodeState { + + private static NodeStateType curState = NodeStateType.STARTING; + + public static NodeStateType getState() { + return curState; + } + + private static void setState(NodeStateType state) { + curState = state; + change(); + } + + private static void change() { + HgStoreStateSubject.notifyAll(curState); + } + + public static void goOnline() { + setState(NodeStateType.ONLINE); + } + + public static void goStarting() { + setState(NodeStateType.STARTING); + } + + public static void goStopping() { + setState(NodeStateType.STOPPING); + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java new file mode 100644 index 0000000000..aff054917a --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java @@ -0,0 +1,551 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.GraphMode; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.business.BusinessHandler; +import org.apache.hugegraph.store.grpc.common.Key; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.common.ResCode; +import org.apache.hugegraph.store.grpc.common.ResStatus; +import org.apache.hugegraph.store.grpc.session.Agg; +import org.apache.hugegraph.store.grpc.session.BatchEntry; +import org.apache.hugegraph.store.grpc.session.BatchGetReq; +import org.apache.hugegraph.store.grpc.session.BatchReq; +import org.apache.hugegraph.store.grpc.session.BatchWriteReq; +import org.apache.hugegraph.store.grpc.session.CleanReq; +import org.apache.hugegraph.store.grpc.session.FeedbackRes; +import org.apache.hugegraph.store.grpc.session.GetReq; +import org.apache.hugegraph.store.grpc.session.GraphReq; +import org.apache.hugegraph.store.grpc.session.HgStoreSessionGrpc; +import org.apache.hugegraph.store.grpc.session.KeyValueResponse; +import org.apache.hugegraph.store.grpc.session.TableReq; +import org.apache.hugegraph.store.grpc.session.ValueResponse; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; +import org.apache.hugegraph.store.meta.Graph; +import org.apache.hugegraph.store.meta.GraphManager; +import org.apache.hugegraph.store.node.AppConfig; +import org.apache.hugegraph.store.node.util.HgGrpc; +import org.apache.hugegraph.store.node.util.HgStoreNodeUtil; +import org.apache.hugegraph.store.pd.PdProvider; +import 
org.apache.hugegraph.store.raft.RaftClosure; +import org.apache.hugegraph.store.util.HgStoreConst; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import com.google.protobuf.ByteString; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@GRpcService +public class HgStoreSessionImpl extends HgStoreSessionGrpc.HgStoreSessionImplBase { + @Autowired() + private AppConfig appConfig; + @Autowired + private HgStoreNodeService storeService; + private HgStoreWrapperEx wrapper; + private PdProvider pdProvider; + + private HgStoreWrapperEx getWrapper() { + if (this.wrapper == null) { + synchronized (this) { + if (this.wrapper == null) { + this.wrapper = new HgStoreWrapperEx( + storeService.getStoreEngine().getBusinessHandler()); + } + } + } + return this.wrapper; + } + + private PdProvider getPD() { + if (pdProvider == null) { + synchronized (this) { + if (pdProvider == null) { + pdProvider = storeService.getStoreEngine().getPdProvider(); + } + } + } + return pdProvider; + } + + @Override + public void get2(GetReq request, StreamObserver responseObserver) { + String graph = request.getHeader().getGraph(); + String table = request.getTk().getTable(); + byte[] key = request.getTk().getKey().toByteArray(); + int code = request.getTk().getCode(); + byte[] value = getWrapper().doGet(graph, code, table, key); + + FeedbackRes.Builder builder = FeedbackRes.newBuilder(); + + FeedbackRes res = null; + if (value != null) { + res = builder.setStatus(HgGrpc.success()) + .setValueResponse( + ValueResponse.newBuilder() + .setValue(ByteString.copyFrom(value)) + ).build(); + + } else { + res = builder.setStatus(HgGrpc.success()) + .setStatus(HgGrpc.not()) + .build(); + } + + responseObserver.onNext(res); + responseObserver.onCompleted(); + } + + @Override + public void clean(CleanReq request, + StreamObserver responseObserver) { + + String graph = request.getHeader().getGraph(); + int 
partition = request.getPartition(); + // 发给不同的raft执行 + BatchGrpcClosure closure = new BatchGrpcClosure<>(1); + storeService.addRaftTask(HgStoreNodeService.CLEAN_OP, graph, partition, + request, + closure.newRaftClosure()); + // 等待返回结果 + closure.waitFinish(responseObserver, r -> closure.selectError(r), + appConfig.getRaft().getRpcTimeOut()); + } + + public void doClean(int partId, CleanReq request, RaftClosure response) { + String graph = request.getHeader().getGraph(); + FeedbackRes.Builder builder = FeedbackRes.newBuilder(); + try { + if (getWrapper().doClean(graph, partId)) { + builder.setStatus(HgGrpc.success()); + } else { + builder.setStatus(HgGrpc.not()); + } + } catch (Throwable t) { + String msg = "Failed to doClean, graph: " + graph + "; partitionId = " + partId; + log.error(msg, t); + builder.setStatus(HgGrpc.fail(msg)); + } + GrpcClosure.setResult(response, builder.build()); + } + + @Override + public void batchGet2(BatchGetReq request, StreamObserver responseObserver) { + String graph = request.getHeader().getGraph(); + String table = request.getTable(); + FeedbackRes.Builder builder = FeedbackRes.newBuilder(); + + List keyList = request.getKeyList(); + if (keyList == null || keyList.isEmpty()) { + builder.setStatus(HgGrpc.fail("keys is empty")); + responseObserver.onNext(builder.build()); + responseObserver.onCompleted(); + return; + } + + KeyValueResponse.Builder keyValueBuilder = KeyValueResponse.newBuilder(); + + int max = keyList.size() - 1; + AtomicInteger count = new AtomicInteger(-1); + Kv.Builder kvBuilder = Kv.newBuilder(); + getWrapper().batchGet(graph, table, + () -> { + if (count.getAndAdd(1) == max) { + return null; + } + + Key key = keyList.get(count.get()); + if (log.isDebugEnabled()) { + log.debug("batch-get: " + + HgStoreNodeUtil.toStr( + key.getKey() + .toByteArray())); + } + return HgGrpc.toHgPair(key); + }, + ( + pair -> { + if (pair.getValue() == null || pair.getKey() == null) { + return; + } + 
keyValueBuilder.addKv(HgGrpc.toKv(pair, kvBuilder)); + } + ) + + ); + + builder.setKeyValueResponse(keyValueBuilder.build()); + responseObserver.onNext(builder.build()); + responseObserver.onCompleted(); + + } + + @Override + public void batch(BatchReq request, StreamObserver observer) { + String graph = request.getHeader().getGraph(); + List list = request.getWriteReq().getEntryList(); + PdProvider pd = getPD(); + try { + GraphManager graphManager = pd.getGraphManager(); + Graph managerGraph = graphManager.getGraph(graph); + if (managerGraph != null && graph.endsWith("/g")) { + Metapb.Graph g = managerGraph.getProtoObj(); + if (g == null || g.getGraphState() == null) { + g = pd.getPDClient().getGraphWithOutException(graph); + managerGraph.setGraph(g); + } + if (g != null) { + Metapb.GraphState graphState = g.getGraphState(); + if (graphState != null) { + GraphMode graphMode = graphState.getMode(); + if (graphMode != null && + graphMode.getNumber() == GraphMode.ReadOnly_VALUE) { + // 状态为只读时从pd获取最新的图状态,图只读状态会在pd的通知中更新 + Metapb.Graph pdGraph = + pd.getPDClient().getGraph(graph); + Metapb.GraphState pdGraphState = + pdGraph.getGraphState(); + if (pdGraphState != null && + pdGraphState.getMode() != null && + pdGraphState.getMode().getNumber() == + GraphMode.ReadOnly_VALUE) { + // 确认pd中存储的当前状态也是只读,则不允许插入数据 + throw new PDException(-1, + "the graph space size " + + "has " + + "reached the threshold"); + } + // pd状态与本地缓存不一致,本地缓存更新为pd中的状态 + managerGraph.setProtoObj(pdGraph); + } + } + } + } + } catch (PDException e) { + ResStatus status = ResStatus.newBuilder() + .setCode(ResCode.RES_CODE_EXCESS) + .setMsg(e.getMessage()) + .build(); + FeedbackRes feedbackRes = FeedbackRes.newBuilder() + .setStatus(status) + .build(); + observer.onNext(feedbackRes); + observer.onCompleted(); + return; + } + + // 按分区拆分数据 + Map> groups = new HashMap<>(); + list.forEach((entry) -> { + Key startKey = entry.getStartKey(); + if (startKey.getCode() == HgStoreConst.SCAN_ALL_PARTITIONS_ID) { + // 
所有Leader分区 + List ids = + storeService.getGraphLeaderPartitionIds(graph); + ids.forEach(id -> { + if (!groups.containsKey(id)) { + groups.put(id, new LinkedList<>()); + } + groups.get(id).add(entry); + }); + } else { + // 根据keyCode查询所属分区ID,按分区ID分组 + Integer partitionId = + pd.getPartitionByCode(graph, startKey.getCode()) + .getId(); + if (!groups.containsKey(partitionId)) { + groups.put(partitionId, new LinkedList<>()); + } + groups.get(partitionId).add(entry); + } + }); + + // 发给不同的raft执行 + BatchGrpcClosure closure = + new BatchGrpcClosure<>(groups.size()); + groups.forEach((partition, entries) -> { + storeService.addRaftTask(HgStoreNodeService.BATCH_OP, graph, + partition, + BatchReq.newBuilder() + .setHeader(request.getHeader()) + .setWriteReq( + BatchWriteReq.newBuilder() + .addAllEntry( + entries)) + .build(), + closure.newRaftClosure()); + }); + + if (!graph.isEmpty()) { + log.debug(" batch: waiting raft..."); + // 等待返回结果 + closure.waitFinish(observer, r -> closure.selectError(r), + appConfig.getRaft().getRpcTimeOut()); + log.debug(" batch: ended waiting"); + } else { + log.info(" batch: there is none of raft leader, graph = {}.", + request.getHeader().getGraph()); + observer.onNext( + FeedbackRes.newBuilder().setStatus(HgGrpc.success()) + .build()); + observer.onCompleted(); + } + } + + public void doBatch(int partId, BatchReq request, RaftClosure response) { + String graph = request.getHeader().getGraph(); + String batchId = request.getBatchId(); + FeedbackRes.Builder builder = FeedbackRes.newBuilder(); + List entries = request.getWriteReq().getEntryList(); + try { + getWrapper().doBatch(graph, partId, entries); + builder.setStatus(HgGrpc.success()); + } catch (Throwable t) { + String msg = "Failed to doBatch, graph: " + graph + "; batchId= " + batchId; + log.error(msg, t); + builder.setStatus(HgGrpc.fail(msg)); + } + GrpcClosure.setResult(response, builder.build()); + } + + // private static HgBusinessHandler.Batch toBatch(BatchEntry entry) { + // return 
new HgBusinessHandler.Batch() { + // @Override + // public BatchOpType getOp() { + // return BatchOpType.of(entry.getOpType().getNumber()); + // } + // + // @Override + // public int getKeyCode() { + // return entry.getStartKey().getCode(); + // } + // + // @Override + // public String getTable() { + // return entry.getTable(); + // } + // + // @Override + // public byte[] getStartKey() { + // return entry.getStartKey().getKey().toByteArray(); + // } + // + // @Override + // public byte[] getEndKey() { + // return entry.getEndKey().getKey().toByteArray(); + // } + // + // @Override + // public byte[] getValue() { + // return entry.getValue().toByteArray(); + // } + // }; + // + //} + + @Override + public void table(TableReq request, StreamObserver observer) { + if (log.isDebugEnabled()) { + log.debug("table: method = {}, graph = {}, table = {}" + , request.getMethod().name() + , request.getHeader().getGraph() + , request.getTableName() + ); + } + + String graph = request.getHeader().getGraph(); + // 所有Leader分区 + List ids = storeService.getGraphLeaderPartitionIds(graph); + // 按分区拆分数据 + Map groups = new HashMap<>(); + // 按分区拆分数据 + ids.forEach(id -> { + groups.put(id, request); + }); + + // 发给不同的raft执行 + BatchGrpcClosure closure = new BatchGrpcClosure<>(groups.size()); + groups.forEach((partition, entries) -> { + storeService.addRaftTask(HgStoreNodeService.TABLE_OP, graph, partition, + TableReq.newBuilder(request).build(), + closure.newRaftClosure()); + }); + + if (!groups.isEmpty()) { + // log.info(" table waiting raft..."); + // 等待返回结果 + closure.waitFinish(observer, r -> closure.selectError(r), + appConfig.getRaft().getRpcTimeOut()); + // log.info(" table ended waiting raft"); + } else { + // log.info(" table none leader logic"); + ResStatus status = null; + + switch (request.getMethod()) { + case TABLE_METHOD_EXISTS: + status = HgGrpc.not(); + break; + default: + status = HgGrpc.success(); + } + + // log.info(" table none leader status: {}", status.getCode()); + 
observer.onNext(FeedbackRes.newBuilder().setStatus(status).build()); + observer.onCompleted(); + } + + } + + public void doTable(int partId, TableReq request, RaftClosure response) { + if (log.isDebugEnabled()) { + log.debug(" - doTable[{}]: graph = {}, table = {}" + , request.getMethod().name() + , request.getHeader().getGraph() + , request.getTableName() + ); + } + + FeedbackRes.Builder builder = FeedbackRes.newBuilder(); + + try { + log.debug(" - starting wrapper:doTable "); + if (getWrapper().doTable(partId, + request.getMethod(), + request.getHeader().getGraph(), + request.getTableName())) { + builder.setStatus(HgGrpc.success()); + } else { + builder.setStatus(HgGrpc.not()); + } + log.debug(" - ended wrapper:doTable "); + } catch (Throwable t) { + String msg = "Failed to invoke doTable[ " + + request.getMethod().name() + " ], graph=" + + request.getHeader().getGraph() + " , table=" + + request.getTableName(); + log.error(msg, t); + builder.setStatus(HgGrpc.fail(msg)); + } + log.debug(" - starting GrpcClosure:setResult "); + GrpcClosure.setResult(response, builder.build()); + log.debug(" - ended GrpcClosure:setResult "); + } + + + @Override + public void graph(GraphReq request, StreamObserver observer) { + if (log.isDebugEnabled()) { + log.debug("graph: method = {}, graph = {}, table = {}" + , request.getMethod().name() + , request.getHeader().getGraph() + , request.getGraphName() + ); + } + + String graph = request.getHeader().getGraph(); + // 所有Leader分区 + List ids = storeService.getGraphLeaderPartitionIds(graph); + // 按分区拆分数据 + Map groups = new HashMap<>(); + // 按分区拆分数据 + ids.forEach(id -> { + groups.put(id, request); + }); + + // 发给不同的raft执行 + BatchGrpcClosure closure = new BatchGrpcClosure<>(groups.size()); + groups.forEach((partition, entries) -> { + storeService.addRaftTask(HgStoreNodeService.GRAPH_OP, graph, partition, + GraphReq.newBuilder(request).build(), + closure.newRaftClosure()); + }); + + if (!groups.isEmpty()) { + // 等待返回结果 + 
closure.waitFinish(observer, r -> closure.selectError(r), + appConfig.getRaft().getRpcTimeOut()); + + } else { + observer.onNext(FeedbackRes.newBuilder().setStatus(HgGrpc.success()).build()); + observer.onCompleted(); + } + + } + + public void doGraph(int partId, GraphReq request, RaftClosure response) { + if (log.isDebugEnabled()) { + log.debug(" - doGraph[{}]: graph = {}, table = {}" + , request.getMethod().name() + , request.getHeader().getGraph() + , request.getGraphName() + ); + } + + FeedbackRes.Builder builder = FeedbackRes.newBuilder(); + + try { + if (getWrapper().doGraph(partId, + request.getMethod(), + request.getHeader().getGraph())) { + builder.setStatus(HgGrpc.success()); + } else { + builder.setStatus(HgGrpc.not()); + } + } catch (Throwable t) { + String msg = "Failed to invoke doGraph[ " + + request.getMethod().name() + " ], graph=" + + request.getHeader().getGraph(); + log.error(msg, t); + builder.setStatus(HgGrpc.fail(msg)); + } + GrpcClosure.setResult(response, builder.build()); + } + + @Override + public void count(ScanStreamReq request, StreamObserver observer) { + ScanIterator it = null; + try { + BusinessHandler handler = storeService.getStoreEngine().getBusinessHandler(); + long count = handler.count(request.getHeader().getGraph(), request.getTable()); + observer.onNext(Agg.newBuilder().setCount(count).build()); + observer.onCompleted(); + } catch (Exception e) { + observer.onError(e); + } finally { + if (it != null) { + try { + it.close(); + } catch (Exception e) { + + } + } + } + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java new file mode 100644 index 0000000000..57d5165006 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + + +import org.apache.hugegraph.store.grpc.state.HgStoreStateGrpc; +import org.apache.hugegraph.store.grpc.state.NodeStateRes; +import org.apache.hugegraph.store.grpc.state.ScanState; +import org.apache.hugegraph.store.grpc.state.SubStateReq; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import com.google.protobuf.Empty; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/11/3 + */ +@Slf4j +@GRpcService +public class HgStoreStateService extends HgStoreStateGrpc.HgStoreStateImplBase { + + @Autowired + HgStoreStreamImpl impl; + + @Override + public void subState(SubStateReq request, StreamObserver observer) { + HgStoreStateSubject.addObserver(request.getSubId(), observer); + } + + @Override + public void unsubState(SubStateReq request, StreamObserver observer) { + HgStoreStateSubject.removeObserver(request.getSubId()); + } + + @Override + public void getScanState(SubStateReq request, StreamObserver observer) { + ScanState state = impl.getState(); + observer.onNext(state); + observer.onCompleted(); + } +} diff --git 
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java new file mode 100644 index 0000000000..5541b16f58 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.hugegraph.store.grpc.state.NodeStateRes; +import org.apache.hugegraph.store.grpc.state.NodeStateType; +import org.apache.hugegraph.store.node.util.HgAssert; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/11/3 + */ +@Slf4j +public final class HgStoreStateSubject { + public final static Map> subObserverHolder = + new ConcurrentHashMap<>(); + + + public static void addObserver(String subId, StreamObserver observer) { + HgAssert.isArgumentValid(subId, "subId"); + HgAssert.isArgumentNotNull(observer == null, "observer"); + + subObserverHolder.put(subId, observer); + } + + public static void removeObserver(String subId) { + HgAssert.isArgumentValid(subId, "subId"); + subObserverHolder.remove(subId); + } + + public static void notifyAll(NodeStateType nodeState) { + + HgAssert.isArgumentNotNull(nodeState == null, "nodeState"); + NodeStateRes res = NodeStateRes.newBuilder().setState(nodeState).build(); + Iterator>> iter = + subObserverHolder.entrySet().iterator(); + + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + + try { + entry.getValue().onNext(res); + } catch (Throwable e) { + log.error("Failed to send node-state[" + nodeState + "] to subscriber[" + + entry.getKey() + "].", e); + iter.remove(); + log.error("Removed the subscriber[" + entry.getKey() + "].", e); + } + + } + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStreamImpl.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStreamImpl.java new file mode 100644 index 0000000000..7d01fa3db4 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStreamImpl.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.hugegraph.store.grpc.state.ScanState; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc; +import org.apache.hugegraph.store.grpc.stream.KvPageRes; +import org.apache.hugegraph.store.grpc.stream.KvStream; +import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; +import org.apache.hugegraph.store.node.AppConfig; +import org.apache.hugegraph.store.node.util.HgExecutorUtil; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/10/19 + */ +@Slf4j +@GRpcService +public class HgStoreStreamImpl extends HgStoreStreamGrpc.HgStoreStreamImplBase { + + @Autowired + private HgStoreNodeService storeService; + @Autowired + private AppConfig appConfig; + private HgStoreWrapperEx wrapper; + private ThreadPoolExecutor executor; + + private HgStoreWrapperEx getWrapper() { + if (this.wrapper == null) { + synchronized (this) { 
+ if (this.wrapper == null) { + this.wrapper = new HgStoreWrapperEx( + storeService.getStoreEngine().getBusinessHandler()); + } + } + } + return this.wrapper; + } + + public ThreadPoolExecutor getRealExecutor() { + return executor; + } + + public ThreadPoolExecutor getExecutor() { + if (this.executor == null) { + synchronized (this) { + if (this.executor == null) { + AppConfig.ThreadPoolScan scan = this.appConfig.getThreadPoolScan(); + this.executor = + HgExecutorUtil.createExecutor("hg-scan", scan.getCore(), scan.getMax(), + scan.getQueue()); + } + } + } + return this.executor; + } + + public ScanState getState() { + ThreadPoolExecutor ex = getExecutor(); + ScanState.Builder builder = ScanState.newBuilder(); + BlockingQueue queue = ex.getQueue(); + ScanState state = + builder.setActiveCount(ex.getActiveCount()).setTaskCount(ex.getTaskCount()) + .setCompletedTaskCount(ex.getCompletedTaskCount()) + .setMaximumPoolSize(ex.getMaximumPoolSize()) + .setLargestPoolSize(ex.getLargestPoolSize()).setPoolSize(ex.getPoolSize()) + .setAddress(appConfig.getStoreServerAddress()) + .setQueueSize(queue.size()) + .setQueueRemainingCapacity(queue.remainingCapacity()) + .build(); + return state; + } + + @Override + public StreamObserver scan(StreamObserver response) { + return ScanStreamResponse.of(response, getWrapper(), getExecutor(), appConfig); + } + + @Override + public void scanOneShot(ScanStreamReq request, StreamObserver response) { + ScanOneShotResponse.scanOneShot(request, response, getWrapper()); + } + + @Override + public StreamObserver scanBatch(StreamObserver response) { + return ScanBatchResponse3.of(response, getWrapper(), getExecutor()); + } + + @Override + public StreamObserver scanBatch2(StreamObserver response) { + return ScanBatchResponseFactory.of(response, getWrapper(), getExecutor()); + } + + @Override + public void scanBatchOneShot(ScanStreamBatchReq request, StreamObserver response) { + ScanBatchOneShotResponse.scanOneShot(request, response, getWrapper()); + 
} +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java new file mode 100644 index 0000000000..414eaa7de7 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + +import static org.apache.hugegraph.store.grpc.common.GraphMethod.GRAPH_METHOD_DELETE; + +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.business.BusinessHandler; +import org.apache.hugegraph.store.business.FilterIterator; +import org.apache.hugegraph.store.grpc.common.GraphMethod; +import org.apache.hugegraph.store.grpc.common.TableMethod; +import org.apache.hugegraph.store.grpc.session.BatchEntry; +import org.apache.hugegraph.store.term.HgPair; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class HgStoreWrapperEx { + + private final BusinessHandler handler; + + public HgStoreWrapperEx(BusinessHandler handler) { + this.handler = handler; + } + + public byte[] doGet(String graph, int code, String table, byte[] key) { + return this.handler.doGet(graph, code, table, key); + } + + public boolean doClean(String graph, int partId) { + return this.handler.cleanPartition(graph, partId); + } + + public ScanIterator scanAll(String graph, String table, byte[] query) { + ScanIterator scanIterator = this.handler.scanAll(graph, table, query); + return FilterIterator.of(scanIterator, query); + } + + public ScanIterator scan(String graph, int partId, String table, byte[] start, byte[] end, + int scanType, + byte[] query) { + ScanIterator scanIterator = + this.handler.scan(graph, partId, table, start, end, scanType, query); + return FilterIterator.of(scanIterator, query); + } + + public void batchGet(String graph, String table, Supplier> s, + Consumer> c) { + this.handler.batchGet(graph, table, s, (pair -> { + c.accept(new HgPair<>(pair.getKey(), pair.getValue())); + })); + } + + public ScanIterator scanPrefix(String graph, int partition, String table, byte[] prefix, + int scanType, + byte[] query) { + ScanIterator scanIterator = + this.handler.scanPrefix(graph, partition, 
table, prefix, scanType); + return FilterIterator.of(scanIterator, query); + } + + public void doBatch(String graph, int partId, List entryList) { + this.handler.doBatch(graph, partId, entryList); + } + + public boolean doTable(int partId, TableMethod method, String graph, String table) { + boolean flag; + switch (method) { + case TABLE_METHOD_EXISTS: + flag = this.handler.existsTable(graph, partId, table); + break; + case TABLE_METHOD_CREATE: + this.handler.createTable(graph, partId, table); + flag = true; + break; + case TABLE_METHOD_DELETE: + this.handler.deleteTable(graph, partId, table); + flag = true; + break; + case TABLE_METHOD_DROP: + this.handler.dropTable(graph, partId, table); + flag = true; + break; + case TABLE_METHOD_TRUNCATE: + this.handler.truncate(graph, partId); + flag = true; + break; + default: + throw new UnsupportedOperationException("TableMethod: " + method.name()); + } + + return flag; + } + + public boolean doGraph(int partId, GraphMethod method, String graph) { + boolean flag = true; + if (method == GRAPH_METHOD_DELETE) {// 交给 raft 执行,此处不处理 + flag = true; + } else { + throw new UnsupportedOperationException("GraphMethod: " + method.name()); + } + return flag; + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java new file mode 100644 index 0000000000..e5d297b69e --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java @@ -0,0 +1,386 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; + +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.rocksdb.access.RocksDBSession; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.buffer.KVByteBuffer; +import org.apache.hugegraph.store.grpc.common.ScanOrderType; +import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest; +import org.apache.hugegraph.store.node.util.HgAssert; +import org.apache.hugegraph.store.node.util.PropertyUtil; +import org.apache.hugegraph.store.term.Bits; + +import com.alipay.sofa.jraft.util.Utils; + +import lombok.extern.slf4j.Slf4j; + +/** + * 支持平行读取的批量查询迭代器 + */ +@Slf4j +public class ParallelScanIterator implements ScanIterator { + + private static final int waitDataMaxTryTimes = 600; + protected static int maxBodySize = + PropertyUtil.getInt("app.scan.stream.body.size", 1024 * 1024); + private final int batchSize = PropertyUtil.getInt("app.scan.stream.entries.size", 20000); + private final Supplier> 
batchSupplier; + private final Supplier limitSupplier; + private final BlockingQueue> queue; + private final ReentrantLock queueLock = new ReentrantLock(); + final private ThreadPoolExecutor executor; + private final ScanQueryRequest query; + private final Queue scanners = new LinkedList<>(); + private final Queue pauseScanners = new LinkedList<>(); + final private List NO_DATA = new ArrayList<>(); + private final boolean orderVertex; + private final boolean orderEdge; + private int maxWorkThreads = Utils.cpus() / 8; + private int maxInQueue = maxWorkThreads * 2; + private volatile boolean finished; + private List current = null; + + private ParallelScanIterator(Supplier> iteratorSupplier, + Supplier limitSupplier, + ScanQueryRequest query, + ThreadPoolExecutor executor) { + this.executor = executor; + this.batchSupplier = iteratorSupplier; + this.limitSupplier = limitSupplier; + this.finished = false; + this.query = query; + orderVertex = query.getOrderType() == ScanOrderType.ORDER_STRICT; + orderEdge = query.getOrderType() == ScanOrderType.ORDER_WITHIN_VERTEX; + if (orderVertex) { + this.maxWorkThreads = 1; + } else { + this.maxWorkThreads = + Math.max(1, Math.min(query.getConditionCount() / 16, maxWorkThreads)); + } + this.maxInQueue = maxWorkThreads * 2; + // 边有序需要更大的队列 + queue = new LinkedBlockingQueue<>(maxInQueue * 2); + createScanner(); + } + + public static ParallelScanIterator of( + Supplier> iteratorSupplier, + Supplier limitSupplier, + ScanQueryRequest query, + ThreadPoolExecutor executor) { + HgAssert.isArgumentNotNull(iteratorSupplier, "iteratorSupplier"); + HgAssert.isArgumentNotNull(limitSupplier, "limitSupplier"); + return new ParallelScanIterator(iteratorSupplier, limitSupplier, query, executor); + } + + @Override + public boolean hasNext() { + int tryTimes = 0; + while (current == null && tryTimes < waitDataMaxTryTimes) { + try { + if (queue.size() != 0 || !finished) { + current = queue.poll(100, TimeUnit.MILLISECONDS); //定期检查client是否被关闭了 + if 
(current == null && !finished) { + wakeUpScanner(); + } + } else { + break; + } + } catch (InterruptedException e) { + log.error("hasNext interrupted {}", e); + break; + } + tryTimes++; + } + if (current == null && tryTimes >= waitDataMaxTryTimes) { + log.error("Wait data timeout!!!, scanner is {}/{}", scanners.size(), + pauseScanners.size()); + } + return current != null && current != NO_DATA; + } + + @Override + public boolean isValid() { + throw new UnsupportedOperationException(); + } + + @Override + public List next() { + List t = current; + current = null; + if (queue.size() < maxWorkThreads) { + wakeUpScanner(); + } + return t; + } + + @Override + public void close() { + finished = true; + synchronized (scanners) { + scanners.forEach(scanner -> { + scanner.close(); + }); + } + synchronized (pauseScanners) { + pauseScanners.forEach(s -> { + s.close(); + }); + } + queue.clear(); + } + + /** + * 创建扫描器 + */ + private void createScanner() { + synchronized (scanners) { + for (int i = 0; i < maxWorkThreads; i++) { + scanners.add(new KVScanner()); + } + scanners.forEach(scanner -> { + executor.execute(() -> scanner.scanKV()); + }); + } + } + + /** + * 唤醒扫描器 + */ + private void wakeUpScanner() { + synchronized (pauseScanners) { + if (!pauseScanners.isEmpty()) { + KVScanner scanner = pauseScanners.poll(); + if (scanner != null) { + executor.execute(() -> scanner.scanKV()); + } + } + } + } + + /** + * 休眠扫描器 + * + * @param scanner + */ + private void suspendScanner(KVScanner scanner) { + synchronized (pauseScanners) { + pauseScanners.add(scanner); + } + } + + private void quitScanner(KVScanner scanner) { + synchronized (scanners) { + scanner.close(); + scanners.remove(scanner); + if (scanners.size() == 0) { + putData(NO_DATA); + this.finished = true; + } + } + } + + /** + * 添加到队列,返回队列是否已满 + * + * @param data + * @return false: 队列已满 + */ + private boolean putData(List data) { + try { + this.queue.put(data); + } catch (InterruptedException e) { + log.error("exception ", 
e); + this.finished = true; + return false; + } + return this.queue.size() < maxInQueue; + } + + private boolean putData(List data, boolean hasNext) { + try { + queueLock.lock(); + this.queue.put(data); + } catch (InterruptedException e) { + log.error("exception ", e); + this.finished = true; + return false; + } finally { + if (!hasNext) { + queueLock.unlock(); + } + } + // 数据未结束,线程继续执行 + return hasNext || this.queue.size() < maxInQueue; + } + + private synchronized KVPair getIterator() { + return this.batchSupplier.get(); + } + + private long getLimit() { + Long limit = this.limitSupplier.get(); + if (limit == null || limit <= 0) { + limit = Long.valueOf(Integer.MAX_VALUE); + } + return limit; + } + + static class KV { + public int sn; + public byte[] key; + public byte[] value; + + public boolean hasSN = false; + + public static KV of(RocksDBSession.BackendColumn col) { + KV kv = new KV(); + kv.key = col.name; + kv.value = col.value; + return kv; + } + + public static KV ofSeparator(int value) { + KV kv = new KV(); + kv.key = new byte[4]; + Bits.putInt(kv.key, 0, value); + return kv; + } + + public KV setNo(int sn) { + this.sn = sn; + hasSN = true; + return this; + } + + public void write(KVByteBuffer buffer) { + if (hasSN) { + buffer.putInt(sn); + } + buffer.put(key); + buffer.put(value); + } + + public int size() { + return this.key.length + this.value.length + 1; + } + } + + class KVScanner { + + private final ReentrantLock iteratorLock = new ReentrantLock(); + private ScanIterator iterator = null; + private QueryCondition query = null; + private long limit; + private long counter; + private volatile boolean closed = false; + + private ScanIterator getIterator() { + // 迭代器没有数据,或该点以达到limit,切换新的迭代器 + if (iterator == null || !iterator.hasNext() || counter >= limit) { + if (iterator != null) { + iterator.close(); + } + KVPair pair = ParallelScanIterator.this.getIterator(); + query = pair.getKey(); + iterator = pair.getValue(); + limit = getLimit(); + counter = 0; 
+ } + return iterator; + } + + public void scanKV() { + boolean canNext = true; + ArrayList dataList = new ArrayList<>(batchSize); + dataList.ensureCapacity(batchSize); + iteratorLock.lock(); + try { + long entriesSize = 0, bodySize = 0; + while (canNext && !closed) { + iterator = this.getIterator(); + if (iterator == null) { + break; + } + while (iterator.hasNext() && entriesSize < batchSize && + bodySize < maxBodySize && + counter < limit && !closed) { + KV kv = KV.of(iterator.next()); + dataList.add(orderVertex ? kv.setNo(query.getSerialNo()) : kv); + bodySize += kv.size(); + entriesSize++; + counter++; + } + if ((entriesSize >= batchSize || bodySize >= maxBodySize) || + (orderEdge && bodySize >= maxBodySize / 2)) { + if (orderEdge) { + //边排序,保证一个点的所有边连续,阻止其他点插入 + canNext = putData(dataList, iterator != null && iterator.hasNext()); + } else { + canNext = putData(dataList); + } + dataList = new ArrayList<>(batchSize); + dataList.ensureCapacity(batchSize); + entriesSize = bodySize = 0; + } + } + if (!dataList.isEmpty()) { + if (orderEdge) { + putData(dataList, false); + } else { + putData(dataList); + } + } + } catch (Exception e) { + log.error("exception {}", e); + } finally { + iteratorLock.unlock(); + if (iterator != null && counter < limit && !closed) { + suspendScanner(this); + } else { + quitScanner(this); + } + } + } + + public void close() { + closed = true; + iteratorLock.lock(); + try { + if (iterator != null) { + iterator.close(); + } + } finally { + iteratorLock.unlock(); + } + } + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java new file mode 100644 index 0000000000..efcda92438 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or 
more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +/** + * 2023/2/8 + */ +public interface QueryCondition { + byte[] getStart(); + + byte[] getEnd(); + + byte[] getPrefix(); + + int getKeyCode(); + + int getScanType(); + + byte[] getQuery(); + + byte[] getPosition(); + + int getSerialNo(); +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java new file mode 100644 index 0000000000..e862c88afe --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import static org.apache.hugegraph.store.node.grpc.ScanUtil.getIterator; + +import org.apache.hugegraph.rocksdb.access.RocksDBSession; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.stream.KvPageRes; +import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest; +import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq; +import org.apache.hugegraph.store.node.util.HgGrpc; +import org.apache.hugegraph.store.node.util.HgStoreNodeUtil; + +import com.google.protobuf.ByteString; + +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + + +/** + * created on 2022/04/08 + * + * @version 0.1.0 + */ +@Slf4j +public class ScanBatchOneShotResponse { + + /** + * Handle one-shot batch scan + * + * @param request + * @param responseObserver + */ + public static void scanOneShot(ScanStreamBatchReq request, + StreamObserver responseObserver, + HgStoreWrapperEx wrapper) { + + String graph = request.getHeader().getGraph(); + ScanQueryRequest queryRequest = request.getQueryRequest(); + ScanIterator iterator = getIterator(graph, queryRequest, wrapper); + + KvPageRes.Builder resBuilder = KvPageRes.newBuilder(); + Kv.Builder kvBuilder = Kv.newBuilder(); + + long limit = queryRequest.getLimit(); + + if (limit <= 0) { + limit = Integer.MAX_VALUE; + log.warn("As limit is less than or equals 0, default limit was effective:[ {} ]", + Integer.MAX_VALUE); + } + + int count 
= 0; + + try { + while (iterator.hasNext()) { + + if (++count > limit) { + break; + } + + RocksDBSession.BackendColumn col = iterator.next(); + + resBuilder.addData(kvBuilder + .setKey(ByteString.copyFrom(col.name)) + .setValue(ByteString.copyFrom(col.value)) + .setCode(HgStoreNodeUtil.toInt(iterator.position())) +//position == partition-id. + ); + + } + + responseObserver.onNext(resBuilder.build()); + responseObserver.onCompleted(); + + } catch (Throwable t) { + String msg = "Failed to do oneshot batch scan, scanning was interrupted, cause by:"; + responseObserver.onError( + HgGrpc.toErr(Status.Code.INTERNAL, msg, t)); + } finally { + iterator.close(); + } + + } + + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java new file mode 100644 index 0000000000..caac4eeaf5 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java @@ -0,0 +1,278 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + + +import static org.apache.hugegraph.store.node.grpc.ScanUtil.getParallelIterator; + +import java.util.List; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.buffer.ByteBufferAllocator; +import org.apache.hugegraph.store.buffer.KVByteBuffer; +import org.apache.hugegraph.store.grpc.stream.KvStream; +import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest; +import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq; +import org.apache.hugegraph.store.node.util.HgGrpc; +import org.apache.hugegraph.store.node.util.PropertyUtil; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * 批量查询处理器,批量查询数据,流式返回数据。 + * 1、服务端流式发送数据给客户端 + * 2、客户端每消费一批次数据,返回批次号给服务端 + * 3、服务端根据批次号决定发送多少数据,保证传送数据的不间断, + */ +@Slf4j +public class ScanBatchResponse implements StreamObserver { + static ByteBufferAllocator bfAllocator = + new ByteBufferAllocator(ParallelScanIterator.maxBodySize * 3 / 2, 1000); + static ByteBufferAllocator alloc = + new ByteBufferAllocator(ParallelScanIterator.maxBodySize * 3 / 2, 1000); + private final int maxInFlightCount = PropertyUtil.getInt("app.scan.stream.inflight", 16); + private final int activeTimeout = PropertyUtil.getInt("app.scan.stream.timeout", 60); //单位秒 + private final StreamObserver sender; + private final HgStoreWrapperEx wrapper; + private final ThreadPoolExecutor executor; + private final Object stateLock = new Object(); + private final Lock iteratorLock = new ReentrantLock(); + // 当前正在遍历的迭代器 + private ScanIterator iterator; + // 下一次发送的序号 + private volatile int seqNo; + // Client已消费的序号 + private volatile int clientSeqNo; + // 已经发送的条目数 + private volatile long count; + // 客户端要求返回的最大条目数 + private volatile long limit; + private ScanQueryRequest query; + // 上次读取数据时间 + private 
long activeTime; + private volatile State state; + + + public ScanBatchResponse(StreamObserver response, HgStoreWrapperEx wrapper, + ThreadPoolExecutor executor) { + this.sender = response; + this.wrapper = wrapper; + this.executor = executor; + this.iterator = null; + this.seqNo = 1; + this.state = State.IDLE; + this.activeTime = System.currentTimeMillis(); + } + + /** + * 接收客户端发送的消息 + * 服务端另起线程处理消息,不阻塞网络 + * + * @param request + */ + @Override + public void onNext(ScanStreamBatchReq request) { + switch (request.getQueryCase()) { + case QUERY_REQUEST: // 查询条件 + executor.execute(() -> { + startQuery(request.getHeader().getGraph(), request.getQueryRequest()); + }); + break; + case RECEIPT_REQUEST: // 消息异步应答 + this.clientSeqNo = request.getReceiptRequest().getTimes(); + if (seqNo - clientSeqNo < maxInFlightCount) { + synchronized (stateLock) { + if (state == State.IDLE) { + state = State.DOING; + executor.execute(() -> { + sendEntries(); + }); + } else if (state == State.DONE) { + sendNoDataEntries(); + } + } + } + break; + case CANCEL_REQUEST: // 关闭流 + closeQuery(); + break; + default: + sender.onError( + HgGrpc.toErr("Unsupported sub-request: [ " + request + " ]")); + } + } + + @Override + public void onError(Throwable t) { + log.error("onError ", t); + closeQuery(); + } + + @Override + public void onCompleted() { + closeQuery(); + } + + + /** + * 生成迭代器 + * + * @param request + */ + private void startQuery(String graphName, ScanQueryRequest request) { + this.query = request; + this.limit = request.getLimit(); + this.count = 0; + this.iterator = getParallelIterator(graphName, request, this.wrapper, executor); + synchronized (stateLock) { + if (state == State.IDLE) { + state = State.DOING; + executor.execute(() -> { + sendEntries(); + }); + } + } + } + + /** + * 生成迭代器 + */ + private void closeQuery() { + setStateDone(); + try { + closeIter(); + this.sender.onCompleted(); + } catch (Exception e) { + log.error("exception ", e); + } + int active = 
ScanBatchResponseFactory.getInstance().removeStreamObserver(this); + log.info("ScanBatchResponse closeQuery, active count is {}", active); + } + + private void closeIter() { + try { + if (this.iterator != null) { + this.iterator.close(); + this.iterator = null; + } + } catch (Exception e) { + + } + } + + /** + * 发送数据 + */ + private void sendEntries() { + if (state == State.DONE || iterator == null) { + setStateIdle(); + return; + } + iteratorLock.lock(); + try { + if (state == State.DONE || iterator == null) { + setStateIdle(); + return; + } + KvStream.Builder dataBuilder = KvStream.newBuilder().setVersion(1); + while (state != State.DONE && iterator.hasNext() + && (seqNo - clientSeqNo < maxInFlightCount) + && this.count < limit) { + KVByteBuffer buffer = new KVByteBuffer(alloc.get()); + List dataList = iterator.next(); + dataList.forEach(kv -> { + kv.write(buffer); + this.count++; + }); + dataBuilder.setStream(buffer.flip().getBuffer()); + dataBuilder.setSeqNo(seqNo++); + dataBuilder.complete(e -> alloc.release(buffer.getBuffer())); + this.sender.onNext(dataBuilder.build()); + this.activeTime = System.currentTimeMillis(); + } + if (!iterator.hasNext() || this.count >= limit || state == State.DONE) { + closeIter(); + this.sender.onNext(KvStream.newBuilder().setOver(true).build()); + setStateDone(); + } else { + setStateIdle(); + } + } catch (Throwable e) { + if (this.state != State.DONE) { + log.error(" send data exception: ", e); + setStateIdle(); + if (this.sender != null) { + try { + this.sender.onError(e); + } catch (Exception ex) { + + } + } + } + } finally { + iteratorLock.unlock(); + } + } + + private void sendNoDataEntries() { + try { + this.sender.onNext(KvStream.newBuilder().setOver(true).build()); + } catch (Exception e) { + } + } + + private State setStateDone() { + synchronized (this.stateLock) { + this.state = State.DONE; + } + return state; + } + + private State setStateIdle() { + synchronized (this.stateLock) { + if (this.state != State.DONE) { + 
this.state = State.IDLE; + } + } + return state; + } + + /** + * 检查是否活跃,超过一定时间客户端没有请求数据,认为已经不活跃,关闭连接释放资源 + */ + public void checkActiveTimeout() { + if ((System.currentTimeMillis() - activeTime) > activeTimeout * 1000L) { + log.warn("The stream is not closed, and the timeout is forced to close"); + closeQuery(); + } + } + + /** + * 任务状态 + */ + private enum State { + IDLE, + DOING, + DONE, + ERROR + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java new file mode 100644 index 0000000000..df81a7bbb4 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java @@ -0,0 +1,417 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.List; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.rocksdb.access.RocksDBSession; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.stream.KvPageRes; +import org.apache.hugegraph.store.grpc.stream.ScanCondition; +import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest; +import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq; +import org.apache.hugegraph.store.node.util.Base58; +import org.apache.hugegraph.store.node.util.HgAssert; +import org.apache.hugegraph.store.node.util.HgGrpc; +import org.apache.hugegraph.store.node.util.HgStoreConst; +import org.apache.hugegraph.store.node.util.HgStoreNodeUtil; + +import com.google.protobuf.ByteString; + +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2022/03/27 + * + * @version 3.6.0 + */ +@Slf4j +public class ScanBatchResponse3 { + private final static long DEFAULT_PACKAGE_SIZE = 10_000; + private final static int MAX_NOT_RECEIPT = 10; + + public static StreamObserver of(StreamObserver responseObserver, + HgStoreWrapperEx wrapper, ThreadPoolExecutor executor) { + HgAssert.isArgumentNotNull(responseObserver, "responseObserver"); + HgAssert.isArgumentNotNull(wrapper, "wrapper"); + return new Broker(responseObserver, wrapper, executor); + } + + private enum OrderState { + NEW(0), + WORKING(1);//, PAUSE(2), COMPLETE(10); + int value; + + OrderState(int value) { + this.value = value; + } + } + + /*** Broker ***/ + private static class Broker 
implements StreamObserver { + private final StreamObserver responseObserver; + private final HgStoreWrapperEx wrapper; + private final ThreadPoolExecutor executor; + private final OrderManager manager = new OrderManager(); + private String graph; + + Broker(StreamObserver responseObserver, HgStoreWrapperEx wrapper, + ThreadPoolExecutor executor) { + this.responseObserver = responseObserver; + this.wrapper = wrapper; + this.executor = executor; + } + + @Override + public void onNext(ScanStreamBatchReq request) { + this.handleHeader(request); + switch (request.getQueryCase()) { + case QUERY_REQUEST: + this.makeADeal(request.getQueryRequest()); + break; + case RECEIPT_REQUEST: + this.manager.receipt(request.getReceiptRequest().getTimes()); + break; + case CANCEL_REQUEST: + this.manager.finished(); + break; + default: + responseObserver.onError( + HgGrpc.toErr("Unsupported sub-request: [ " + request + " ]")); + } + } + + @Override + public void onError(Throwable t) { + log.warn(t.getMessage()); + this.manager.breakdown(); + } + + @Override + public void onCompleted() { + this.manager.finished(); + } + + private void handleHeader(ScanStreamBatchReq request) { + if (this.graph == null) { + this.graph = request.getHeader().getGraph(); + } + } + + private void makeADeal(ScanQueryRequest request) { + String deliverId = ""; + if (log.isDebugEnabled()) { + List conditions = request.getConditionList(); + if (conditions.size() > 0) { + ScanCondition c = conditions.get(0); + if (c.getPrefix() != null && c.getPrefix().size() > 0) { + deliverId = Base58.encode(c.getPrefix().toByteArray()); + log.info("[ANALYSIS DEAL] [{}] prefixLength: {}", deliverId, + conditions.size()); + } + + } + } + + OrderDeliverer deliverer = new OrderDeliverer(deliverId, this.responseObserver); + OrderWorker worker = new OrderWorker( + request.getLimit(), + request.getPageSize(), + ScanUtil.getIterator(this.graph, request, this.wrapper), + deliverer, + this.executor); + + this.manager.deal(worker, 
deliverer); + } + + } + + @NotThreadSafe + private static class OrderManager { + OrderState state = OrderState.NEW; + OrderWorker worker; + OrderDeliverer deliverer; + + synchronized void deal(OrderWorker worker, OrderDeliverer deliverer) { + if (log.isDebugEnabled()) { + log.debug("Receiving query request."); + } + if (this.state == OrderState.NEW) { + this.worker = worker; + this.deliverer = deliverer; + this.worker.hereWeGo(); + this.state = OrderState.WORKING; + } + } + + synchronized void receipt(int receiptTimes) { + if (log.isDebugEnabled()) { + log.debug("Receiving receipt request."); + } + this.worker.setReceipt(receiptTimes); + } + + synchronized void finished() { + if (log.isDebugEnabled()) { + log.debug("Receiving finished request."); + } +/* if (this.state.value > OrderState.NEW.value + && this.state.value < OrderState.COMPLETE.value) { + this.state = OrderState.COMPLETE; + }*/ + this.breakdown(); + } + + synchronized void breakdown() { + if (this.worker != null) { + this.worker.breakdown(); + } + } + } + + private static class OrderDeliverer { + private final StreamObserver responseObserver; + private final AtomicBoolean finishFlag = new AtomicBoolean(); + private final String delivererId; + private final AtomicLong count = new AtomicLong(); + + OrderDeliverer(String delivererId, StreamObserver responseObserver) { + this.responseObserver = responseObserver; + this.delivererId = delivererId; + } + + void deliver(KvPageRes.Builder dataBuilder, int times, boolean isOver) { + if (this.finishFlag.get()) { + return; + } + count.addAndGet(dataBuilder.getDataCount()); + this.responseObserver.onNext(dataBuilder.setOver(isOver).setTimes(times).build()); + if (log.isDebugEnabled()) { + log.debug("deliver times : {}, over: {}", times, isOver); + } + + if (isOver) { + if (log.isDebugEnabled()) { + if (delivererId != null && !delivererId.isEmpty()) { + log.debug("[ANALYSIS OVER] [{}] count: {}, times: {}", delivererId, count, + times); + } + } + this.finish(); + } 
+ } + + void finish() { + if (!finishFlag.getAndSet(true)) { + this.responseObserver.onCompleted(); + } + } + + void error(String msg) { + if (!finishFlag.getAndSet(true)) { + this.responseObserver.onError(HgGrpc.toErr(msg)); + } + } + + void error(String msg, Throwable t) { + if (!finishFlag.getAndSet(true)) { + this.responseObserver.onError(HgGrpc.toErr(Status.INTERNAL, + msg, t)); + } + } + } + + /*** Worker ***/ + private static class OrderWorker { + private final ScanIterator iterator; + private final OrderDeliverer deliverer; + private final AtomicBoolean pauseFlag = new AtomicBoolean(); + private final AtomicBoolean completeFlag = new AtomicBoolean(); + private final ReentrantLock workingLock = new ReentrantLock(); + private final AtomicBoolean isWorking = new AtomicBoolean(); + private final AtomicBoolean breakdown = new AtomicBoolean(); + private final AtomicInteger receiptTimes = new AtomicInteger(); + private final AtomicInteger curTimes = new AtomicInteger(); + private final ThreadPoolExecutor executor; + private final long limit; + private long packageSize; + private long counter; + + OrderWorker(long limit, long packageSize, ScanIterator iterator, OrderDeliverer deliverer, + ThreadPoolExecutor executor) { + this.limit = limit; + this.packageSize = packageSize; + this.iterator = iterator; + this.deliverer = deliverer; + this.executor = executor; + + if (this.packageSize <= 0) { + this.packageSize = DEFAULT_PACKAGE_SIZE; + log.warn( + "As page-Size is less than or equals 0, default package-size was " + + "effective.[ {} ]", + DEFAULT_PACKAGE_SIZE); + } + + } + + void hereWeGo() { + if (this.completeFlag.get()) { + log.warn("job complete."); + return; + } + if (this.isWorking.get()) { + log.warn("has been working."); + return; + } + + if (this.workingLock.isLocked()) { + log.warn("working now"); + return; + } + + executor.execute(() -> working()); + Thread.yield(); + } + + void setReceipt(int times) { + this.receiptTimes.set(times); + 
this.continueWorking(); + } + + boolean checkContinue() { + return (this.curTimes.get() - this.receiptTimes.get() < MAX_NOT_RECEIPT); + } + + void continueWorking() { + if (this.checkContinue()) { + synchronized (this.iterator) { + this.iterator.notify(); + } + } + } + + void breakdown() { + this.breakdown.set(true); + synchronized (this.iterator) { + this.iterator.notify(); + } + } + + private void working() { + if (this.isWorking.getAndSet(true)) { + return; + } + + this.workingLock.lock(); + + try { + synchronized (this.iterator) { + KvPageRes.Builder dataBuilder = KvPageRes.newBuilder(); + Kv.Builder kvBuilder = Kv.newBuilder(); + long packageCount = 0; + + while (iterator.hasNext()) { + if (++this.counter > limit) { + this.completeFlag.set(true); + break; + } + + if (++packageCount > packageSize) { + + if (this.breakdown.get()) { + break; + } + + deliverer.deliver(dataBuilder, curTimes.incrementAndGet(), false); + Thread.yield(); + + if (!this.checkContinue()) { + long start = System.currentTimeMillis(); + iterator.wait( + HgStoreConst.SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS * + 1000); + + if (System.currentTimeMillis() - start + >= + HgStoreConst.SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS * 1000) { + throw new TimeoutException("Waiting continue more than " + + + HgStoreConst.SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS + + " seconds."); + } + + if (this.breakdown.get()) { + break; + } + + } + + packageCount = 1; + dataBuilder = KvPageRes.newBuilder(); + } + + RocksDBSession.BackendColumn col = iterator.next(); + + dataBuilder.addData(kvBuilder + .setKey(ByteString.copyFrom(col.name)) + .setValue(ByteString.copyFrom(col.value)) + .setCode(HgStoreNodeUtil.toInt( + iterator.position())) +//position == partition-id. 
+ ); + + } + + this.completeFlag.set(true); + + deliverer.deliver(dataBuilder, curTimes.incrementAndGet(), true); + + } + + } catch (InterruptedException e) { + log.error("Interrupted waiting of iterator, canceled while.", e); + this.deliverer.error("Failed to finish scanning, cause by InterruptedException."); + } catch (TimeoutException t) { + log.info(t.getMessage()); + this.deliverer.error("Sever waiting exceeded [" + + HgStoreConst.SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS + + "] seconds."); + } catch (Throwable t) { + log.error("Failed to do while for scanning, cause by:", t); + this.deliverer.error("Failed to finish scanning ", t); + } finally { + this.workingLock.unlock(); + this.iterator.close(); + } + } + + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java new file mode 100644 index 0000000000..52b3378fc3 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.Set; +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.hugegraph.store.grpc.stream.KvStream; + +import com.alipay.sofa.jraft.util.concurrent.ConcurrentHashSet; + +import io.grpc.stub.StreamObserver; + +public class ScanBatchResponseFactory { + private final static ScanBatchResponseFactory instance = new ScanBatchResponseFactory(); + private final Set streamObservers = new ConcurrentHashSet<>(); + + public static ScanBatchResponseFactory getInstance() { + return instance; + } + + public static StreamObserver of(StreamObserver responseObserver, + HgStoreWrapperEx wrapper, ThreadPoolExecutor executor) { + StreamObserver observer = new ScanBatchResponse(responseObserver, wrapper, executor); + getInstance().addStreamObserver(observer); + getInstance().checkStreamActive(); + return observer; + } + + public int addStreamObserver(StreamObserver observer) { + streamObservers.add(observer); + return streamObservers.size(); + } + + public int removeStreamObserver(StreamObserver observer) { + streamObservers.remove(observer); + return streamObservers.size(); + } + + /** + * 检查是否Stream是否活跃,超时的Stream及时关闭 + */ + public void checkStreamActive() { + streamObservers.forEach(streamObserver -> { + ((ScanBatchResponse) streamObserver).checkActiveTimeout(); + }); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java new file mode 100644 index 0000000000..841ca09cab --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import org.apache.hugegraph.rocksdb.access.RocksDBSession; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.stream.KvPageRes; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; +import org.apache.hugegraph.store.node.util.HgGrpc; +import org.apache.hugegraph.store.node.util.HgStoreNodeUtil; + +import com.google.protobuf.ByteString; + +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + + +/** + * created on 2022/02/17 + * + * @version 3.6.0 + */ +@Slf4j +public class ScanOneShotResponse { + + /** + * Handle one-shot scan + * + * @param request + * @param responseObserver + */ + public static void scanOneShot(ScanStreamReq request, + StreamObserver responseObserver, + HgStoreWrapperEx wrapper) { + KvPageRes.Builder resBuilder = KvPageRes.newBuilder(); + Kv.Builder kvBuilder = Kv.newBuilder(); + ScanIterator iterator = ScanUtil.getIterator(ScanUtil.toSq(request), wrapper); + + long limit = request.getLimit(); + + if (limit <= 0) { + responseObserver.onError(HgGrpc.toErr("limit<=0, please to invoke stream scan.")); + return; + } + + int count = 0; + + try { + while 
(iterator.hasNext()) { + + if (++count > limit) { + break; + } + + RocksDBSession.BackendColumn col = iterator.next(); + + resBuilder.addData(kvBuilder + .setKey(ByteString.copyFrom(col.name)) + .setValue(ByteString.copyFrom(col.value)) + .setCode(HgStoreNodeUtil.toInt(iterator.position())) +//position == partition-id. + ); + + } + + responseObserver.onNext(resBuilder.build()); + responseObserver.onCompleted(); + + } catch (Throwable t) { + String msg = "an exception occurred during data scanning"; + responseObserver.onError(HgGrpc.toErr(Status.INTERNAL, msg, t)); + } finally { + iterator.close(); + } + + } + + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java new file mode 100644 index 0000000000..27508e5e76 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.Arrays; + +import org.apache.hugegraph.store.grpc.common.ScanMethod; + +/** + * 2022/2/28 + */ +class ScanQuery implements QueryCondition { + String graph; + String table; + ScanMethod method; + + byte[] start; + byte[] end; + byte[] prefix; + int keyCode; + int scanType; + byte[] query; + byte[] position; + int serialNo; + + private ScanQuery() { + } + + static ScanQuery of() { + return new ScanQuery(); + } + + @Override + public byte[] getStart() { + return this.start; + } + + @Override + public byte[] getEnd() { + return this.end; + } + + @Override + public byte[] getPrefix() { + return this.prefix; + } + + @Override + public int getKeyCode() { + return this.keyCode; + } + + @Override + public int getScanType() { + return this.scanType; + } + + @Override + public byte[] getQuery() { + return this.query; + } + + @Override + public byte[] getPosition() { + return this.position; + } + + @Override + public int getSerialNo() { + return this.serialNo; + } + + @Override + public String toString() { + return "ScanQuery{" + + "graph='" + graph + '\'' + + ", table='" + table + '\'' + + ", method=" + method + + ", start=" + Arrays.toString(start) + + ", end=" + Arrays.toString(end) + + ", prefix=" + Arrays.toString(prefix) + + ", partition=" + keyCode + + ", scanType=" + scanType + + ", serialNo=" + serialNo + + ", query=" + Arrays.toString(query) + + ", position=" + Arrays.toString(position) + + '}'; + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java new file mode 100644 index 0000000000..e5b0d32a62 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java @@ -0,0 +1,262 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * 
contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.grpc.common.ScanMethod; +import org.apache.hugegraph.store.grpc.stream.ScanCondition; +import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest; +import org.apache.hugegraph.store.node.util.HgAssert; + +import lombok.extern.slf4j.Slf4j; + +/** + * Buffering the data of ScanQueryRequest and generating ScanQuery. + * It will not hold the reference of ScanQueryRequest. + *

+ * 2023/2/8 + */ +@NotThreadSafe +@Slf4j +class ScanQueryProducer implements Iterable { + + private String graph; + private String[] tables; + private ScanMethod method; + private int scanType; + private byte[] query; + private byte[] position; + + private List conditionList; + + private ScanQueryProducer() { + } + + public static ScanQueryProducer requestOf(String graph, String[] tables, + ScanQueryRequest request) { + HgAssert.isArgumentValid(graph, "graph"); + HgAssert.isArgumentNotNull(tables, "tables"); + HgAssert.isArgumentNotNull(request, "ScanQueryRequest"); + + ScanQueryProducer res = new ScanQueryProducer(); + res.graph = graph; + res.tables = tables; // a trick that reduce the data-size transferred through network; + + res.method = request.getMethod(); + res.scanType = request.getScanType(); + res.query = request.getQuery().toByteArray(); + res.position = request.getPosition().toByteArray(); + + res.conditionList = request.getConditionList(); + + if (res.conditionList == null) { + res.conditionList = Collections.emptyList(); + } + + if (res.conditionList.isEmpty()) { + log.warn("the condition-list of ScanQueryRequest is empty."); + } + + return res; + } + + private ScanQuery createQuery(String tableName, ScanCondition condition) { + ScanQuery sq = ScanQuery.of(); + sq.graph = this.graph; + sq.table = tableName; + sq.method = this.method; + sq.scanType = this.scanType; + sq.query = this.query; + sq.position = this.position; + + if (condition != null) { + sq.keyCode = condition.getCode(); + sq.start = condition.getStart().toByteArray(); + sq.end = condition.getEnd().toByteArray(); + sq.prefix = condition.getPrefix().toByteArray(); + sq.serialNo = condition.getSerialNo(); + } + + return sq; + } + + private String getTableName(int tableIndex) { + if (tableIndex + 1 > this.tables.length) { + return null; + } + + return this.tables[tableIndex]; + } + + @Override + public Iterator iterator() { + if (this.conditionList.isEmpty()) { + return new 
NoConditionsIterator(); + } else { + return new ConditionsIterator(); + } + } + + /** + * Return an Iterator contains Scan-Queries grouped ScanQuery that + * created by same resource but filled with different tables; + * + * @return + */ + public Iterator groupedIterator() { + if (this.conditionList.isEmpty()) { + return new GroupedNoConditionsIterator(); + } else { + return new GroupedConditionsIterator(); + } + } + + /*---------------inner classes below--------------------*/ + + private class GroupedNoConditionsIterator implements Iterator { + private boolean isHasNext = true; + + @Override + public boolean hasNext() { + return isHasNext; + } + + @Override + public ScanQuery[] next() { + if (!this.hasNext()) { + throw new NoSuchElementException(); + } + + ScanQuery[] res = new ScanQuery[ScanQueryProducer.this.tables.length]; + + for (int i = 0; i < res.length; i++) { + res[i] = ScanQueryProducer.this.createQuery(ScanQueryProducer.this.tables[i], null); + } + + this.isHasNext = false; + + return res; + } + } + + private class GroupedConditionsIterator implements Iterator { + private final Iterator conditionIterator = + ScanQueryProducer.this.conditionList.iterator(); + + @Override + public boolean hasNext() { + return conditionIterator.hasNext(); + } + + @Override + public ScanQuery[] next() { + ScanCondition condition = this.conditionIterator.next(); + ScanQuery[] res = new ScanQuery[ScanQueryProducer.this.tables.length]; + + for (int i = 0; i < res.length; i++) { + res[i] = ScanQueryProducer.this.createQuery(ScanQueryProducer.this.tables[i], + condition); + } + + return res; + } + } + + /** + * TODO: no testing + */ + private class NoConditionsIterator implements Iterator { + private String tableName; + private int tableIndex; + + @Override + public boolean hasNext() { + if (this.tableName != null) { + return true; + } + + this.tableName = ScanQueryProducer.this.getTableName(this.tableIndex); + + return this.tableName != null; + } + + @Override + public 
ScanQuery next() { + if (!this.hasNext()) { + throw new NoSuchElementException(); + } + + ScanQuery res = ScanQueryProducer.this.createQuery(this.tableName, null); + this.tableIndex++; + this.tableName = ScanQueryProducer.this.getTableName(this.tableIndex); + + return res; + } + + } + + /** + * TODO: no testing + */ + private class ConditionsIterator implements Iterator { + private final Iterator conditionIterator = + ScanQueryProducer.this.conditionList.iterator(); + private ScanCondition condition; + private String tableName; + private int tableIndex; + + @Override + public boolean hasNext() { + if (this.condition != null) { + return true; + } + return conditionIterator.hasNext(); + } + + @Override + public ScanQuery next() { + if (!this.hasNext()) { + throw new NoSuchElementException(); + } + if (this.condition == null) { + this.condition = conditionIterator.next(); + } + if (this.tableName == null) { + this.tableName = ScanQueryProducer.this.getTableName(this.tableIndex); + } + + ScanQuery res = ScanQueryProducer.this.createQuery(this.tableName, this.condition); + this.tableIndex++; + this.tableName = ScanQueryProducer.this.getTableName(this.tableIndex); + + if (this.tableName == null) { + this.condition = null; + this.tableIndex = 0; + } + + return res; + } + + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java new file mode 100644 index 0000000000..26aaec3175 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import static org.apache.hugegraph.store.node.grpc.ScanUtil.getIterator; + +import java.util.Collections; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hugegraph.rocksdb.access.RocksDBSession; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.stream.KvPageRes; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; +import org.apache.hugegraph.store.node.AppConfig; +import org.apache.hugegraph.store.node.util.HgAssert; +import org.apache.hugegraph.store.node.util.HgChannel; +import org.apache.hugegraph.store.node.util.HgGrpc; +import org.apache.hugegraph.store.node.util.HgStoreNodeUtil; + +import com.google.protobuf.ByteString; + +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2022/02/17 + * + * @version 3.6.0 + */ +@Slf4j +public class ScanStreamResponse implements StreamObserver { + private static final String msg = + "to wait for client taking data exceeded max time: [{}] seconds,stop scanning."; + private final StreamObserver responseObserver; + private final HgStoreWrapperEx wrapper; + private final AtomicBoolean finishFlag = new 
AtomicBoolean(); + private final ThreadPoolExecutor executor; + private final AtomicBoolean isStarted = new AtomicBoolean(); + private final AtomicBoolean isStop = new AtomicBoolean(false); + private final AppConfig config; + private final int waitTime; + private final HgChannel channel; + private ScanIterator iterator; + private long limit = 0; + private int times = 0; + private long pageSize = 0; + private int total = 0; + private String graph; + private String table; + + ScanStreamResponse(StreamObserver responseObserver, + HgStoreWrapperEx wrapper, + ThreadPoolExecutor executor, AppConfig appConfig) { + this.responseObserver = responseObserver; + this.wrapper = wrapper; + this.executor = executor; + this.config = appConfig; + this.waitTime = this.config.getServerWaitTime(); + this.channel = HgChannel.of(waitTime); + } + + public static ScanStreamResponse of(StreamObserver responseObserver, + HgStoreWrapperEx wrapper, + ThreadPoolExecutor executor, AppConfig appConfig) { + HgAssert.isArgumentNotNull(responseObserver, "responseObserver"); + HgAssert.isArgumentNotNull(wrapper, "wrapper"); + HgAssert.isArgumentNotNull(executor, "executor"); + return new ScanStreamResponse(responseObserver, wrapper, executor, appConfig); + } + + @Override + public void onNext(ScanStreamReq request) { + try { + if (request.getCloseFlag() == 1) { + close(); + } else { + next(request); + } + } catch (Exception e) { + responseObserver.onError(e); + } + } + + @Override + public void onError(Throwable t) { + this.isStop.set(true); + this.finishServer(); + log.warn("onError from client [ graph: {} , table: {}]; Reason: {}]", graph, table, + t.getMessage()); + } + + @Override + public void onCompleted() { + this.isStop.set(true); + this.finishServer(); + } + + private void initIterator(ScanStreamReq request) { + try { + if (this.isStarted.getAndSet(true)) { + return; + } + this.iterator = getIterator(request, this.wrapper); + this.graph = request.getHeader().getGraph(); + this.table = 
request.getTable(); + this.limit = request.getLimit(); + this.pageSize = request.getPageSize(); + if (this.pageSize <= 0) { + log.warn( + "As page-Size is less than or equals 0, no data will be send to the " + + "client."); + } + /*** Start scanning loop ***/ + Runnable scanning = () -> + { + // log.debug("Start scanning, graph = {}, table= {}, limit = " + + // "{}, page size = {}", this.graph, this.table, this.limit, + // this.pageSize); + KvPageRes.Builder dataBuilder = KvPageRes.newBuilder(); + Kv.Builder kvBuilder = Kv.newBuilder(); + int pageCount = 0; + try { + while (iterator.hasNext()) { + if (limit > 0 && ++this.total > limit) { + break; + } + if (++pageCount > pageSize) { + long start = System.currentTimeMillis(); + if (!this.channel.send(dataBuilder)) { + if (System.currentTimeMillis() - start >= waitTime * 1000L) { + log.warn(msg, waitTime); + this.timeoutSever(); + } + return; + } + if (this.isStop.get()) { + return; + } + pageCount = 1; + dataBuilder = KvPageRes.newBuilder(); + } + dataBuilder.addData(toKv(kvBuilder, iterator.next(), iterator.position())); + } + this.channel.send(dataBuilder); + } catch (Throwable t) { + String msg = "an exception occurred while scanning data:"; + StatusRuntimeException ex = + HgGrpc.toErr(Status.INTERNAL, msg + t.getMessage(), t); + responseObserver.onError(ex); + } finally { + try { + this.iterator.close(); + this.channel.close(); + } catch (Exception e) { + + } + } + + }; + this.executor.execute(scanning); + } catch (Exception e) { + StatusRuntimeException ex = HgGrpc.toErr(Status.INTERNAL, null, e); + responseObserver.onError(ex); + try { + this.iterator.close(); + this.channel.close(); + } catch (Exception exception) { + + } + } + + /*** Scanning loop end ***/ + } + + private Kv toKv(Kv.Builder kvBuilder, RocksDBSession.BackendColumn col, + byte[] position) { + return kvBuilder + .setKey(ByteString.copyFrom(col.name)) + .setValue(ByteString.copyFrom(col.value)) + .setCode(HgStoreNodeUtil.toInt(position)) + 
.build(); + } + + private void close() { + this.isStop.set(true); + this.channel.close(); + if (!this.finishFlag.get()) { + responseObserver.onNext(KvPageRes.newBuilder() + .addAllData(Collections.EMPTY_LIST) + .setOver(true) + .setTimes(++times) + .build() + ); + } + + this.finishServer(); + } + + private void next(ScanStreamReq request) { + this.initIterator(request); + KvPageRes.Builder resBuilder; + + try { + resBuilder = this.channel.receive(); + times++; + } catch (Exception e) { + String msg = "failed to poll a page of data, cause by:"; + log.error(msg, e); + responseObserver.onError(HgGrpc.toErr(msg + e.getMessage())); + return; + } + boolean isOver = false; + if (resBuilder == null || resBuilder.getDataList() == null || + resBuilder.getDataList().isEmpty()) { + isOver = true; + resBuilder = KvPageRes.newBuilder().addAllData(Collections.EMPTY_LIST); + } + if (!this.finishFlag.get()) { + responseObserver.onNext(resBuilder.setOver(isOver).setTimes(times).build()); + } + if (isOver) { + this.finishServer(); + } + + } + + private void finishServer() { + if (!this.finishFlag.getAndSet(true)) { + responseObserver.onCompleted(); + } + } + + private void timeoutSever() { + if (!this.finishFlag.getAndSet(true)) { + String msg = "server wait time exceeds the threshold[" + waitTime + + "] seconds."; + responseObserver.onError( + HgGrpc.toErr(Status.Code.DEADLINE_EXCEEDED, msg)); + } + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java new file mode 100644 index 0000000000..aa97842db0 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java @@ -0,0 +1,331 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.rocksdb.access.ScanIterator; +import org.apache.hugegraph.store.business.SelectIterator; +import org.apache.hugegraph.store.grpc.common.ScanMethod; +import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; +import org.apache.hugegraph.store.grpc.stream.SelectParam; + +import lombok.extern.slf4j.Slf4j; + + +/** + * created on 2022/02/22 + * + * @version 1.0.0 + */ +@Slf4j +class ScanUtil { + + private final static Map tableKeyMap = new HashMap<>(); + + + static ScanIterator getIterator(ScanStreamReq request, HgStoreWrapperEx wrapper) { + String graph = request.getHeader().getGraph(); + String table = request.getTable(); + ScanMethod method = request.getMethod(); + byte[] start 
= request.getStart().toByteArray(); + byte[] end = request.getEnd().toByteArray(); + byte[] prefix = request.getPrefix().toByteArray(); + int partition = request.getCode(); + int scanType = request.getScanType(); + byte[] query = request.getQuery().toByteArray(); + + ScanIterator iter = null; + switch (method) { + case ALL: + iter = wrapper.scanAll(graph, table, query); + break; + case PREFIX: + iter = wrapper.scanPrefix(graph, partition, table, prefix, scanType, query); + break; + case RANGE: + iter = wrapper.scan(graph, partition, table, start, end, scanType, query); + break; + } + if (iter == null) { + log.warn("Failed to create a scanIterator with ScanMethod: [" + method + "]"); + iter = new EmptyIterator(); + } + SelectParam selects = request.getSelects(); + List properties = null; + if (selects != null) { + properties = selects.getPropertiesList(); + } + iter = new SelectIterator(iter, properties); + iter.seek(request.getPosition().toByteArray()); + return iter; + } + + static ScanIterator getIterator(ScanQuery sq, HgStoreWrapperEx wrapper) { + if (log.isDebugEnabled()) { + log.debug("{}", sq); + } + + ScanIterator iter = null; + switch (sq.method) { + case ALL: + iter = wrapper.scanAll(sq.graph, sq.table, sq.query); + break; + case PREFIX: + iter = wrapper.scanPrefix(sq.graph, sq.keyCode, sq.table, sq.prefix, sq.scanType, + sq.query); + break; + case RANGE: + iter = wrapper.scan(sq.graph, sq.keyCode, sq.table, sq.start, sq.end, sq.scanType, + sq.query); + break; + } + + if (iter == null) { + log.warn("Failed to create a scanIterator with ScanMethod: [" + sq.method + "]"); + iter = new EmptyIterator(); + } + + iter.seek(sq.position); + + return iter; + + } + + static ScanQuery toSq(ScanStreamReq request) { + ScanQuery res = ScanQuery.of(); + + res.graph = request.getHeader().getGraph(); + res.table = request.getTable(); + res.method = request.getMethod(); + + res.keyCode = request.getCode(); + res.start = request.getStart().toByteArray(); + res.end = 
request.getEnd().toByteArray(); + res.prefix = request.getPrefix().toByteArray(); + res.scanType = request.getScanType(); + res.query = request.getQuery().toByteArray(); + res.position = request.getPosition().toByteArray(); + + if (log.isDebugEnabled()) { + log.debug("{}", res); + } + //TODO: removed below. + + return res; + } + + static ScanIterator getIterator(String graph, ScanQueryRequest request, + HgStoreWrapperEx wrapper) { + ScanIteratorSupplier supplier = new ScanIteratorSupplier(graph, request, wrapper); + return BatchScanIterator.of(supplier, supplier.getLimitSupplier()); + } + + /** + * 支持并行读取的多迭代器 + */ + static ScanIterator getParallelIterator(String graph, ScanQueryRequest request, + HgStoreWrapperEx wrapper, ThreadPoolExecutor executor) { + ScanIteratorSupplier supplier = new ScanIteratorSupplier(graph, request, wrapper); + return ParallelScanIterator.of(supplier, supplier.getLimitSupplier(), + request, executor); + } + + @NotThreadSafe + private static class ScanIteratorSupplier implements + Supplier> { + + private final AtomicBoolean isEmpty = new AtomicBoolean(); + + private final String graph; + private final long perKeyMax; + private final long skipDegree; + private final HgStoreWrapperEx wrapper; + private long perKeyLimit; + private List sqs = new LinkedList<>(); + private Iterator sqIterator; + + private ScanQueryProducer scanQueryProducer; + private Iterator scanQueryIterator; + + ScanIteratorSupplier(String graph, ScanQueryRequest request, HgStoreWrapperEx wrapper) { + this.graph = graph; + this.perKeyLimit = request.getPerKeyLimit(); + this.perKeyMax = request.getPerKeyMax(); + this.skipDegree = + request.getSkipDegree() == 0 ? 
Integer.MAX_VALUE : request.getSkipDegree(); + this.wrapper = wrapper; + + if (this.perKeyLimit <= 0) { + this.perKeyLimit = Integer.MAX_VALUE; + log.warn("as perKeyLimit <=0 so default perKeyLimit was effective: {}", + Integer.MAX_VALUE); + } + //init(request); + init2(request); + } + + private void init(ScanQueryRequest request) { + this.sqs = Arrays.stream(request.getTable().split(",")) + .map(table -> { + if (table == null) { + return null; + } + if (table.isEmpty()) { + return null; + } + + List list = request.getConditionList() + .stream() + .map(condition -> { + ScanQuery sq = + ScanQuery.of(); + sq.graph = this.graph; + sq.table = table; + sq.method = + request.getMethod(); + sq.scanType = + request.getScanType(); + sq.query = + request.getQuery() + .toByteArray(); + sq.position = + request.getPosition() + .toByteArray(); + + sq.keyCode = + condition.getCode(); + sq.start = + condition.getStart() + .toByteArray(); + sq.end = condition.getEnd() + .toByteArray(); + sq.prefix = + condition.getPrefix() + .toByteArray(); + sq.serialNo = + condition.getSerialNo(); + return sq; + }) + .filter(e -> e != null) + .collect(Collectors.toList()); + + if (list == null || list.isEmpty()) { + ScanQuery sq = ScanQuery.of(); + sq.graph = this.graph; + sq.table = table; + sq.method = request.getMethod(); + sq.scanType = request.getScanType(); + sq.query = request.getQuery().toByteArray(); + sq.position = request.getPosition().toByteArray(); + list = Collections.singletonList(sq); + } + return list; + + } + ) + .flatMap(e -> e.stream()) + .collect(Collectors.toList()); + + this.sqIterator = this.sqs.iterator(); + } + + + //@Override + public KVPair get1() { + ScanIterator iterator = null; + ScanQuery query = null; + if (this.sqIterator != null && this.sqIterator.hasNext()) { + query = this.sqIterator.next(); + iterator = getIterator(query, this.wrapper); + } else { + this.sqs.clear(); + this.sqIterator = null; + } + return new KVPair<>(query, iterator); + } + + public Supplier 
getLimitSupplier() { + return () -> Math.min(perKeyLimit, skipDegree); + } + + /*----------- new -to add max --------------*/ + + private void init2(ScanQueryRequest request) { + List tableList = Arrays.stream(request.getTable().split(",")) + .filter(e -> e != null && !e.isEmpty()) + .collect(Collectors.toList()); + + if (tableList.isEmpty()) { + throw new RuntimeException("table name is invalid"); + } + + String[] tables = tableList.toArray(new String[tableList.size()]); + this.scanQueryProducer = ScanQueryProducer.requestOf(this.graph, tables, request); + this.scanQueryIterator = this.scanQueryProducer.groupedIterator(); + } + + @Override + public KVPair get() { + ScanIterator iterator = null; + ScanQuery query = null; + + if (this.scanQueryIterator != null && this.scanQueryIterator.hasNext()) { + ScanQuery[] queries = this.scanQueryIterator.next(); + query = queries[0]; + iterator = FusingScanIterator.maxOf(this.perKeyMax, new Query2Iterator((queries))); + } else { + this.scanQueryProducer = null; + this.scanQueryIterator = null; + } + return new KVPair<>(query, iterator); + } + + private class Query2Iterator implements Supplier { + ScanQuery[] queries; + int index; + + Query2Iterator(ScanQuery[] queries) { + this.queries = queries; + } + + @Override + public ScanIterator get() { + if (index + 1 > queries.length) { + return null; + } + return getIterator(queries[index++], wrapper); + } + } + + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java new file mode 100644 index 0000000000..864ba3b889 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.grpc.scan; + +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.hugegraph.store.business.BusinessHandler; +import org.apache.hugegraph.store.grpc.GraphStoreGrpc.GraphStoreImplBase; +import org.apache.hugegraph.store.grpc.Graphpb; +import org.apache.hugegraph.store.grpc.Graphpb.ResponseHeader; +import org.apache.hugegraph.store.grpc.Graphpb.ScanPartitionRequest; +import org.apache.hugegraph.store.grpc.Graphpb.ScanResponse; +import org.apache.hugegraph.store.node.grpc.HgStoreNodeService; +import org.apache.hugegraph.store.node.grpc.HgStoreStreamImpl; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * graphpb.proto 实现类 + */ +@Slf4j +@GRpcService +public class GraphStoreImpl extends GraphStoreImplBase { + + private final ResponseHeader okHeader = + ResponseHeader.newBuilder().setError( + Graphpb.Error.newBuilder().setType(Graphpb.ErrorType.OK)) + .build(); + BusinessHandler handler; + @Autowired + private HgStoreNodeService storeService; + @Autowired + private HgStoreStreamImpl storeStream; + + public BusinessHandler getHandler() { + if (this.handler == null) { + synchronized 
(this) { + if (this.handler == null) { + this.handler = + storeService.getStoreEngine().getBusinessHandler(); + } + } + } + return this.handler; + } + + public ThreadPoolExecutor getExecutor() { + return this.storeStream.getExecutor(); + } + + + /** + * 流式回复消息,每个消息带有seqNo + * 客户端每消费一个消息,应答一个seqNo + * 服务端根据客户端的seqNo决定发送几个数据包 + * + * @param ro + * @return + */ + @Override + public StreamObserver scanPartition( + StreamObserver ro) { + return new ScanResponseObserver(ro, getHandler(), getExecutor()); + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java new file mode 100644 index 0000000000..7bf56e4cdf --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java @@ -0,0 +1,267 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.grpc.scan; + +import java.util.ArrayList; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.hugegraph.store.business.BusinessHandler; +import org.apache.hugegraph.store.business.GraphStoreIterator; +import org.apache.hugegraph.store.grpc.Graphpb.Error; +import org.apache.hugegraph.store.grpc.Graphpb.ErrorType; +import org.apache.hugegraph.store.grpc.Graphpb.ResponseHeader; +import org.apache.hugegraph.store.grpc.Graphpb.ScanPartitionRequest; +import org.apache.hugegraph.store.grpc.Graphpb.ScanPartitionRequest.Request; +import org.apache.hugegraph.store.grpc.Graphpb.ScanPartitionRequest.ScanType; +import org.apache.hugegraph.store.grpc.Graphpb.ScanResponse; + +import com.google.protobuf.Descriptors; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class ScanResponseObserver implements + StreamObserver { + + private static final int BATCH_SIZE = 100000; + private static final int MAX_PAGE = 8; // + private static final Error ok = Error.newBuilder().setType(ErrorType.OK).build(); + private static final ResponseHeader okHeader = + ResponseHeader.newBuilder().setError(ok).build(); + private final BusinessHandler handler; + private final AtomicInteger nextSeqNo = new AtomicInteger(0); + private final AtomicInteger cltSeqNo = new AtomicInteger(0); + private final ThreadPoolExecutor executor; + private final AtomicBoolean readOver = new AtomicBoolean(false); + private final LinkedBlockingQueue packages = + new LinkedBlockingQueue(MAX_PAGE * 2); + private final Descriptors.FieldDescriptor vertexField = + ScanResponse.getDescriptor().findFieldByNumber(3); + private final 
Descriptors.FieldDescriptor edgeField = + ScanResponse.getDescriptor().findFieldByNumber(4); + private final ReentrantLock readLock = new ReentrantLock(); + private final ReentrantLock sendLock = new ReentrantLock(); + private StreamObserver sender; + private ScanPartitionRequest scanReq; + private GraphStoreIterator iter; + private volatile long leftCount; + private volatile Future sendTask; + private volatile Future readTask; + + /* + * 2022年11月1日 + * 1.onNext 需要进行异步处理,以防止grpc的调用阻塞 + * 2.不要读取迭代器或者发送数据不要产生线程等待 + * 3.在发送前,尽量准备好要发送的数据 + * */ + + /* + * 2022年11月2日 + * 1.读取rocksdb迭代器的线程read + * 2.进行数据转换并发送到阻塞队列的线程offer + * 3.从阻塞队列读取数据,并发送的线程,包括在没有读取到数据的情况下唤醒读取和发送的线程send + * */ + + public ScanResponseObserver(StreamObserver sender, + BusinessHandler handler, + ThreadPoolExecutor executor) { + this.sender = sender; + this.handler = handler; + this.executor = executor; + } + + private boolean readCondition() { + return packages.remainingCapacity() != 0 && !readOver.get(); + } + + private boolean readTaskCondition() { + return readCondition() && (readTask == null || readTask.isDone()); + } + + private boolean sendCondition() { + return nextSeqNo.get() - cltSeqNo.get() < MAX_PAGE; + } Runnable rr = new Runnable() { + @Override + public void run() { + try { + if (readCondition()) { + synchronized (iter) { + while (readCondition()) { + Request r = scanReq.getScanRequest(); + ScanType t = r.getScanType(); + boolean isVertex = t.equals(ScanType.SCAN_VERTEX); + ArrayList data = new ArrayList<>(BATCH_SIZE); + int count = 0; + while (iter.hasNext() && leftCount > -1) { + count++; + leftCount--; + T next = (T) iter.next(); + data.add(next); + if (count >= BATCH_SIZE) { + offer(data, isVertex); + // data.clear(); + break; + } + } + if (!(iter.hasNext() && leftCount > -1)) { + if (data.size() > 0 && + data.size() < BATCH_SIZE) { + offer(data, isVertex); + } + readOver.set(true); + data = null; + //log.warn("scan complete , count: {},time: {}", + // sum, System.currentTimeMillis()); 
+ return; + } + } + } + } + } catch (Exception e) { + log.warn("read data with error: ", e); + sender.onError(e); + } + } + }; + + private boolean sendTaskCondition() { + return sendCondition() && (sendTask == null || sendTask.isDone()); + } + + private void offer(Iterable data, boolean isVertex) { + ScanResponse.Builder builder = ScanResponse.newBuilder(); + builder.setHeader(okHeader).setSeqNo(nextSeqNo.get()); + if (isVertex) { + builder = builder.setField(vertexField, data); + } else { + builder = builder.setField(edgeField, data); + } + ScanResponse response = builder.build(); + packages.offer(response); + startSend(); + } + + private void startRead() { + if (readTaskCondition()) { + if (readLock.tryLock()) { + if (readTaskCondition()) { + readTask = executor.submit(rr); + } + readLock.unlock(); + } + } + } Runnable sr = () -> { + while (sendCondition()) { + ScanResponse response; + try { + if (readOver.get()) { + if ((response = packages.poll()) == null) { + sender.onCompleted(); + } else { + sender.onNext(response); + nextSeqNo.incrementAndGet(); + } + } else { + response = packages.poll(10, + TimeUnit.MILLISECONDS); + if (response != null) { + sender.onNext(response); + nextSeqNo.incrementAndGet(); + startRead(); + } else { + break; + } + } + + } catch (InterruptedException e) { + break; + } + } + }; + + private void startSend() { + if (sendTaskCondition()) { + if (sendLock.tryLock()) { + if (sendTaskCondition()) { + sendTask = executor.submit(sr); + } + sendLock.unlock(); + } + } + } + + @Override + public void onNext(ScanPartitionRequest scanReq) { + if (scanReq.hasScanRequest() && !scanReq.hasReplyRequest()) { + this.scanReq = scanReq; + Request request = scanReq.getScanRequest(); + long rl = request.getLimit(); + leftCount = rl > 0 ? 
rl : Long.MAX_VALUE; + iter = handler.scan(scanReq); + if (!iter.hasNext()) { + close(); + sender.onCompleted(); + } else { + readTask = executor.submit(rr); + } + } else { + cltSeqNo.getAndIncrement(); + startSend(); + } + } + + @Override + public void onError(Throwable t) { + close(); + log.warn("receive client error:", t); + } + + @Override + public void onCompleted() { + close(); + } + + private void close() { + try { + nextSeqNo.set(0); + if (sendTask != null) { + sendTask.cancel(true); + } + if (readTask != null) { + readTask.cancel(true); + } + readOver.set(true); + iter.close(); + } catch (Exception e) { + log.warn("on Complete with error:", e); + } + } + + + + + + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/ContextClosedListener.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/ContextClosedListener.java new file mode 100644 index 0000000000..e990acfe6f --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/ContextClosedListener.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.listener; + +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.hugegraph.store.node.grpc.HgStoreStreamImpl; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.ApplicationListener; +import org.springframework.context.event.ContextClosedEvent; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class ContextClosedListener implements ApplicationListener { + + @Autowired + HgStoreStreamImpl storeStream; + + @Override + public void onApplicationEvent(ContextClosedEvent event) { + try { + log.info("closing scan threads...."); + ThreadPoolExecutor executor = storeStream.getRealExecutor(); + if (executor != null) { + try { + executor.shutdownNow(); + } catch (Exception e) { + // intentionally ignored: best-effort shutdown of scan threads + } + } + } catch (Exception ignored) { + // intentionally ignored: context is closing, failures are not actionable + } finally { + log.info("closed scan threads"); + } + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java new file mode 100644 index 0000000000..709e7fdb9d --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.listener; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import java.security.SecureRandom; +import java.util.Iterator; +import java.util.Locale; +import java.util.Map; +import java.util.Properties; +import java.util.function.Consumer; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.hugegraph.pd.client.KvClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchResponse; +import org.springframework.boot.context.event.ApplicationEnvironmentPreparedEvent; +import org.springframework.context.ApplicationListener; +import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.core.env.MutablePropertySources; +import org.springframework.core.env.PropertiesPropertySource; +import org.yaml.snakeyaml.Yaml; + +import com.google.common.base.Charsets; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class PdConfigureListener implements + ApplicationListener { + + private static final String CONFIG_PREFIX = "S:"; + private static final String CONFIG_FIX_PREFIX = "S:FS"; + private static final String TIMESTAMP_KEY = "S:Timestamp"; + private static final String PD_CONFIG_FILE_NAME = "application-pd.yml"; + private final String workDir = System.getProperty("user.dir"); + private final String fileSeparator = 
System.getProperty("file.separator"); + private final String configFilePath = + workDir + fileSeparator + "conf" + fileSeparator + PD_CONFIG_FILE_NAME; + private final String restartShellPath = workDir + fileSeparator + "bin" + fileSeparator + + "restart-hugegraph-store.sh"; + private ConfigurableApplicationContext context; + private File pdConfFile; + // private String restartPath = workDir + fileSeparator + "lib" + fileSeparator; + + @Override + public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) { + MutablePropertySources sources = event.getEnvironment().getPropertySources(); + String pdAddress = event.getEnvironment().getProperty("pdserver.address"); + pdConfFile = new File(configFilePath); + // String[] defaultProfiles = event.getEnvironment().getDefaultProfiles(); + // String defaultProfile = defaultProfiles[0]; + // PropertySource appSource = null; + // for (PropertySource source : sources) { + // log.info("source name:{},{}", source.getName(), source.getSource()); + // boolean applicationConfig = source.getName().contains("application.yml"); + // if (applicationConfig) { + // appSource = source; + // break; + // } + // } + // Map appSourceMap = (Map) + // appSource + // .getSource(); + // OriginTrackedValue pdTrackedValue = appSourceMap.get("pdserver.address"); + // String pdAddress = pdTrackedValue.getValue().toString(); + KvClient client = new KvClient(PDConfig.of(pdAddress)); + try { + ScanPrefixResponse response = client.scanPrefix(CONFIG_PREFIX); + Map kvsMap = response.getKvsMap(); + String pdConfig = kvsMap.get(CONFIG_FIX_PREFIX); + if (!StringUtils.isEmpty(pdConfig)) { + updatePdConfig(sources, client, pdConfig); + } else { + // send local application-pd.yml to pd + if (pdConfFile.exists()) { + String commons = FileUtils.readFileToString(pdConfFile, Charsets.UTF_8); + log.info("send local application-pd.yml to pd....{}", commons); + client.put(CONFIG_FIX_PREFIX, commons); + } + } + log.info("Start listening for keys :" + 
TIMESTAMP_KEY); + client.listen(TIMESTAMP_KEY, (Consumer) o -> { + log.info("receive message to restart :" + o); + try { + // Fetch and apply the latest config from PD before restarting, so that + // changes to parameters such as ports do not cause the stale local + // file to be loaded first. + ScanPrefixResponse responseNew = client.scanPrefix(CONFIG_PREFIX); + Map kvsMapNew = responseNew.getKvsMap(); + String config = kvsMapNew.get(CONFIG_FIX_PREFIX); + updatePdConfig(sources, client, config); + restart(); + } catch (Exception e) { + log.error("start listener with error:", e); + } + }); + + } catch (Exception e) { + log.error("start listener with error:", e); + } + + } + + private void updatePdConfig(MutablePropertySources sources, KvClient client, + String pdConfig) throws + PDException, + IOException { + Properties configs = getYmlConfig(pdConfig); + String property = client.get(TIMESTAMP_KEY).getValue(); + long pdLastModified = 0; + if (!StringUtils.isEmpty(property)) { + pdLastModified = Long.parseLong(property); + } + if (!pdConfFile.exists() || pdConfFile.lastModified() <= pdLastModified) { + log.info("update local application-pd.yml from pd....{}", pdConfig); + writeYml(pdConfig); + PropertiesPropertySource source = new PropertiesPropertySource("pd-config", configs); + sources.addFirst(source); + } + } + + private Properties getYmlConfig(String yml) { + Yaml yaml = new Yaml(); + Iterable load = yaml.loadAll(yml); + Iterator iterator = load.iterator(); + Properties properties = new Properties(); + while (iterator.hasNext()) { + Map next = (Map) iterator.next(); + map2Properties(next, "", properties); + } + return properties; + } + + private void map2Properties(Map map, String prefix, Properties properties) { + + for (Map.Entry entry : map.entrySet()) { + String key = entry.getKey(); + String newPrefix = StringUtils.isEmpty(prefix) ? key : prefix + "." 
+ key; + Object value = entry.getValue(); + if (!(value instanceof Map)) { + properties.put(newPrefix, value); + } else { + map2Properties((Map) value, newPrefix, properties); + } + + } + } + + public ConfigurableApplicationContext getContext() { + return context; + } + + public void setContext(ConfigurableApplicationContext context) { + this.context = context; + } + + // private void restartBySpringBootApplication() { + // ApplicationArguments args = context.getBean(ApplicationArguments.class); + // Thread thread = new Thread(() -> { + // context.close(); + // try { + // Thread.sleep(5000L); + // } catch (InterruptedException e) { + // + // } + // StoreNodeApplication.start(); + // }); + // thread.setDaemon(false); + // thread.start(); + // } + + private void restart() throws InterruptedException, IOException { + ProcessBuilder builder; + String os = System.getProperty("os.name"); + if (os.toLowerCase(Locale.getDefault()).contains("win")) { + builder = new ProcessBuilder("cmd", "/c", restartShellPath).inheritIO(); + } else { + log.info("run shell {}", restartShellPath); + builder = new ProcessBuilder("sh", "-c", restartShellPath).inheritIO(); + } + SecureRandom random = new SecureRandom(); + int sleepTime = random.nextInt(60); + log.info("app will restart in {} seconds:", sleepTime); + Thread.sleep(sleepTime * 1000); + Process process = builder.start(); + log.info("waiting restart.... 
{}", restartShellPath); + process.waitFor(); + } + + private void writeYml(String yml) throws IOException { + FileUtils.writeStringToFile(pdConfFile, yml, Charset.defaultCharset(), false); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/DriveMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/DriveMetrics.java new file mode 100644 index 0000000000..8fc578c054 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/DriveMetrics.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.metrics; + +import java.io.File; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * 2021/11/23 + */ +@Deprecated +public class DriveMetrics { + + private static final long MIB = 1024 * 1024; + + // TODO: add a cache + public Map> metrics() { + File[] rootDrive = File.listRoots(); + + if (rootDrive == null) { + return new LinkedHashMap(0); + } + + Map> metrics = new HashMap<>(); + + for (File d : rootDrive) { + Map buf = new HashMap<>(); + buf.put("total_space", d.getTotalSpace() / MIB); + buf.put("free_space", d.getFreeSpace() / MIB); + buf.put("usable_space", d.getUsableSpace() / MIB); + buf.put("size_unit", "MB"); + + metrics.put(d.getPath().replace("\\", ""), buf); + + } + + return metrics; + + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java new file mode 100644 index 0000000000..cb5619a8c1 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.metrics; + +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.hugegraph.store.node.grpc.GRpcServerConfig; +import org.apache.hugegraph.store.node.util.HgExecutorUtil; + +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.MeterRegistry; + +/** + * 2022/3/8 + */ +public class GRpcExMetrics { + public final static String PREFIX = "grpc"; + private final static ExecutorWrapper wrapper = new ExecutorWrapper(); + private static MeterRegistry registry; + + private GRpcExMetrics() { + } + + public synchronized static void init(MeterRegistry meterRegistry) { + if (registry == null) { + registry = meterRegistry; + registerMeters(); + } + } + + private static void registerMeters() { + registerExecutor(); + + } + + private static void registerExecutor() { + + Gauge.builder(PREFIX + ".executor.pool.size", wrapper, (e) -> e.getPoolSize()) + .description("The current number of threads in the pool.") + .register(registry); + + Gauge.builder(PREFIX + ".executor.core.pool.size", wrapper, (e) -> e.getCorePoolSize()) + .description( + "The largest number of threads that have ever simultaneously been in the " + + "pool.") + .register(registry); + + Gauge.builder(PREFIX + ".executor.active.count", wrapper, (e) -> e.getActiveCount()) + .description("The approximate number of threads that are actively executing tasks.") + .register(registry); + } + + private static class ExecutorWrapper { + ThreadPoolExecutor pool; + + void init() { + if (this.pool == null) { + pool = HgExecutorUtil.getThreadPoolExecutor(GRpcServerConfig.EXECUTOR_NAME); + } + } + + double getPoolSize() { + init(); + return this.pool == null ? 0d : this.pool.getPoolSize(); + } + + int getCorePoolSize() { + init(); + return this.pool == null ? 
0 : this.pool.getCorePoolSize(); + } + + int getActiveCount() { + init(); + return this.pool == null ? 0 : this.pool.getActiveCount(); + } + + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java new file mode 100644 index 0000000000..10d5b14791 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java @@ -0,0 +1,314 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.metrics; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hugegraph.store.HgStoreEngine; +import org.apache.hugegraph.store.node.util.HgRegexUtil; + +import com.alipay.sofa.jraft.core.NodeMetrics; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Meter; +import com.codahale.metrics.Snapshot; +import com.codahale.metrics.Timer; + +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Tag; +import lombok.extern.slf4j.Slf4j; + +/** + * 2022/1/4 + */ +@Slf4j +public class JRaftMetrics { + public final static String PREFIX = "jraft"; + public static final String LABELS = "quantile"; + public static final String LABEL_50 = "0.5"; + public static final String LABEL_75 = "0.75"; + public static final String LABEL_95 = "0.95"; + public static final String LABEL_98 = "0.98"; + public static final String LABEL_99 = "0.99"; + public static final String LABEL_999 = "0.999"; + private final static HgStoreEngine storeEngine = HgStoreEngine.getInstance(); + private final static AtomicInteger groups = new AtomicInteger(0); + private final static Tag handleDataTag = Tag.of("handle", "data"); + // private final static Tag handleTxTag = Tag.of("handle", "tx"); //reservation + private final static Set groupSet = new HashSet<>(); + private final static String REGEX_REFINE_REPLICATOR = "(replicator)(.+?:\\d+)(.*)"; + private static MeterRegistry registry; + + private JRaftMetrics() { + } + + public synchronized static void init(MeterRegistry meterRegistry) { + if (registry == null) { + registry = meterRegistry; + registerMeters(); + } + } + + private static void registerMeters() { + Gauge.builder(PREFIX + ".groups", 
JRaftMetrics::updateGroups) + .description("Number of raft-groups, which handled the data of graph.") + .tags(Collections.singleton(handleDataTag)) + .register(registry); + + } + + private static int updateGroups() { + int buf = getGroups(); + if (buf != groups.get()) { + groups.set(buf); + registerNodeMetrics(); + } + return buf; + } + + private static int getGroups() { + return storeEngine.getRaftGroupCount(); + } + + private static Map getRaftGroupMetrics() { + Map map = storeEngine.getNodeMetrics(); + + if (map == null) { + return Collections.emptyMap(); + } + + return map; + } + + private static void registerNodeMetrics() { + Map map = getRaftGroupMetrics(); + + synchronized (groupSet) { + map.forEach((group, metrics) -> { + if (!groupSet.add(group)) { + return; + } + + metrics.getMetricRegistry().getGauges() + .forEach((k, v) -> registerGauge(group, k, v)); + metrics.getMetricRegistry().getMeters() + .forEach((k, v) -> registerMeter(group, k, v)); + metrics.getMetricRegistry().getCounters() + .forEach((k, v) -> registerCounter(group, k, v)); + metrics.getMetricRegistry().getTimers() + .forEach((k, v) -> registerTimer(group, k, v)); + metrics.getMetricRegistry().getHistograms() + .forEach((k, v) -> registerHistogram(group, k, v)); + }); + } + + } + + private static HistogramWrapper toWrapper(com.codahale.metrics.Histogram histogram) { + return new HistogramWrapper(histogram); + } + + private static String refineMetrics(String name, List tags) { + if (name == null || name.isEmpty()) { + return name; + } + + List buf = HgRegexUtil.toGroupValues(REGEX_REFINE_REPLICATOR, name); + String res = null; + + /*Extracted name of replicator into a tag.*/ + + if (buf != null && buf.size() == 4) { + res = buf.get(1) + buf.get(3); + + String value = buf.get(2); + + if (value != null && value.startsWith("-")) { + value = value.substring(1); + } + + tags.add(Tag.of("replicator", value)); + } else { + res = name; + } + + return res; + } + + private static void 
registerHistogram(String group, String name, + com.codahale.metrics.Histogram histogram) { + if (histogram == null) { + return; + } + + List tags = new LinkedList<>(); + tags.add(handleDataTag); + tags.add(Tag.of("group", group)); + + name = refineMetrics(name, tags); + + String baseName = PREFIX + "." + name.toLowerCase(); + + HistogramWrapper wrapper = toWrapper(histogram); + + Gauge.builder(baseName + ".median", wrapper, (d) -> d.getSnapshot().getMedian()) + .tags(tags).register(registry); + Gauge.builder(baseName + ".min", wrapper, (d) -> d.getSnapshot().getMin()) + .tags(tags).register(registry); + Gauge.builder(baseName + ".max", wrapper, (d) -> d.getSnapshot().getMax()) + .tags(tags).register(registry); + Gauge.builder(baseName + ".mean", wrapper, (d) -> d.getSnapshot().getMean()) + .tags(tags).register(registry); + + baseName = baseName + ".summary"; + Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().getMedian()) + .tags(tags).tag(LABELS, LABEL_50).register(registry); + Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get75thPercentile()) + .tags(tags).tag(LABELS, LABEL_75).register(registry); + Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get95thPercentile()) + .tags(tags).tag(LABELS, LABEL_95).register(registry); + Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get98thPercentile()) + .tags(tags).tag(LABELS, LABEL_98).register(registry); + Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get99thPercentile()) + .tags(tags).tag(LABELS, LABEL_99).register(registry); + Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get999thPercentile()) + .tags(tags).tag(LABELS, LABEL_999).register(registry); + + Gauge.builder(baseName + ".sum", wrapper, + (d) -> Arrays.stream(d.getSnapshot().getValues()).sum()) + .tags(tags).register(registry); + Gauge.builder(baseName + ".count", wrapper, (d) -> d.getSnapshot().size()) + .tags(tags).register(registry); + + } + + private static void registerTimer(String group, String name, 
com.codahale.metrics.Timer timer) { + List tags = new LinkedList<>(); + tags.add(handleDataTag); + tags.add(Tag.of("group", group)); + + name = refineMetrics(name, tags); + + String baseName = PREFIX + "." + name.toLowerCase(); + + Gauge.builder(baseName + ".count", timer, Timer::getCount) + .tags(tags).register(registry); + + Gauge.builder(baseName + ".timer", timer, Timer::getCount) + .tags(tags).tag("rate", "1m").register(registry); + Gauge.builder(baseName + ".timer", timer, Timer::getCount) + .tags(tags).tag("rate", "5m").register(registry); + Gauge.builder(baseName + ".timer", timer, Timer::getCount) + .tags(tags).tag("rate", "15m").register(registry); + Gauge.builder(baseName + ".timer", timer, Timer::getCount) + .tags(tags).tag("rate", "mean").register(registry); + + } + + private static void registerMeter(String group, String name, com.codahale.metrics.Meter meter) { + List tags = new LinkedList<>(); + tags.add(handleDataTag); + tags.add(Tag.of("group", group)); + + name = refineMetrics(name, tags); + + String baseName = PREFIX + "." + name.toLowerCase(); + + Gauge.builder(baseName + ".count", meter, Meter::getCount) + .tags(tags) + .register(registry); + + Gauge.builder(baseName + ".rate", meter, Meter::getCount) + .tags(tags).tag("rate", "1m") + .register(registry); + Gauge.builder(baseName + ".rate", meter, Meter::getCount) + .tags(tags).tag("rate", "5m") + .register(registry); + Gauge.builder(baseName + ".rate", meter, Meter::getCount) + .tags(tags).tag("rate", "15m") + .register(registry); + Gauge.builder(baseName + ".rate", meter, Meter::getCount) + .tags(tags).tag("rate", "mean") + .register(registry); + + } + + private static void registerCounter(String group, String name, + com.codahale.metrics.Counter counter) { + List tags = new LinkedList<>(); + tags.add(handleDataTag); + tags.add(Tag.of("group", group)); + + name = refineMetrics(name, tags); + + name = name.toLowerCase(); + + //Adapted a counter to be a gauge. + Gauge.builder(PREFIX + "." 
+ name + ".count", counter, Counter::getCount) + .tags(tags).register(registry); + } + + private static void registerGauge(String group, String name, + com.codahale.metrics.Gauge gauge) { + List tags = new LinkedList<>(); + tags.add(handleDataTag); + tags.add(Tag.of("group", group)); + + name = refineMetrics(name, tags); + + name = name.toLowerCase(); + + if (gauge.getValue() instanceof Number) { + Gauge.builder(PREFIX + "." + name, gauge, (g) -> ((Number) g.getValue()).doubleValue()) + .tags(tags).register(registry); + } else { + Gauge.builder(PREFIX + "." + name, () -> 1.0) + .tags(tags) + .tag("str.gauge", String.valueOf(gauge.getValue())).register(registry); + } + + } + + private static class HistogramWrapper { + private final com.codahale.metrics.Histogram histogram; + + private Snapshot snapshot; + private long ts = System.currentTimeMillis(); + + HistogramWrapper(com.codahale.metrics.Histogram histogram) { + this.histogram = histogram; + this.snapshot = this.histogram.getSnapshot(); + } + + Snapshot getSnapshot() { + if (System.currentTimeMillis() - this.ts > 30_000) { + this.snapshot = this.histogram.getSnapshot(); + this.ts = System.currentTimeMillis(); + } + return this.snapshot; + } + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsConfig.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsConfig.java new file mode 100644 index 0000000000..7062ee7eb8 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsConfig.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.metrics; + +import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import io.micrometer.core.instrument.MeterRegistry; + +/** + * 2021/11/24 + */ +@Configuration +public class MetricsConfig { + + @Bean + public MeterRegistryCustomizer metricsCommonTags() { + return (registry) -> registry.config().commonTags("hg", "store"); + } + + @Bean + public MeterRegistryCustomizer registerMeters() { + return (registry) -> { + StoreMetrics.init(registry); + RocksDBMetrics.init(registry); + JRaftMetrics.init(registry); + ProcfsMetrics.init(registry); + GRpcExMetrics.init(registry); + }; + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsUtil.java new file mode 100644 index 0000000000..62c966fbfd --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsUtil.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.metrics; + +import com.codahale.metrics.Counter; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.Timer; + +@Deprecated +public class MetricsUtil { + + private static final MetricRegistry registry = new MetricRegistry(); + + public static Gauge registerGauge(Class clazz, String name, + Gauge gauge) { + return registry.register(MetricRegistry.name(clazz, name), gauge); + } + + public static Counter registerCounter(Class clazz, String name) { + return registry.counter(MetricRegistry.name(clazz, name)); + } + + public static Histogram registerHistogram(Class clazz, String name) { + return registry.histogram(MetricRegistry.name(clazz, name)); + } + + public static Meter registerMeter(Class clazz, String name) { + return registry.meter(MetricRegistry.name(clazz, name)); + } + + public static Timer registerTimer(Class clazz, String name) { + return registry.timer(MetricRegistry.name(clazz, name)); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java new file mode 100644 index 0000000000..ed32d88df1 --- /dev/null +++ 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hugegraph.store.node.metrics; + + +import static org.apache.hugegraph.store.node.metrics.ProcfsReader.ReadResult; + +import java.io.IOException; +import java.util.Collection; +import java.util.Objects; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +// TODO: refer license later, 83% match, maybe refer to metrics-jvm-extras (0.1.3) APL2.0 +abstract class ProcfsEntry { + + private static final Logger log = LoggerFactory.getLogger(ProcfsEntry.class); + + private final Object lock = new Object(); + + private final ProcfsReader reader; + + private long lastHandle = -1; + + protected ProcfsEntry(ProcfsReader reader) { + this.reader = Objects.requireNonNull(reader); + } + + protected final void collect() { + synchronized (lock) { + try { + final ReadResult result = reader.read(); + if (result != null && (lastHandle == -1 || lastHandle != result.getReadTime())) { + reset(); + handle(result.getLines()); + lastHandle = result.getReadTime(); + } + } catch (IOException e) { + reset(); + log.warn("Failed reading '" + 
reader.getEntryPath() + "'!", e); + } + } + } + + protected abstract void reset(); + + protected abstract void handle(Collection lines); + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java new file mode 100644 index 0000000000..6a3f97f27f --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.metrics; + + +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.MeterRegistry; + +/** + * 2022/3/1 + * + * @version 0.1.0 + */ +public class ProcfsMetrics { + + public final static String PREFIX = "process_memory"; + private final static ProcfsSmaps smaps = new ProcfsSmaps(); + private static MeterRegistry registry; + + private ProcfsMetrics() { + + } + + public synchronized static void init(MeterRegistry meterRegistry) { + if (registry == null) { + registry = meterRegistry; + registerMeters(); + } + } + + private static void registerMeters() { + registerProcessGauge(); + } + + private static void registerProcessGauge() { + Gauge.builder(PREFIX + ".rss.bytes", () -> smaps.get(ProcfsSmaps.KEY.RSS)) + .register(registry); + + Gauge.builder(PREFIX + ".pss.bytes", () -> smaps.get(ProcfsSmaps.KEY.PSS)) + .register(registry); + + Gauge.builder(PREFIX + ".vss.bytes", () -> smaps.get(ProcfsSmaps.KEY.VSS)) + .register(registry); + + Gauge.builder(PREFIX + ".swap.bytes", () -> smaps.get(ProcfsSmaps.KEY.SWAP)) + .register(registry); + + Gauge.builder(PREFIX + ".swappss.bytes", () -> smaps.get(ProcfsSmaps.KEY.SWAPPSS)) + .register(registry); + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsReader.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsReader.java new file mode 100644 index 0000000000..b378244f9f --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsReader.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;

// TODO: refer license later, 90% match, maybe refer to metrics-jvm-extras (0.1.3) APL2.0

/**
 * Time-based caching reader for a single entry under {@code /proc/self}
 * (e.g. "smaps"). Physical reads happen at most once per
 * {@link #CACHE_DURATION_MS}; within the window the cached lines are returned.
 * On non-Linux platforms reads yield an empty list instead of failing.
 */
class ProcfsReader {

    /** Results younger than this many milliseconds are served from the cache. */
    /* default */ static final long CACHE_DURATION_MS = 100;

    /** One shared reader per entry name. */
    private static final Map<String, ProcfsReader> instances = new HashMap<>();
    private static final Object instancesLock = new Object();

    /** Last lines read, keyed by the entry's file name; shared across readers. */
    private static final Map<Path, List<String>> data = new HashMap<>();
    private static final Object dataLock = new Object();

    private static final Path BASE = Paths.get("/proc", "self");

    private final Path entryPath;
    // false on non-Linux platforms, where readPath() short-circuits
    private final boolean osSupport;
    /** Timestamp (ms) of the last physical read; -1 before the first read. */
    /* default */ long lastReadTime = -1;

    private ProcfsReader(String entry) {
        this(BASE, entry, false);
    }

    /** Test hook: reads from an arbitrary base dir and bypasses the OS check. */
    /* default */ ProcfsReader(Path base, String entry) {
        this(base, entry, true);
    }

    private ProcfsReader(Path base, String entry, boolean forceOSSupport) {
        Objects.requireNonNull(base);
        Objects.requireNonNull(entry);

        this.entryPath = base.resolve(entry);

        // /proc only exists on Linux; locale fixed so case-folding is stable
        this.osSupport = forceOSSupport
                         || System.getProperty("os.name")
                                  .toLowerCase(Locale.ENGLISH)
                                  .startsWith("linux");
    }

    /** Returns the shared reader for the given entry, creating it on demand. */
    /* default */ static ProcfsReader getInstance(String entry) {
        Objects.requireNonNull(entry);

        synchronized (instancesLock) {
            return instances.computeIfAbsent(entry, ProcfsReader::new);
        }
    }

    /* default */ Path getEntryPath() {
        return entryPath;
    }

    /* default */ ReadResult read() throws IOException {
        return read(currentTime());
    }

    /**
     * Reads the entry, or returns the cached lines when the last physical read
     * happened less than {@link #CACHE_DURATION_MS} before currentTimeMillis.
     *
     * @throws IOException if the underlying file cannot be read
     */
    /* default */ ReadResult read(long currentTimeMillis) throws IOException {
        synchronized (dataLock) {
            final Path key = getEntryPath().getFileName();

            if (lastReadTime == -1 || lastReadTime + CACHE_DURATION_MS < currentTimeMillis) {
                final List<String> lines = readPath(entryPath);
                cacheResult(key, lines);
                lastReadTime = currentTime();
                return new ReadResult(lines, lastReadTime);
            }
            return new ReadResult(data.get(key), lastReadTime);
        }
    }

    /* default */ List<String> readPath(Path path) throws IOException {
        Objects.requireNonNull(path);

        if (!osSupport) {
            return Collections.emptyList();
        }
        return Files.readAllLines(path);
    }

    /* default */ void cacheResult(Path key, List<String> lines) {
        Objects.requireNonNull(key);
        Objects.requireNonNull(lines);

        data.put(key, lines);
    }

    /* default */ long currentTime() {
        return System.currentTimeMillis();
    }

    /** Immutable pairing of the lines read and the time they were read at. */
    /* default */ static class ReadResult {

        private final List<String> lines;

        private final long readTime;

        /* default */ ReadResult(List<String> lines, long readTime) {
            this.lines = Objects.requireNonNull(lines);
            this.readTime = readTime;
        }

        public long getReadTime() {
            return readTime;
        }

        public List<String> getLines() {
            return lines;
        }
    }
}
0000000000..69edf52de0 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsSmaps.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hugegraph.store.node.metrics; + +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongUnaryOperator; + +// TODO: refer license later, 88% match, maybe refer to metrics-jvm-extras (0.1.0) APL2.0 +public class ProcfsSmaps extends ProcfsEntry { + + private static final int KILOBYTE = 1024; + private final Map values = new HashMap<>(); + + public ProcfsSmaps() { + super(ProcfsReader.getInstance("smaps")); + } + + /* default */ ProcfsSmaps(ProcfsReader reader) { + super(reader); + } + + private static long parseKiloBytes(String line) { + Objects.requireNonNull(line); + + return Long.parseLong(line.split("\\s+")[1]); + } + + @Override + protected void reset() { + EnumSet.allOf(KEY.class).forEach(key -> values.put(key, new AtomicLong(-1))); + } + + @Override + protected void handle(Collection lines) { + Objects.requireNonNull(lines); + + for 
(final String line : lines) { + if (line.startsWith("Size:")) { + inc(KEY.VSS, parseKiloBytes(line) * KILOBYTE); + } else if (line.startsWith("Rss:")) { + inc(KEY.RSS, parseKiloBytes(line) * KILOBYTE); + } else if (line.startsWith("Pss:")) { + inc(KEY.PSS, parseKiloBytes(line) * KILOBYTE); + } else if (line.startsWith("Swap:")) { + inc(KEY.SWAP, parseKiloBytes(line) * KILOBYTE); + } else if (line.startsWith("SwapPss:")) { + inc(KEY.SWAPPSS, parseKiloBytes(line) * KILOBYTE); + } + } + } + + public Long get(KEY key) { + Objects.requireNonNull(key); + + collect(); + return Long.valueOf(values.get(key).longValue()); + } + + private void inc(KEY key, long increment) { + Objects.requireNonNull(key); + + values.get(key).getAndUpdate(new LongUnaryOperator() { + + @Override + public long applyAsLong(long currentValue) { + return currentValue + increment + (currentValue == -1 ? 1 : 0); + } + + }); + } + + public enum KEY { + /** + * Virtual set size + */ + VSS, + /** + * Resident set size + */ + RSS, + /** + * Proportional set size + */ + PSS, + /** + * Paged out memory + */ + SWAP, + /** + * Paged out memory accounting shared pages. Since Linux 4.3. + */ + SWAPPSS + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java new file mode 100644 index 0000000000..5575fc3b9a --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java @@ -0,0 +1,419 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.metrics; + +import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.HISTOGRAMS; +import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.LABELS; +import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.LABEL_50; +import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.LABEL_95; +import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.LABEL_99; +import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.PREFIX; +import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.TICKERS; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.apache.hugegraph.rocksdb.access.RocksDBFactory; +import org.apache.hugegraph.rocksdb.access.RocksDBSession; +import org.apache.hugegraph.store.HgStoreEngine; +import org.apache.hugegraph.store.node.util.HgAssert; +import org.rocksdb.HistogramData; +import org.rocksdb.HistogramType; +import org.rocksdb.MemoryUsageType; +import org.rocksdb.Statistics; +import org.rocksdb.TickerType; + +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.Meter; +import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.slf4j.Slf4j; + +/** 
+ * 2021/12/30 + * + * @version 1.2.0 on 2022/03/22 added auto meter removing when graph has been closed. + */ +@Slf4j +public class RocksDBMetrics { + private final static RocksDBFactory rocksDBFactory = RocksDBFactory.getInstance(); + private final static AtomicInteger rocks = new AtomicInteger(0); + private final static Set graphSet = new HashSet<>(); + private final static HgStoreEngine storeEngine = HgStoreEngine.getInstance(); + private final static MemoryUseWrapper memoryUseWrapper = new MemoryUseWrapper(); + private final static Map statisticsHolder = new HashMap<>(); + private final static Map histogramHolder = new HashMap<>(); + private final static Map> graphMeterMap = new ConcurrentHashMap<>(); + private static MeterRegistry registry; + + private RocksDBMetrics() { + } + + public static void init(final MeterRegistry meterRegistry) { + HgAssert.isArgumentNotNull(meterRegistry, "meterRegistry"); + + if (registry != null) { + return; + + } + + registry = meterRegistry; + + Gauge.builder("rocks.num", RocksDBMetrics::updateRocks) + .description("Number of instance of RocksDB running in this node") + .register(registry); + + registerMemoryUse(); + } + + private static int updateRocks() { + int buf = getRocks(); + + if (buf != rocks.get()) { + rocks.set(buf); + registerMeter(); + } + + return buf; + } + + private static int getRocks() { + return rocksDBFactory.getSessionSize(); + } + + private static Set getGraphs() { + return rocksDBFactory.getGraphNames(); + } + + private static RocksDBSession getRocksDBSession(String graph) { + return rocksDBFactory.queryGraphDB(graph); + } + + + private static synchronized void registerMeter() { + Set graphs = getGraphs(); + + if (graphs == null) { + log.error( + "Failed to fetch the collection of names of graph, when invoking to register " + + "RocksDB gauge."); + return; + } + + graphs.forEach(g -> { + if (!graphSet.add(g)) { + return; + } + + StatisticsWrapper stats = new StatisticsWrapper(g); + statisticsHolder.put(g, 
stats); + + for (final TickerType ticker : TICKERS) { + String gaugeName = PREFIX + "." + ticker.name().toLowerCase(); + + saveGraphMeter(g, + Gauge.builder(gaugeName, () -> stats.getTickerCount(ticker)) + .description("RocksDB reported statistics for " + ticker.name()) + .tag("graph", g) + .register(registry) + ); + } + + for (final HistogramType histogram : HISTOGRAMS) { + registerHistogram(g, registry, histogram, stats); + } + + registrySessionRefNum(g); + + }); + + graphSet.removeAll(graphSet.stream().filter(g -> !graphs.contains(g)) + .peek(g -> removeMeters(g)) + .collect(Collectors.toList()) + ); + + } + + private static void saveGraphMeter(String g, Meter meter) { + graphMeterMap.computeIfAbsent(g, k -> new HashSet<>()).add(meter); + } + + private static void removeMeters(String g) { + graphMeterMap.getOrDefault(g, Collections.emptySet()).forEach(e -> registry.remove(e)); + } + + private static void registerHistogram(String graph, MeterRegistry registry, HistogramType + histogramType, StatisticsWrapper stats) { + + HistogramDataWrapper histogram = new HistogramDataWrapper(histogramType, + () -> stats.getHistogramData( + histogramType)); + histogramHolder.put(histogram, histogramType); + + String baseName = PREFIX + "." 
+ histogramType.name().toLowerCase(); + saveGraphMeter(graph, + Gauge.builder(baseName + ".max", histogram, HistogramDataWrapper::getMax) + .tag("graph", graph).register(registry)); + saveGraphMeter(graph, Gauge.builder(baseName + ".mean", histogram, + HistogramDataWrapper::getAverage).tag("graph", graph) + .register(registry)); + saveGraphMeter(graph, + Gauge.builder(baseName + ".min", histogram, HistogramDataWrapper::getMin) + .tag("graph", graph).register(registry)); + + baseName = baseName + ".summary"; + saveGraphMeter(graph, Gauge.builder(baseName, histogram, HistogramDataWrapper::getMedian) + .tags("graph", graph, LABELS, LABEL_50).register(registry)); + saveGraphMeter(graph, + Gauge.builder(baseName, histogram, HistogramDataWrapper::getPercentile95) + .tags("graph", graph, LABELS, LABEL_95).register(registry)); + saveGraphMeter(graph, + Gauge.builder(baseName, histogram, HistogramDataWrapper::getPercentile99) + .tags("graph", graph, LABELS, LABEL_99).register(registry)); + saveGraphMeter(graph, + Gauge.builder(baseName + ".sum", histogram, HistogramDataWrapper::getSum) + .tags("graph", graph).register(registry)); + saveGraphMeter(graph, + Gauge.builder(baseName + ".count", histogram, HistogramDataWrapper::getCount) + .tags("graph", graph).register(registry)); + + } + + private static void registerMemoryUse() { + Gauge.builder(PREFIX + ".table.reader.total", memoryUseWrapper, + (e) -> e.getTableReaderTotal()) + .description("The current number of threads in the pool.") + .register(registry); + Gauge.builder(PREFIX + ".mem.table.total", memoryUseWrapper, (e) -> e.getMemTableTotal()) + .description("The current number of threads in the pool.") + .register(registry); + Gauge.builder(PREFIX + ".mem.table.unFlushed", memoryUseWrapper, + (e) -> e.getMemTableUnFlushed()) + .description("The current number of threads in the pool.") + .register(registry); + Gauge.builder(PREFIX + ".cache.total", memoryUseWrapper, (e) -> e.getCacheTotal()) + .description("The current 
number of threads in the pool.") + .register(registry); + Gauge.builder(PREFIX + ".block.cache.pinned-usage", memoryUseWrapper, + (e) -> e.getProperty("rocksdb.block-cache-pinned-usage")) + .description("The current number of threads in the pool.") + .register(registry); + + } + + private static void registrySessionRefNum(String graph) { + + SessionWrapper sessionWrapper = new SessionWrapper(graph); + saveGraphMeter(graph, + Gauge.builder(PREFIX + ".session.ref.count", sessionWrapper, + (e) -> e.getRefCount() - 1) + .description("The current amount of reference of session") + .tag("ref", "self").tag("graph", graph) + .strongReference(true) + .register(registry) + ); + + } + + private static T getValue(S stat, Function fun, T defaultValue) { + if (stat == null) { + return defaultValue; + } + return fun.apply(stat); + } + + private static class SessionWrapper { + + private final String graphName; + + SessionWrapper(String graph) { + + this.graphName = graph; + } + + public int getRefCount() { + try (RocksDBSession session = getRocksDBSession(graphName)) { + if (session != null) { + return getValue(session, e -> e.getRefCount(), -1); + } + return 0; + } + } + } + + private static class MemoryUseWrapper { + Map mems = null; + long lastTime = 0; + + private void loadData() { + if (mems == null || System.currentTimeMillis() - lastTime > 30000) { + mems = storeEngine.getBusinessHandler().getApproximateMemoryUsageByType(null); + lastTime = System.currentTimeMillis(); + } + } + + public Long getTableReaderTotal() { + loadData(); + return mems.get(MemoryUsageType.kTableReadersTotal); + } + + public Long getMemTableTotal() { + loadData(); + return mems.get(MemoryUsageType.kMemTableTotal); + } + + public Long getCacheTotal() { + loadData(); + return mems.get(MemoryUsageType.kCacheTotal); + } + + public Long getMemTableUnFlushed() { + loadData(); + return mems.get(MemoryUsageType.kMemTableUnFlushed); + } + + public Long getProperty(String property) { + Set graphs = 
rocksDBFactory.getGraphNames(); + if (graphs.size() > 0) { + try (RocksDBSession session = getRocksDBSession((String) graphs.toArray()[0])) { + if (session != null) { + return Long.parseLong(session.getProperty(property)); + } + } + } + return null; + } + } + + private static class StatisticsWrapper { + + private final String graphName; + private final Map tickerCounteMap = new ConcurrentHashMap<>(); + private final Map histogramDataMap = + new ConcurrentHashMap<>(); + long lastTime = 0; + + StatisticsWrapper(String graph) { + + this.graphName = graph; + loadData(); + + } + + private void loadData() { + if (System.currentTimeMillis() - lastTime < 30000) { + return; + } + lastTime = System.currentTimeMillis(); + try (RocksDBSession session = getRocksDBSession(graphName)) { + if (session == null) { + // log.error("Failed to fetch the RocksDBSession with graph's name: [ " + + // graph + " ]"); + return; + } + + Statistics statistics = session.getRocksDbStats(); + for (final TickerType ticker : TICKERS) { + tickerCounteMap.put(ticker, statistics.getTickerCount(ticker)); + } + + for (final HistogramType histogram : HISTOGRAMS) { + histogramDataMap.put(histogram, statistics.getHistogramData(histogram)); + } + } + } + + public long getTickerCount(TickerType tickerType) { + this.loadData(); + return tickerCounteMap.containsKey(tickerType) ? 
tickerCounteMap.get(tickerType) : 0; + } + + public HistogramData getHistogramData(HistogramType histogramType) { + this.loadData(); + return histogramDataMap.get(histogramType); + } + + } + + private static class HistogramDataWrapper { + private final Supplier supplier; + private final HistogramType histogramType; + private HistogramData data = new HistogramData(0d, 0d, 0d, 0d, 0d); + private long ts = System.currentTimeMillis() - 30_000; + + HistogramDataWrapper(HistogramType histogramType, Supplier supplier) { + this.supplier = supplier; + this.histogramType = histogramType; + } + + private HistogramData getData() { + if (System.currentTimeMillis() - this.ts > 30_000) { + HistogramData buf = this.supplier.get(); + if (buf != null) { + this.data = buf; + this.ts = System.currentTimeMillis(); + } + } + return this.data; + } + + public double getMedian() { + return getData().getMedian(); + } + + public double getPercentile95() { + return getData().getPercentile95(); + } + + public double getPercentile99() { + return getData().getPercentile99(); + } + + public double getAverage() { + return getData().getAverage(); + } + + public double getStandardDeviation() { + return getData().getStandardDeviation(); + } + + public double getMax() { + return getData().getMax(); + } + + public long getCount() { + return getData().getCount(); + } + + public long getSum() { + return getData().getSum(); + } + + public double getMin() { + return getData().getMin(); + } + + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java new file mode 100644 index 0000000000..79c009a700 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java @@ -0,0 +1,165 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * 
contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.metrics; + +import org.rocksdb.HistogramType; +import org.rocksdb.TickerType; + +/** + * TODO: refer license later, 80% match, maybe refer to pantheon, This file need refactor! + */ +public final class RocksDBMetricsConst { + public static final String PREFIX = "rocks.stats"; + public static final String LABELS = "quantile"; + public static final String LABEL_50 = "0.5"; + public static final String LABEL_95 = "0.95"; + public static final String LABEL_99 = "0.99"; + + // Tickers - RocksDB equivalent of counters + public static final TickerType[] TICKERS = { + TickerType.BLOCK_CACHE_ADD, + TickerType.BLOCK_CACHE_HIT, + TickerType.BLOCK_CACHE_ADD_FAILURES, + TickerType.BLOCK_CACHE_INDEX_MISS, + TickerType.BLOCK_CACHE_INDEX_HIT, + TickerType.BLOCK_CACHE_INDEX_ADD, + TickerType.BLOCK_CACHE_INDEX_BYTES_INSERT, + TickerType.BLOCK_CACHE_INDEX_BYTES_EVICT, + TickerType.BLOCK_CACHE_FILTER_MISS, + TickerType.BLOCK_CACHE_FILTER_HIT, + TickerType.BLOCK_CACHE_FILTER_ADD, + TickerType.BLOCK_CACHE_FILTER_BYTES_INSERT, + TickerType.BLOCK_CACHE_FILTER_BYTES_EVICT, + TickerType.BLOCK_CACHE_DATA_MISS, + TickerType.BLOCK_CACHE_DATA_HIT, + TickerType.BLOCK_CACHE_DATA_ADD, + TickerType.BLOCK_CACHE_DATA_BYTES_INSERT, + 
TickerType.BLOCK_CACHE_BYTES_READ, + TickerType.BLOCK_CACHE_BYTES_WRITE, + TickerType.BLOOM_FILTER_USEFUL, + TickerType.PERSISTENT_CACHE_HIT, + TickerType.PERSISTENT_CACHE_MISS, + TickerType.SIM_BLOCK_CACHE_HIT, + TickerType.SIM_BLOCK_CACHE_MISS, + TickerType.MEMTABLE_HIT, + TickerType.MEMTABLE_MISS, + TickerType.GET_HIT_L0, + TickerType.GET_HIT_L1, + TickerType.GET_HIT_L2_AND_UP, + TickerType.COMPACTION_KEY_DROP_NEWER_ENTRY, + TickerType.COMPACTION_KEY_DROP_OBSOLETE, + TickerType.COMPACTION_KEY_DROP_RANGE_DEL, + TickerType.COMPACTION_KEY_DROP_USER, + TickerType.COMPACTION_RANGE_DEL_DROP_OBSOLETE, + TickerType.NUMBER_KEYS_WRITTEN, + TickerType.NUMBER_KEYS_READ, + TickerType.NUMBER_KEYS_UPDATED, + TickerType.BYTES_WRITTEN, + TickerType.BYTES_READ, + TickerType.NUMBER_DB_SEEK, + TickerType.NUMBER_DB_NEXT, + TickerType.NUMBER_DB_PREV, + TickerType.NUMBER_DB_SEEK_FOUND, + TickerType.NUMBER_DB_NEXT_FOUND, + TickerType.NUMBER_DB_PREV_FOUND, + TickerType.ITER_BYTES_READ, + TickerType.NO_FILE_CLOSES, + TickerType.NO_FILE_OPENS, + TickerType.NO_FILE_ERRORS, + // TickerType.STALL_L0_SLOWDOWN_MICROS, + // TickerType.STALL_MEMTABLE_COMPACTION_MICROS, + // TickerType.STALL_L0_NUM_FILES_MICROS, + TickerType.STALL_MICROS, + TickerType.DB_MUTEX_WAIT_MICROS, + TickerType.RATE_LIMIT_DELAY_MILLIS, + TickerType.NO_ITERATORS, + TickerType.NUMBER_MULTIGET_BYTES_READ, + TickerType.NUMBER_MULTIGET_KEYS_READ, + TickerType.NUMBER_MULTIGET_CALLS, + TickerType.NUMBER_FILTERED_DELETES, + TickerType.NUMBER_MERGE_FAILURES, + TickerType.BLOOM_FILTER_PREFIX_CHECKED, + TickerType.BLOOM_FILTER_PREFIX_USEFUL, + TickerType.NUMBER_OF_RESEEKS_IN_ITERATION, + TickerType.GET_UPDATES_SINCE_CALLS, + TickerType.BLOCK_CACHE_COMPRESSED_MISS, + TickerType.BLOCK_CACHE_COMPRESSED_HIT, + TickerType.BLOCK_CACHE_COMPRESSED_ADD, + TickerType.BLOCK_CACHE_COMPRESSED_ADD_FAILURES, + TickerType.WAL_FILE_SYNCED, + TickerType.WAL_FILE_BYTES, + TickerType.WRITE_DONE_BY_SELF, + TickerType.WRITE_DONE_BY_OTHER, + 
TickerType.WRITE_TIMEDOUT, + TickerType.WRITE_WITH_WAL, + TickerType.COMPACT_READ_BYTES, + TickerType.COMPACT_WRITE_BYTES, + TickerType.FLUSH_WRITE_BYTES, + TickerType.NUMBER_DIRECT_LOAD_TABLE_PROPERTIES, + TickerType.NUMBER_SUPERVERSION_ACQUIRES, + TickerType.NUMBER_SUPERVERSION_RELEASES, + TickerType.NUMBER_SUPERVERSION_CLEANUPS, + TickerType.NUMBER_BLOCK_COMPRESSED, + TickerType.NUMBER_BLOCK_DECOMPRESSED, + TickerType.NUMBER_BLOCK_NOT_COMPRESSED, + TickerType.MERGE_OPERATION_TOTAL_TIME, + TickerType.FILTER_OPERATION_TOTAL_TIME, + TickerType.ROW_CACHE_HIT, + TickerType.ROW_CACHE_MISS, + TickerType.READ_AMP_ESTIMATE_USEFUL_BYTES, + TickerType.READ_AMP_TOTAL_READ_BYTES, + TickerType.NUMBER_RATE_LIMITER_DRAINS, + TickerType.NUMBER_ITER_SKIP, + TickerType.NUMBER_MULTIGET_KEYS_FOUND, + }; + + // Histograms - treated as prometheus summaries + public static final HistogramType[] HISTOGRAMS = { + HistogramType.DB_GET, + HistogramType.DB_WRITE, + HistogramType.COMPACTION_TIME, + HistogramType.SUBCOMPACTION_SETUP_TIME, + HistogramType.TABLE_SYNC_MICROS, + HistogramType.COMPACTION_OUTFILE_SYNC_MICROS, + HistogramType.WAL_FILE_SYNC_MICROS, + HistogramType.MANIFEST_FILE_SYNC_MICROS, + HistogramType.TABLE_OPEN_IO_MICROS, + HistogramType.DB_MULTIGET, + HistogramType.READ_BLOCK_COMPACTION_MICROS, + HistogramType.READ_BLOCK_GET_MICROS, + HistogramType.WRITE_RAW_BLOCK_MICROS, + HistogramType.STALL_L0_SLOWDOWN_COUNT, + HistogramType.STALL_MEMTABLE_COMPACTION_COUNT, + HistogramType.STALL_L0_NUM_FILES_COUNT, + HistogramType.HARD_RATE_LIMIT_DELAY_COUNT, + HistogramType.SOFT_RATE_LIMIT_DELAY_COUNT, + HistogramType.NUM_FILES_IN_SINGLE_COMPACTION, + HistogramType.DB_SEEK, + HistogramType.WRITE_STALL, + HistogramType.SST_READ_MICROS, + HistogramType.NUM_SUBCOMPACTIONS_SCHEDULED, + HistogramType.BYTES_PER_READ, + HistogramType.BYTES_PER_WRITE, + HistogramType.BYTES_PER_MULTIGET, + HistogramType.BYTES_COMPRESSED, + HistogramType.BYTES_DECOMPRESSED, + HistogramType.COMPRESSION_TIMES_NANOS, + 
HistogramType.DECOMPRESSION_TIMES_NANOS, + HistogramType.READ_NUM_MERGE_OPERANDS, + }; +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java new file mode 100644 index 0000000000..9fcdbc685e --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.metrics; + + +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +import org.apache.hugegraph.store.HgStoreEngine; +import org.apache.hugegraph.store.meta.Partition; + +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.MeterRegistry; + +/** + * 2021/12/28 + */ +public final class StoreMetrics { + public final static String PREFIX = "hg"; + private final static HgStoreEngine storeEngine = HgStoreEngine.getInstance(); + private final static AtomicInteger graphs = new AtomicInteger(0); + private static MeterRegistry registry; + + private StoreMetrics() { + } + + public synchronized static void init(MeterRegistry meterRegistry) { + if (registry == null) { + registry = meterRegistry; + registerMeters(); + } + } + + private static void registerMeters() { + Gauge.builder(PREFIX + ".up", () -> 1).register(registry); + Gauge.builder(PREFIX + ".graphs", StoreMetrics::updateGraphs) + .description("Number of graphs stored in this node") + .register(registry); + + } + + private static int getGraphs() { + return getGraphPartitions().size(); + } + + private static int updateGraphs() { + int buf = getGraphs(); + if (buf != graphs.get()) { + graphs.set(buf); + registerPartitionGauge(); + } + return buf; + } + + private static void registerPartitionGauge() { + Map> map = getGraphPartitions(); + + map.forEach((k, v) -> Gauge.builder(PREFIX + ".partitions", new PartitionsGetter(k)) + .description("Number of partitions stored in the node") + .tag("graph", k) + .register(registry)); + + } + + private static int getPartitions(String graph) { + Map map = getGraphPartitions().get(graph); + if (map == null) { + return 0; + } else { + return map.size(); + } + } + + private static Map> getGraphPartitions() { + Map> map = + storeEngine.getPartitionManager().getPartitions(); + if (map == null) { + return 
Collections.emptyMap(); + } + return map; + } + + private static class PartitionsGetter implements Supplier { + private final String graph; + + PartitionsGetter(String graph) { + this.graph = graph; + } + + @Override + public Number get() { + return getPartitions(this.graph); + } + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/SystemMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/SystemMetrics.java new file mode 100644 index 0000000000..1935c90c05 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/SystemMetrics.java @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.metrics; + +import java.lang.management.ClassLoadingMXBean; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryUsage; +import java.lang.management.ThreadMXBean; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.util.Bytes; + +@Deprecated +public class SystemMetrics { + + private static final long MB = Bytes.MB; + + private static long totalNonHeapMemory() { + try { + return ManagementFactory.getMemoryMXBean() + .getNonHeapMemoryUsage() + .getCommitted(); + } catch (Throwable ignored) { + return 0; + } + } + + private static String formatName(String name) { + return StringUtils.replace(name, " ", "_").toLowerCase(); + } + + public Map> metrics() { + Map> metrics = new LinkedHashMap<>(); + metrics.put("basic", this.getBasicMetrics()); + metrics.put("heap", this.getHeapMetrics()); + metrics.put("nonheap", this.getNonHeapMetrics()); + metrics.put("thread", this.getThreadMetrics()); + metrics.put("class_loading", this.getClassLoadingMetrics()); + metrics.put("garbage_collector", this.getGarbageCollectionMetrics()); + + return metrics; + } + + private Map getBasicMetrics() { + Map metrics = new LinkedHashMap<>(); + Runtime runtime = Runtime.getRuntime(); + // Heap allocated memory (measured in bytes) + long total = runtime.totalMemory(); + // Heap free memory + long free = runtime.freeMemory(); + long used = total - free; + + metrics.put("mem", (total + totalNonHeapMemory()) / MB); + metrics.put("mem_total", total / MB); + metrics.put("mem_used", used / MB); + metrics.put("mem_free", free / MB); + metrics.put("mem_unit", "MB"); + metrics.put("processors", runtime.availableProcessors()); + metrics.put("uptime", ManagementFactory.getRuntimeMXBean().getUptime()); + metrics.put("systemload_average", + 
ManagementFactory.getOperatingSystemMXBean() + .getSystemLoadAverage()); + return metrics; + } + + private Map getHeapMetrics() { + Map metrics = new LinkedHashMap<>(); + MemoryUsage memoryUsage = ManagementFactory.getMemoryMXBean() + .getHeapMemoryUsage(); + metrics.put("committed", memoryUsage.getCommitted() / MB); + metrics.put("init", memoryUsage.getInit() / MB); + metrics.put("used", memoryUsage.getUsed() / MB); + metrics.put("max", memoryUsage.getMax() / MB); + return metrics; + } + + private Map getNonHeapMetrics() { + Map metrics = new LinkedHashMap<>(); + MemoryUsage memoryUsage = ManagementFactory.getMemoryMXBean() + .getNonHeapMemoryUsage(); + metrics.put("committed", memoryUsage.getCommitted() / MB); + metrics.put("init", memoryUsage.getInit() / MB); + metrics.put("used", memoryUsage.getUsed() / MB); + metrics.put("max", memoryUsage.getMax() / MB); + return metrics; + } + + private Map getThreadMetrics() { + Map metrics = new LinkedHashMap<>(); + ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean(); + metrics.put("peak", threadMxBean.getPeakThreadCount()); + metrics.put("daemon", threadMxBean.getDaemonThreadCount()); + metrics.put("total_started", threadMxBean.getTotalStartedThreadCount()); + metrics.put("count", threadMxBean.getThreadCount()); + return metrics; + } + + private Map getClassLoadingMetrics() { + Map metrics = new LinkedHashMap<>(); + ClassLoadingMXBean classLoadingMxBean = ManagementFactory + .getClassLoadingMXBean(); + metrics.put("count", classLoadingMxBean.getLoadedClassCount()); + metrics.put("loaded", classLoadingMxBean.getTotalLoadedClassCount()); + metrics.put("unloaded", classLoadingMxBean.getUnloadedClassCount()); + return metrics; + } + + private Map getGarbageCollectionMetrics() { + Map metrics = new LinkedHashMap<>(); + List gcMxBeans = ManagementFactory + .getGarbageCollectorMXBeans(); + for (GarbageCollectorMXBean gcMxBean : gcMxBeans) { + String name = formatName(gcMxBean.getName()); + metrics.put(name + 
"_count", gcMxBean.getCollectionCount()); + metrics.put(name + "_time", gcMxBean.getCollectionTime()); + } + metrics.put("time_unit", "ms"); + return metrics; + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java new file mode 100644 index 0000000000..6f70e2d48d --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.model; + +import java.util.Objects; + +/** + * created on 2021/11/1 + */ +public class HgNodeStatus { + private int status; + private String text; + + public HgNodeStatus(int status, String text) { + this.status = status; + this.text = text; + } + + public int getStatus() { + return status; + } + + public HgNodeStatus setStatus(int status) { + this.status = status; + return this; + } + + public String getText() { + return text; + } + + public HgNodeStatus setText(String text) { + this.text = text; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HgNodeStatus that = (HgNodeStatus) o; + return status == that.status && Objects.equals(text, that.text); + } + + @Override + public int hashCode() { + return Objects.hash(status, text); + } + + @Override + public String toString() { + return "HgNodeStatus{" + + "status=" + status + + ", text='" + text + '\'' + + '}'; + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java new file mode 100644 index 0000000000..12382e1358 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
/**
 * Base58 codec using the Bitcoin alphabet (no 0/O/I/l), without checksums.
 * Leading zero bytes are preserved as leading '1' characters.
 * TODO: confirm licensing provenance — close match to the bitcoinj implementation.
 */
public class Base58 {

    public static final char[] ALPHABET =
            "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz".toCharArray();
    private static final int[] INDEXES = new int[128];

    static {
        java.util.Arrays.fill(INDEXES, -1);
        for (int i = 0; i < ALPHABET.length; i++) {
            INDEXES[ALPHABET[i]] = i;
        }
    }

    /**
     * Encodes the given bytes in base58. No checksum is appended.
     */
    public static String encode(byte[] input) {
        if (input.length == 0) {
            return "";
        }
        // Work on a copy: the repeated division mutates the buffer.
        byte[] work = input.clone();
        int leadingZeros = 0;
        while (leadingZeros < work.length && work[leadingZeros] == 0) {
            ++leadingZeros;
        }
        // Repeated divmod by 58, filling the output right-to-left.
        byte[] encoded = new byte[work.length * 2];
        int pos = encoded.length;
        int start = leadingZeros;
        while (start < work.length) {
            byte remainder = divmod58(work, start);
            if (work[start] == 0) {
                ++start;
            }
            encoded[--pos] = (byte) ALPHABET[remainder];
        }
        // Strip surplus '1's produced by the conversion itself...
        while (pos < encoded.length && encoded[pos] == ALPHABET[0]) {
            ++pos;
        }
        // ...then restore exactly one '1' per leading zero byte.
        while (--leadingZeros >= 0) {
            encoded[--pos] = (byte) ALPHABET[0];
        }
        byte[] trimmed = java.util.Arrays.copyOfRange(encoded, pos, encoded.length);
        return new String(trimmed, StandardCharsets.US_ASCII);
    }

    /**
     * Decodes a base58 string back into bytes.
     *
     * @throws IllegalArgumentException on any character outside the alphabet
     */
    public static byte[] decode(String input) throws IllegalArgumentException {
        if (input.length() == 0) {
            return new byte[0];
        }
        // Map each character to its alphabet index.
        byte[] digits = new byte[input.length()];
        for (int i = 0; i < input.length(); ++i) {
            char c = input.charAt(i);
            int index = (c < 128) ? INDEXES[c] : -1;
            if (index < 0) {
                throw new IllegalArgumentException("Illegal character " + c + " at " + i);
            }
            digits[i] = (byte) index;
        }
        int leadingZeros = 0;
        while (leadingZeros < digits.length && digits[leadingZeros] == 0) {
            ++leadingZeros;
        }
        // Repeated divmod by 256, filling the output right-to-left.
        byte[] decoded = new byte[input.length()];
        int pos = decoded.length;
        int start = leadingZeros;
        while (start < digits.length) {
            byte remainder = divmod256(digits, start);
            if (digits[start] == 0) {
                ++start;
            }
            decoded[--pos] = remainder;
        }
        // Skip zero bytes produced by the conversion, keeping the originals.
        while (pos < decoded.length && decoded[pos] == 0) {
            ++pos;
        }
        return java.util.Arrays.copyOfRange(decoded, pos - leadingZeros, decoded.length);
    }

    /** Decodes to an unsigned BigInteger (useful for checksum arithmetic). */
    public static BigInteger decodeToBigInteger(String input) throws IllegalArgumentException {
        return new BigInteger(1, decode(input));
    }

    // In-place long-division step: number (base-256) /= 58, returns number % 58.
    private static byte divmod58(byte[] number, int startAt) {
        int remainder = 0;
        for (int i = startAt; i < number.length; i++) {
            int accumulator = remainder * 256 + ((int) number[i] & 0xFF);
            number[i] = (byte) (accumulator / 58);
            remainder = accumulator % 58;
        }
        return (byte) remainder;
    }

    // In-place long-division step: number58 (base-58) /= 256, returns number58 % 256.
    private static byte divmod256(byte[] number58, int startAt) {
        int remainder = 0;
        for (int i = startAt; i < number58.length; i++) {
            int accumulator = remainder * 58 + ((int) number58[i] & 0xFF);
            number58[i] = (byte) (accumulator / 256);
            remainder = accumulator % 256;
        }
        return (byte) remainder;
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.util; + + +class Err { + private final String msg; + + private Err(String msg) { + this.msg = msg; + } + + public static Err of(String msg) { + return new Err(msg); + } + + @Override + public String toString() { + return "Err{" + + "msg='" + msg + '\'' + + '}'; + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java new file mode 100644 index 0000000000..a6dbff59fc --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Argument-checking helpers: assertion methods that throw
 * {@link IllegalArgumentException} (or a supplied exception) on violation,
 * plus null/empty predicates for strings, arrays, maps and collections.
 */
public final class HgAssert {

    /** Throws IllegalArgumentException with the given message when expression is false. */
    @Deprecated
    public static void isTrue(boolean expression, String message) {
        if (message == null) {
            throw new IllegalArgumentException("message is null");
        }
        if (!expression) {
            throw new IllegalArgumentException(message);
        }
    }

    /** Lazy-message variant: the supplier is only invoked on failure. */
    public static void isTrue(boolean expression, Supplier<String> msg) {
        if (msg == null) {
            throw new IllegalArgumentException("message supplier is null");
        }
        if (!expression) {
            throw new IllegalArgumentException(msg.get());
        }
    }

    /** Throws the supplied exception when expression is false. */
    @Deprecated
    public static void isTrue(boolean expression, RuntimeException e) {
        if (e == null) {
            throw new IllegalArgumentException("e is null");
        }
        if (!expression) {
            throw e;
        }
    }

    public static void isFalse(boolean expression, String message) {
        isTrue(!expression, message);
    }

    public static void isFalse(boolean expression, Supplier<String> msg) {
        isTrue(!expression, msg);
    }

    public static void isFalse(boolean expression, RuntimeException e) {
        isTrue(!expression, e);
    }

    public static void isArgumentValid(byte[] bytes, String parameter) {
        isFalse(isInvalid(bytes), () -> "The argument is invalid: " + parameter);
    }

    public static void isArgumentValid(String str, String parameter) {
        isFalse(isInvalid(str), () -> "The argument is invalid: " + parameter);
    }

    public static void isArgumentNotNull(Object obj, String parameter) {
        isTrue(obj != null, () -> "The argument is null: " + parameter);
    }

    // NOTE(review): name keeps its historical typo ("ist") for API compatibility.
    public static void istValid(byte[] bytes, String msg) {
        isFalse(isInvalid(bytes), msg);
    }

    public static void isValid(String str, String msg) {
        isFalse(isInvalid(str), msg);
    }

    public static void isNotNull(Object obj, String msg) {
        isTrue(obj != null, msg);
    }

    /** True when obj is found (by equals) in objs; false for null/empty input. */
    public static boolean isContains(Object[] objs, Object obj) {
        if (objs == null || objs.length == 0 || obj == null) {
            return false;
        }
        for (Object candidate : objs) {
            if (obj.equals(candidate)) {
                return true;
            }
        }
        return false;
    }

    /** True when the varargs array is null/empty or any element is null/blank. */
    public static boolean isInvalid(String... strs) {
        if (strs == null || strs.length == 0) {
            return true;
        }
        for (String s : strs) {
            if (s == null || s.trim().isEmpty()) {
                return true;
            }
        }
        return false;
    }

    public static boolean isInvalid(byte[] bytes) {
        return bytes == null || bytes.length == 0;
    }

    public static boolean isInvalid(Map<?, ?> map) {
        return map == null || map.isEmpty();
    }

    public static boolean isInvalid(Collection<?> list) {
        return list == null || list.isEmpty();
    }

    /** Null-safe {@link Collection#contains}; null item never matches. */
    public static <T> boolean isContains(Collection<T> list, T item) {
        return list != null && item != null && list.contains(item);
    }

    /** True when the varargs array itself, or any of its elements, is null. */
    public static boolean isNull(Object... objs) {
        if (objs == null) {
            return true;
        }
        for (Object o : objs) {
            if (o == null) {
                return true;
            }
        }
        return false;
    }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.util; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Executor; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import javax.annotation.CheckForNull; + +import lombok.extern.slf4j.Slf4j; + +/** + * 2022/3/15 + * + * @version 0.2.0 + */ +@Slf4j +public final class HgBufferProxy { + private final BlockingQueue> queue; + private final AtomicBoolean closed = new AtomicBoolean(false); + private final ReentrantLock lock = new ReentrantLock(); + private final Runnable applier; + private final Executor executor; + + private HgBufferProxy(Executor executor, Runnable applier) { + this.executor = executor; + this.applier = applier; + this.queue = new LinkedBlockingQueue<>(); + } + + public static HgBufferProxy of(Executor executor, Runnable applier) { + HgAssert.isArgumentNotNull(applier, "applier"); + HgAssert.isArgumentNotNull(executor, "executor"); + return new HgBufferProxy(executor, applier); + } + + public void send(T t) { + if (t == null) { + throw new IllegalArgumentException("the argument t is null"); + } + + if (this.closed.get()) { + log.warn("the proxy has been closed"); + return; + } + + this.lock.lock(); + try { + this.queue.offer(() -> t); + } finally { + lock.unlock(); + } + } + + /** + * return an item from the chan + * + * @throws RuntimeException when fail 
to receive an item. + */ + @CheckForNull + public T receive(int timeoutSeconds) { + return receive(timeoutSeconds, (timeout) -> { + throw new RuntimeException("Timeout, max time: " + timeout + " seconds;"); + }); + } + + private void apply() { + this.lock.lock(); + try { + if (!this.closed.get()) { + this.executor.execute(this.applier); + Thread.yield(); + } + } finally { + this.lock.unlock(); + } + + } + + /** + * return an item from the chan + * + * @return null when the chan has been closed + * @throws RuntimeException + */ + @CheckForNull + public T receive(int timeoutSeconds, Consumer timeoutCallBack) { + Supplier s = null; + + if (this.closed.get()) { + s = this.queue.poll(); + return s != null ? s.get() : null; + } + + if (this.queue.size() <= 1) { + this.apply(); + } + + lock.lock(); + try { + if (this.isClosed()) { + s = this.queue.poll(); + return s != null ? s.get() : null; + } + } finally { + lock.unlock(); + } + + try { + s = this.queue.poll(timeoutSeconds, TimeUnit.SECONDS); + } catch (Throwable t) { + log.error("Failed to receive a item from chan. cause by: ", t); + throw new RuntimeException(t); + } + + if (s == null) { + if (this.closed.get()) { + s = this.queue.poll(); + } else { + if (timeoutCallBack == null) { + throw new RuntimeException( + "Timeout, max time: " + timeoutSeconds + " seconds;"); + } else { + timeoutCallBack.accept(timeoutSeconds); + } + } + } + + return s != null ? 
s.get() : null; + + } + + public boolean isClosed() { + return this.closed.get(); + } + + /** + * @throws RuntimeException when fail to close the chan + */ + public void close() { + if (this.closed.get()) { + return; + } + lock.lock(); + this.closed.set(true); + try { + this.queue.offer(() -> null); + } finally { + lock.unlock(); + } + + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgChannel.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgChannel.java new file mode 100644 index 0000000000..722ff90ae8 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgChannel.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.util; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import javax.annotation.CheckForNull; + +import lombok.extern.slf4j.Slf4j; + +/** + * Golang style channel without buffering + *

+ * 2022/2/28 + * + * @version 1.1 on 2022/04/02 + */ +@Slf4j +public final class HgChannel { + + private final BlockingQueue> queue; + private final AtomicBoolean closed = new AtomicBoolean(false); + private final long timeoutSeconds; + + private HgChannel(long timeoutSeconds) { + this.timeoutSeconds = timeoutSeconds; + this.queue = new SynchronousQueue(); + } + + public static HgChannel of() { + return new HgChannel(Long.MAX_VALUE); + } + + public static HgChannel of(long timeoutSeconds) { + return new HgChannel(timeoutSeconds); + } + + /** + * @param t + * @return true if send successfully, false when it is timeout. + * @throws IllegalArgumentException when the argument t is null. + * @throws IllegalStateException when the chan has been closed. + * @throws RuntimeException when InterruptedException happen + */ + public boolean send(T t) { + if (t == null) { + throw new IllegalArgumentException("the argument t is null"); + } + synchronized (this.queue) { + if (this.closed.get()) { + return false; + } + boolean flag; + try { + flag = this.queue.offer(() -> t, timeoutSeconds, TimeUnit.SECONDS); + } catch (InterruptedException e) { + log.error("failed to send a item to chan. cause by: ", t); + throw new RuntimeException(e); + } + return flag; + } + } + + + /** + * return an item from the chan + * + * @throws RuntimeException when fail to receive an item. + */ + @CheckForNull + public T receive() { + return receive((timeout) -> { + throw new RuntimeException("Timeout, max time: " + timeout + " seconds;"); + }); + } + + /** + * return an item from the chan + * + * @return null when the chan has been closed + * @throws RuntimeException + */ + @CheckForNull + public T receive(Consumer timeoutCallBack) { + Supplier s; + synchronized (this.closed) { + if (this.closed.get()) { + s = this.queue.poll(); + } else { + try { + s = this.queue.poll(timeoutSeconds, TimeUnit.SECONDS); + } catch (Throwable t) { + log.error("Failed to receive a item from chan. 
cause by: ", t); + throw new RuntimeException(t); + } + if (s == null) { + if (timeoutCallBack == null) { + throw new RuntimeException( + "Timeout, max time: " + timeoutSeconds + " seconds;"); + } else { + timeoutCallBack.accept(timeoutSeconds); + } + } + } + } + if (s == null) { + return null; + } else { + return s.get(); + } + } + + public boolean isClosed() { + return this.closed.get(); + } + + /** + * @throws RuntimeException when fail to close the chan + */ + public void close() { + if (this.closed.get()) { + return; + } + this.closed.set(true); + this.queue.offer(() -> null); + Thread.yield(); + this.queue.poll(); + + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgExecutorUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgExecutorUtil.java new file mode 100644 index 0000000000..b8bb30f13f --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgExecutorUtil.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Named, cached {@link ThreadPoolExecutor} factory: at most one executor is
 * created per name, with daemon threads named "&lt;name&gt;-&lt;n&gt;".
 */
public final class HgExecutorUtil {

    // One executor per logical name; ConcurrentHashMap gives per-key atomicity.
    private final static Map<String, ThreadPoolExecutor> threadPoolMap =
            new ConcurrentHashMap<>();

    /** Returns a factory producing daemon threads prefixed with namePrefix. */
    public static ThreadFactory newThreadFactory(String namePrefix) {
        return new HgDefaultThreadFactory(namePrefix);
    }

    /** Returns the executor registered under name, or null when absent. */
    public static ThreadPoolExecutor getThreadPoolExecutor(String name) {
        if (name == null) {
            return null;
        }
        return threadPoolMap.get(name);
    }

    /**
     * Returns the executor registered under name, creating it on first use.
     * A non-positive queueSize selects a direct-handoff SynchronousQueue.
     * Idle threads above the core size are reclaimed after 60 seconds.
     */
    public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads,
                                                    int queueSize) {
        // computeIfAbsent replaces the original hand-rolled double-checked
        // locking: it is atomic per key and never creates a duplicate pool.
        return threadPoolMap.computeIfAbsent(name, key -> {
            BlockingQueue<Runnable> queue = queueSize <= 0
                    ? new SynchronousQueue<>()
                    : new LinkedBlockingQueue<>(queueSize);
            return new ThreadPoolExecutor(coreThreads, maxThreads, 60L, TimeUnit.SECONDS, queue,
                                          newThreadFactory(key));
        });
    }

    /**
     * The default thread factory, which added threadNamePrefix in the
     * construction method. All produced threads are daemons at normal priority.
     */
    static class HgDefaultThreadFactory implements ThreadFactory {

        private final ThreadGroup group;
        private final AtomicInteger threadNumber = new AtomicInteger(1);
        private final String namePrefix;

        HgDefaultThreadFactory(String threadNamePrefix) {
            SecurityManager s = System.getSecurityManager();
            group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
            this.namePrefix = threadNamePrefix + "-";
        }

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(group, r, namePrefix + threadNumber.getAndIncrement(), 0);
            t.setDaemon(true);
            t.setPriority(Thread.NORM_PRIORITY);
            return t;
        }
    }
}
+ */ + +package org.apache.hugegraph.store.node.util; + +import org.apache.hugegraph.store.grpc.common.Key; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.common.ResCode; +import org.apache.hugegraph.store.grpc.common.ResStatus; +import org.apache.hugegraph.store.grpc.common.Tk; +import org.apache.hugegraph.store.grpc.common.Tse; +import org.apache.hugegraph.store.term.HgPair; + +import com.google.common.base.Throwables; +import com.google.protobuf.ByteString; + +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import jline.internal.Log; + + +public abstract class HgGrpc { + private static final ResCode OK = ResCode.RES_CODE_OK; + + public static ResStatus not() { + return toStatus(ResCode.RES_CODE_NOT_EXIST, "not exist"); + } + + public static ResStatus fail() { + return toStatus(ResCode.RES_CODE_FAIL, "failure"); + } + + public static ResStatus fail(String msg) { + return toStatus(ResCode.RES_CODE_FAIL, msg); + } + + public static ResStatus success() { + return success("success"); + } + + public static ResStatus success(String msg) { + return toStatus(ResCode.RES_CODE_OK, msg); + } + + public static ResStatus toStatus(ResCode code, String msg) { + return ResStatus.newBuilder() + .setCode(code) + .setMsg(msg).build(); + } + + public static HgPair toHgPair(Key key) { + return new HgPair(key.getCode(), key.getKey().toByteArray()); + } + + public static HgPair toHgPair(Tk tk) { + return new HgPair( + tk.getTable(), tk.getKey().toByteArray() + ); + } + + public static Kv toKv(HgPair pair) { + return toKv(pair, Kv.newBuilder()); + } + + public static Kv toKv(HgPair pair, Kv.Builder builder) { + return builder.clear() + .setKey(ByteString.copyFrom(pair.getKey())) + .setValue(ByteString.copyFrom(pair.getValue())) + .build(); + } + + public static HgPair toHgPair(Tse tse) { + return new HgPair( + tse.getStart().getKey().toByteArray(), + tse.getEnd().getKey().toByteArray() + ); + } + + public static 
StatusRuntimeException toErr(String msg) {
+        return toErr(Status.INTERNAL, msg, null);
+    }
+
+    public static StatusRuntimeException toErr(Status.Code code,
+                                               String des) {
+        return toErr(code, des, null);
+    }
+
+    public static StatusRuntimeException toErr(Status.Code code,
+                                               String des,
+                                               Throwable t) {
+        return toErr(code.toStatus(), des, t);
+    }
+
+    public static StatusRuntimeException toErr(Status status,
+                                               String des,
+                                               Throwable t) {
+        if (t != null) {
+            // Append the full stack trace so the client receives the complete error info
+            des = (des == null ? "" : des + ",") +
+                  Throwables.getStackTraceAsString(t);
+        }
+        Status wdStatus = status.withDescription(des);
+        Log.error(wdStatus);
+        Status fullStatus = wdStatus.withCause(t);
+        return new StatusRuntimeException(fullStatus);
+    }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgRegexUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgRegexUtil.java
new file mode 100644
index 0000000000..1336eaf8d1
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgRegexUtil.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hugegraph.store.node.util; + +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * created on 2022/03/07 + */ +public class HgRegexUtil { + + public static String getGroupValue(String regex, String source, int groupId) { + if (regex == null || "".equals(regex) || source == null || "".equals(source)) { + return null; + } + + String value = ""; + + Pattern p = Pattern.compile(regex); + Matcher m = p.matcher(source); + + while (m.find()) { + for (int i = 0; i <= m.groupCount(); i++) { + if (i == groupId) { + value = m.group(i); + } + } + } + return value; + } + + public static List toGroupValues(String regex, String source) { + if (regex == null || "".equals(regex) || source == null || "".equals(source)) { + return null; + } + + Pattern p = Pattern.compile(regex); + Matcher m = p.matcher(source); + + List list = null; + + while (m.find()) { + list = new ArrayList<>(m.groupCount()); + for (int i = 0; i <= m.groupCount(); i++) { + list.add(m.group(i)); + } + } + + return list; + } + + public static List getMatchList(String regex, String source) { + if (regex == null || "".equals(regex) || source == null || "".equals(source)) { + return null; + } + Pattern p = Pattern.compile(regex, Pattern.MULTILINE); + Matcher m = p.matcher(source); + List list = new ArrayList<>(); + while (m.find()) { + list.add(m.group(0)); + } + return list.isEmpty() ? 
null : list; + } + + + public static void main(String[] args) { + List res = toGroupValues("(replicator)(.+?:\\d+)(.*)", + "replicator_10.14.139.10:8081_append_entries_times"); + if (res != null) { + res.stream().forEach(e -> System.out.println(e)); + } + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreConst.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreConst.java new file mode 100644 index 0000000000..546cfe8620 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreConst.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.util; + +import java.util.Collections; +import java.util.List; + +/** + * created on 2021/10/22 + */ +public final class HgStoreConst { + + public final static int SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS = 300; + + public final static byte[] EMPTY_BYTES = new byte[0]; + + public static final List EMPTY_LIST = Collections.EMPTY_LIST; + + public final static int SCAN_ALL_PARTITIONS_ID = -1; // means scan all partitions. 
+ + private HgStoreConst() { + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreNodeUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreNodeUtil.java new file mode 100644 index 0000000000..2716704bcd --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreNodeUtil.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.util; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.UUID; + +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/10/29 + */ +@Slf4j +public final class HgStoreNodeUtil { + public static String toStr(byte[] b) { + if (b == null) { + return ""; + } + if (b.length == 0) { + return ""; + } + return new String(b, StandardCharsets.UTF_8); + } + + public static byte[] toBytes(String str) { + if (str == null) { + return null; + } + return str.getBytes(StandardCharsets.UTF_8); + } + + public static byte[] toBytes(long l) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.putLong(l); + return buffer.array(); + } + + private static byte[] toBytes(final int i) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.putInt(i); + return buffer.array(); + } + + public static long toLong(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getLong(); + } + + public static int toInt(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getInt(); + } + + public static String toUuidStr(byte[] bytes) { + try { + return UUID.nameUUIDFromBytes(bytes).toString(); + } catch (Throwable t) { + log.error("Failed to parse bytes to UUID", t); + } + return null; + } + +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/PropertyUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/PropertyUtil.java new file mode 100644 index 0000000000..425a9828ee --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/PropertyUtil.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.util; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PropertyUtil { + private static final Logger LOG = LoggerFactory.getLogger(PropertyUtil.class); + + public static String get(String key) { + return get(key, null); + } + + public static String get(final String key, String def) { + if (key == null) { + throw new NullPointerException("key"); + } + if (key.isEmpty()) { + throw new IllegalArgumentException("key must not be empty."); + } + + String value = null; + try { + if (System.getSecurityManager() == null) { + value = System.getProperty(key); + } else { + value = AccessController.doPrivileged( + (PrivilegedAction) () -> System.getProperty(key)); + } + } catch (Exception e) { + LOG.error("exception {}", e); + } + + if (value == null) { + return def; + } + + return value; + } + + + public static boolean getBoolean(String key, boolean def) { + String value = get(key, Boolean.toString(def)); + value = value.trim().toLowerCase(); + if (value.isEmpty()) { + return true; + } + + if ("true".equals(value) || "yes".equals(value) || "1".equals(value)) { + return true; + } + + if ("false".equals(value) || "no".equals(value) || 
"0".equals(value)) { + return false; + } + return def; + } + + public static int getInt(String key, int def) { + String value = get(key); + if (value == null) { + return def; + } + + value = value.trim().toLowerCase(); + try { + return Integer.parseInt(value); + } catch (Exception e) { + LOG.warn("exception ", e); + } + return def; + } + + public static Object setProperty(String key, String value) { + return System.getProperties().setProperty(key, value); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Result.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Result.java new file mode 100644 index 0000000000..5087deb2cc --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Result.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node.util; + + +public class Result { + private Err err; + private T t; + + private Result() { + } + + public static Result of() { + return new Result(); + } + + public T get() { + return t; + } + + public void set(T t) { + this.t = t; + } + + public void err(String msg) { + this.err = Err.of(msg); + } +} diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/TkEntry.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/TkEntry.java new file mode 100644 index 0000000000..d089ed61f3 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/TkEntry.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.util; + +import java.util.Arrays; +import java.util.Objects; + +/** + * Table Key pair. 
+ */ +public class TkEntry { + private final String table; + private final byte[] key; + + public TkEntry(String table, byte[] key) { + this.table = table; + this.key = key; + } + + public String getTable() { + return table; + } + + public byte[] getKey() { + return key; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TkEntry)) { + return false; + } + TkEntry tk = (TkEntry) o; + return Objects.equals(table, tk.table) && Arrays.equals(key, tk.key); + } + + @Override + public int hashCode() { + int result = Objects.hash(table); + result = 31 * result + Arrays.hashCode(key); + return result; + } + + @Override + public String toString() { + return "Tk{" + + "table='" + table + '\'' + + ", key=" + Arrays.toString(key) + + '}'; + } +} diff --git a/hugegraph-store/hg-store-node/src/main/resources/application-pd.yml b/hugegraph-store/hg-store-node/src/main/resources/application-pd.yml new file mode 100644 index 0000000000..dc198c3c89 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/resources/application-pd.yml @@ -0,0 +1,38 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" +logging: + config: classpath:log4j2-dev.xml + level: + root: info +rocksdb: + # rocksdb 使用的总内存大小 + total_memory_size: 32000000000 + write_buffer_size: 32000000 diff --git a/hugegraph-store/hg-store-node/src/main/resources/application.yml b/hugegraph-store/hg-store-node/src/main/resources/application.yml new file mode 100644 index 0000000000..962101aac8 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/resources/application.yml @@ -0,0 +1,51 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 + +grpc: + # grpc的服务地址 + host: 127.0.0.1 + port: 8500 + netty-server: + max-inbound-message-size: 1000MB +raft: + address: 127.0.0.1:8510 + max-log-file-size: 600000000000 + # 快照生成时间间隔,单位秒 + snapshotInterval: 1800 +server: + # rest 服务地址 + port: 8520 + +app: + # 存储路径,支持多个路径,逗号分割 + data-path: ./storage + +spring: + profiles: + active: default + include: pd + application: + name: store-node-grpc-server + +logging: + config: classpath:log4j2-dev.xml + level: + root: info diff --git a/hugegraph-store/hg-store-node/src/main/resources/banner.txt b/hugegraph-store/hg-store-node/src/main/resources/banner.txt new file mode 100644 index 0000000000..d9a377f20b --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/resources/banner.txt @@ -0,0 +1,6 @@ +██╗ ██╗ ██████╗ ███████╗████████╗ ██████╗ ██████╗ ███████╗ +██║ ██║██╔════╝ ██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗██╔════╝ +███████║██║ ███╗█████╗███████╗ ██║ ██║ ██║██████╔╝█████╗ +██╔══██║██║ ██║╚════╝╚════██║ ██║ ██║ ██║██╔══██╗██╔══╝ +██║ ██║╚██████╔╝ ███████║ ██║ ╚██████╔╝██║ ██║███████╗ +╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝ - 3.x \ No newline at end of file diff --git a/hugegraph-store/hg-store-node/src/main/resources/log4j2-dev.xml b/hugegraph-store/hg-store-node/src/main/resources/log4j2-dev.xml new file mode 100644 index 0000000000..379acebbae --- /dev/null +++ b/hugegraph-store/hg-store-node/src/main/resources/log4j2-dev.xml @@ -0,0 +1,143 @@ + + + + + + + + logs + hugegraph-store + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hugegraph-store/hg-store-node/src/main/resources/version.txt b/hugegraph-store/hg-store-node/src/main/resources/version.txt new file mode 100644 index 0000000000..b55f10804f --- /dev/null +++ 
b/hugegraph-store/hg-store-node/src/main/resources/version.txt @@ -0,0 +1 @@ +3.6.5 \ No newline at end of file diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer00.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer00.java new file mode 100644 index 0000000000..dc860826df --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer00.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.boot; + +import java.io.File; +import java.util.Objects; + +import org.apache.hugegraph.store.node.StoreNodeApplication; +import org.springframework.boot.SpringApplication; + +import com.alipay.remoting.util.StringUtils; + +public class StoreNodeServer00 { + + public static void main(String[] args) { + // deleteDir(new File("tmp/8500")); + String logPath = System.getProperty("logging.path"); + if (StringUtils.isBlank(logPath)) { + System.setProperty("logging.path", "logs/8500"); + System.setProperty("com.alipay.remoting.client.log.level", "WARN"); + } + if (System.getProperty("bolt.channel_write_buf_low_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_low_water_mark", + Integer.toString(4 * 1024 * 1024)); + } + if (System.getProperty("bolt.channel_write_buf_high_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_high_water_mark", + Integer.toString(8 * 1024 * 1024)); + } + SpringApplication.run(StoreNodeApplication.class, "--spring.profiles.active=server00"); + System.out.println("StoreNodeServer00 started."); + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : Objects.requireNonNull(dir.listFiles())) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer01.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer01.java new file mode 100644 index 0000000000..ba11facfb8 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer01.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.boot; + +import java.io.File; +import java.util.Objects; + +import org.apache.hugegraph.store.node.StoreNodeApplication; +import org.springframework.boot.SpringApplication; + +import com.alipay.remoting.util.StringUtils; + +public class StoreNodeServer01 { + + public static void main(String[] args) { + // deleteDir(new File("tmp/8501")); + String logPath = System.getProperty("logging.path"); + if (StringUtils.isBlank(logPath)) { + System.setProperty("logging.path", "logs/8501"); + System.setProperty("com.alipay.remoting.client.log.level", "WARN"); + } + if (System.getProperty("bolt.channel_write_buf_low_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_low_water_mark", + Integer.toString(4 * 1024 * 1024)); + } + if (System.getProperty("bolt.channel_write_buf_high_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_high_water_mark", + Integer.toString(8 * 1024 * 1024)); + } + + + SpringApplication.run(StoreNodeApplication.class, "--spring.profiles.active=server01"); + System.out.println("StoreNodeServer01 started."); + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : Objects.requireNonNull(dir.listFiles())) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git 
a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer02.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer02.java new file mode 100644 index 0000000000..75bad4ffce --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer02.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.boot; + +import java.io.File; + +import org.apache.hugegraph.store.node.StoreNodeApplication; +import org.springframework.boot.SpringApplication; + +import com.alipay.remoting.util.StringUtils; + +public class StoreNodeServer02 { + + public static void main(String[] args) { + // deleteDir(new File("tmp/8502")); + String logPath = System.getProperty("logging.path"); + if (StringUtils.isBlank(logPath)) { + System.setProperty("logging.path", "logs/8502"); + System.setProperty("com.alipay.remoting.client.log.level", "WARN"); + } + if (System.getProperty("bolt.channel_write_buf_low_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_low_water_mark", + Integer.toString(4 * 1024 * 1024)); + } + if (System.getProperty("bolt.channel_write_buf_high_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_high_water_mark", + Integer.toString(8 * 1024 * 1024)); + } + + SpringApplication.run(StoreNodeApplication.class, "--spring.profiles.active=server02"); + System.out.println("StoreNodeServer02 started."); + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer03.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer03.java new file mode 100644 index 0000000000..ea78bb8994 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer03.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.boot; + +import java.io.File; + +import org.apache.hugegraph.store.node.StoreNodeApplication; +import org.springframework.boot.SpringApplication; + +import com.alipay.remoting.util.StringUtils; + +public class StoreNodeServer03 { + + public static void main(String[] args) { + // deleteDir(new File("tmp/8503")); + String logPath = System.getProperty("logging.path"); + if (StringUtils.isBlank(logPath)) { + System.setProperty("logging.path", "logs/8503"); + System.setProperty("com.alipay.remoting.client.log.level", "WARN"); + } + if (System.getProperty("bolt.channel_write_buf_low_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_low_water_mark", + Integer.toString(4 * 1024 * 1024)); + } + if (System.getProperty("bolt.channel_write_buf_high_water_mark") == null) { + System.setProperty("bolt.channel_write_buf_high_water_mark", + Integer.toString(8 * 1024 * 1024)); + } + + SpringApplication.run(StoreNodeApplication.class, "--spring.profiles.active=server03"); + System.out.println("StoreNodeServer03 started."); + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer04.java 
b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer04.java new file mode 100644 index 0000000000..e186888500 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer04.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.boot; + +import java.io.File; + +import org.apache.hugegraph.store.node.StoreNodeApplication; +import org.springframework.boot.SpringApplication; + +public class StoreNodeServer04 { + + public static void main(String[] args) { + // deleteDir(new File("tmp/8504")); + SpringApplication.run(StoreNodeApplication.class, "--spring.profiles.active=server04"); + System.out.println("StoreNodeServer04 started."); + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer05.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer05.java new file mode 100644 index 0000000000..9eae17383d --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer05.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.boot; + +import java.io.File; + +import org.apache.hugegraph.store.node.StoreNodeApplication; +import org.springframework.boot.SpringApplication; + +public class StoreNodeServer05 { + + public static void main(String[] args) { + // deleteDir(new File("tmp/8503")); + SpringApplication.run(StoreNodeApplication.class, "--spring.profiles.active=server05"); + System.out.println("StoreNodeServer05 started."); + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer06.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer06.java new file mode 100644 index 0000000000..e6d1c751da --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer06.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.boot; + +import java.io.File; + +import org.apache.hugegraph.store.node.StoreNodeApplication; +import org.springframework.boot.SpringApplication; + +public class StoreNodeServer06 { + + public static void main(String[] args) { + // deleteDir(new File("tmp/8503")); + SpringApplication.run(StoreNodeApplication.class, "--spring.profiles.active=server06"); + System.out.println("StoreNodeServer06 started."); + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/HgStoreNodeServiceTest.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/HgStoreNodeServiceTest.java new file mode 100644 index 0000000000..0ff80ff2e6 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/HgStoreNodeServiceTest.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.node; + +// import org.junit.Test; + +import java.io.IOException; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +/** + * HgStore单元测试 + * 1、测试raft多副本入库 + * 2、测试快照同步 + * 3、测试副本增减 + * 4、测试单幅本关闭日志入库 + */ +public class HgStoreNodeServiceTest { + + String yml = + "rocksdb:\n" + " # rocksdb 使用的总内存大小\n" + " total_memory_size: 32000000000\n" + + " max_background_jobs: 8\n" + " max_subcompactions: 4\n" + + " target_file_size_multiplier: 4\n" + " min_write_buffer_number_to_merge: 8\n" + + " target_file_size_base: 512000000"; + + // @Test + public void testRaft() { + + } + + // @Test + public void testYaml() throws InterruptedException, IOException { + + ExecutorService executor = new ThreadPoolExecutor(1000, 1000, + 10L, TimeUnit.SECONDS, + new ArrayBlockingQueue<>(10000)); + CountDownLatch latch = new CountDownLatch(100); + for (int i = 0; i < 100; i++) { + + executor.execute(() -> { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + System.out.println(executor); + System.out.println(Thread.activeCount()); + latch.countDown(); + }); + + } + latch.await(); + System.in.read(); + } +} diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/metrics/JraftMetricsTest.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/metrics/JraftMetricsTest.java new file mode 100644 index 0000000000..f41e0ff4af --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/metrics/JraftMetricsTest.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.node.metrics; + +import java.util.Arrays; +import java.util.List; + +import org.apache.hugegraph.store.node.util.HgRegexUtil; +import org.junit.Test; + + +/** + * 2022/3/7 + */ +public class JraftMetricsTest { + + @Test + public void testRefineLabel() { + String regex = "(replicator)(.+?:\\d+)(.*)"; + + String[] sources = { + "replicator_10.14.139.10:8081_append_entries_times", + "replicator10.14.139.10:8081appendentriestimes", + "replicator.10.14.139.10:8081.append.entries.times", + "replicator-10-14-139-10:8081-append-entries-times", + "replicator_hg_0_10_14_139_10:8081::100_replicate_inflights_count_min", + "replicasdf14-13dasfies-times", + }; + + + Arrays.stream(sources).forEach(e -> { + System.out.println("--- " + e + " ---"); + List list = HgRegexUtil.toGroupValues(regex, e); + if (list != null) { + list.forEach(System.out::println); + } else { + System.out.println("NONE"); + } + System.out.println(); + }); + + } + +} diff --git a/hugegraph-store/hg-store-node/src/test/resources/application-pd.yml b/hugegraph-store/hg-store-node/src/test/resources/application-pd.yml new file mode 100644 index 0000000000..58673f5c23 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/resources/application-pd.yml @@ -0,0 +1,28 @@ +# +# Licensed 
to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +rocksdb: + # rocksdb 使用的总内存大小 + total_memory_size: 32000000000 + max_background_jobs: 8 + max_subcompactions: 4 + target_file_size_multiplier: 4 + min_write_buffer_number_to_merge: 8 + target_file_size_base: 512000000 + bloom_filter_bits_per_key: 10 + compression_per_level: "[zstd, zstd, zstd, zstd, zstd, zstd, zstd]" + bottommost_compression: "zstd" diff --git a/hugegraph-store/hg-store-node/src/test/resources/application-server00.yml b/hugegraph-store/hg-store-node/src/test/resources/application-server00.yml new file mode 100644 index 0000000000..49b33b2ea2 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/resources/application-server00.yml @@ -0,0 +1,73 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 +grpc: + # grpc的服务地址 + host: 127.0.0.1 + port: 8500 + netty-server: + max-inbound-message-size: 1000MB +raft: + # enable: false + address: 127.0.0.1:8510 + data-path: ${app.data-path}/raft + # 快照生成时间间隔,单位秒 + snapshotInterval: 30 + max-log-file-size: 60000000 +server: + # rest 服务地址 + port: 8520 + +app: + data-path: tmp/8500 + #fake-pd: true + label: + label-key: label-value +spring: + application: + name: store-node-grpc-server + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +rocksdb: + data_path: ${app.data-path}/db + wal_path: ${app.data-path}/db + snapshot_path: ${app.data-path}/snapshot + bloom_filter_bits_per_key: 10 + compression_per_level: "[none, zstd, zstd, zstd, zstd, zstd, zstd]" +#fakePd配置参数 +fake-pd: + # fake-pd模式下,store grpc集群列表 + store-list: 127.0.0.1:8500 + # fake-pd模式下,设置raft集群列表 + peers-list: 127.0.0.1:8510 + # 分区数量 + partition-count: 10 + # 每个分区副本数量 + shard-count: 3 + diff --git a/hugegraph-store/hg-store-node/src/test/resources/application-server01.yml b/hugegraph-store/hg-store-node/src/test/resources/application-server01.yml new file mode 100644 index 0000000000..72482bc28c --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/resources/application-server01.yml @@ -0,0 +1,70 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 +grpc: + # grpc的服务地址 + host: 127.0.0.1 + port: 8501 + netty-server: + max-inbound-message-size: 1000MB +raft: + # enable: false + address: 127.0.0.1:8511 + useRocksDBSegmentLogStorage: false + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 + disruptorBufferSize: 128 +server: + # rest 服务地址 + port: 8521 + +app: + data-path: tmp/8501 + # fake-pd: true + label: + label-name: label-value +spring: + application: + name: store-node-grpc-server + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +rocksdb: + write_buffer_size: 2000000 + level0_file_num_compaction_trigger: 2 + bloom_filter_bits_per_key: 10 +#fakePd配置参数 +fake-pd: + # fake-pd模式下,store grpc集群列表 + store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + # fake-pd模式下,设置raft集群列表 + peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513 + # 分区数量 + partition-count: 10 + # 每个分区副本数量 + shard-count: 3 diff --git a/hugegraph-store/hg-store-node/src/test/resources/application-server02.yml b/hugegraph-store/hg-store-node/src/test/resources/application-server02.yml new file mode 100644 index 0000000000..b69e0535e3 --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/resources/application-server02.yml @@ -0,0 +1,69 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 +grpc: + # grpc的服务地址 + host: 127.0.0.1 + port: 8502 + netty-server: + max-inbound-message-size: 1000MB +raft: + # enable: false + address: 127.0.0.1:8512 + useRocksDBSegmentLogStorage: false + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 + disruptorBufferSize: 128 +server: + # rest 服务地址 + port: 8522 + +app: + data-path: tmp/8502 + # fake-pd: true + label: + label-key: label-value +spring: + application: + name: store-node-grpc-server + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +rocksdb: + db_max_alive_time: 120 + +#fakePd配置参数 +fake-pd: + # fake-pd模式下,store grpc集群列表 + store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + # fake-pd模式下,设置raft集群列表 + peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513 + # 分区数量 + partition-count: 10 + # 每个分区副本数量 + shard-count: 3 diff --git a/hugegraph-store/hg-store-node/src/test/resources/application-server03.yml b/hugegraph-store/hg-store-node/src/test/resources/application-server03.yml new file mode 100644 index 0000000000..8028dffd9f --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/resources/application-server03.yml @@ -0,0 +1,71 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# 
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 +grpc: + # grpc的服务地址 + host: 127.0.0.1 + port: 8503 + netty-server: + max-inbound-message-size: 1000MB +raft: + # enable: false + address: 127.0.0.1:8513 + snapshotLogIndexMargin: 1024 + useRocksDBSegmentLogStorage: false + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 + disruptorBufferSize: 128 +server: + # rest 服务地址 + port: 8523 + +app: + data-path: tmp/8503 + # fake-pd: true + label: + label-key: label-value +spring: + application: + name: store-node-grpc-server + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +rocksdb: + db_max_alive_time: 120 + + +#fakePd配置参数 +fake-pd: + # fake-pd模式下,store grpc集群列表 + store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + # fake-pd模式下,设置raft集群列表 + peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513 + # 分区数量 + partition-count: 10 + # 每个分区副本数量 + shard-count: 3 diff --git a/hugegraph-store/hg-store-node/src/test/resources/application-server04.yml b/hugegraph-store/hg-store-node/src/test/resources/application-server04.yml new file mode 100644 index 0000000000..b9d35f443d --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/resources/application-server04.yml @@ -0,0 +1,68 @@ +# +# Licensed to 
the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 +grpc: + # grpc的服务地址 + host: 127.0.0.1 + port: 8504 + netty-server: + max-inbound-message-size: 1000MB +raft: + # enable: false + address: 127.0.0.1:8514 + + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 +server: + # rest 服务地址 + port: 8524 + +app: + data-path: tmp/8504 + # fake-pd: true + label: + label-key: label-value +spring: + application: + name: store-node-grpc-server + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +rocksdb: + + +#fakePd配置参数 +fake-pd: + # fake-pd模式下,store grpc集群列表 + store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + # fake-pd模式下,设置raft集群列表 + peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513 + # 分区数量 + partition-count: 3 + # 每个分区副本数量 + shard-count: 3 diff --git a/hugegraph-store/hg-store-node/src/test/resources/application-server05.yml b/hugegraph-store/hg-store-node/src/test/resources/application-server05.yml new file mode 100644 index 0000000000..02b83f9a5c --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/resources/application-server05.yml @@ -0,0 +1,70 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# 
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 +grpc: + # grpc的服务地址 + host: 127.0.0.1 + port: 8505 + netty-server: + max-inbound-message-size: 1000MB +raft: + # enable: false + address: 127.0.0.1:8515 + data-path: ${app.data-path}/raft + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 +server: + # rest 服务地址 + port: 8525 + +app: + data-path: tmp/8505 + # fake-pd: true + label: + label-key: label-value +spring: + application: + name: store-node-grpc-server + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +rocksdb: + data_path: ${app.data-path}/db + wal_path: ${app.data-path}/db + snapshot_path: ${app.data-path}/snapshot + +#fakePd配置参数 +fake-pd: + # fake-pd模式下,store grpc集群列表 + store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + # fake-pd模式下,设置raft集群列表 + peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513 + # 分区数量 + partition-count: 3 + # 每个分区副本数量 + shard-count: 3 diff --git a/hugegraph-store/hg-store-node/src/test/resources/application-server06.yml b/hugegraph-store/hg-store-node/src/test/resources/application-server06.yml new file mode 100644 index 0000000000..eeef451c1c --- /dev/null +++ b/hugegraph-store/hg-store-node/src/test/resources/application-server06.yml @@ -0,0 +1,70 @@ 
+# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: localhost:8686 +grpc: + # grpc的服务地址 + host: 127.0.0.1 + port: 8506 + netty-server: + max-inbound-message-size: 1000MB +raft: + # enable: false + address: 127.0.0.1:8516 + data-path: ${app.data-path}/raft + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 +server: + # rest 服务地址 + port: 8526 + +app: + data-path: tmp/8506 + # fake-pd: true + label: + label-key: label-value +spring: + application: + name: store-node-grpc-server + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +rocksdb: + data_path: ${app.data-path}/db + wal_path: ${app.data-path}/db + snapshot_path: ${app.data-path}/snapshot + +#fakePd配置参数 +fake-pd: + # fake-pd模式下,store grpc集群列表 + store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + # fake-pd模式下,设置raft集群列表 + peers-list: 127.0.0.1:8511,127.0.0.1:8512,127.0.0.1:8513 + # 分区数量 + partition-count: 3 + # 每个分区副本数量 + shard-count: 3 diff --git a/hugegraph-store/hg-store-node/src/test/resources/log4j2-dev.xml b/hugegraph-store/hg-store-node/src/test/resources/log4j2-dev.xml new file mode 100644 index 0000000000..913ad3f4a1 --- /dev/null +++ 
b/hugegraph-store/hg-store-node/src/test/resources/log4j2-dev.xml @@ -0,0 +1,139 @@ + + + + + + + + logs + hugegraph-store + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 5fe4bcfc75eff9d57387d561c41647bb512c82cf Mon Sep 17 00:00:00 2001 From: sheli00 Date: Sat, 8 Jun 2024 21:07:41 +0800 Subject: [PATCH 2/6] chore: add node dependency in store --- hugegraph-store/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hugegraph-store/pom.xml b/hugegraph-store/pom.xml index dc74b6f439..6793b0c91a 100644 --- a/hugegraph-store/pom.xml +++ b/hugegraph-store/pom.xml @@ -40,8 +40,8 @@ hg-store-test hg-store-rocksdb - - + hg-store-core + hg-store-node From 1e661d5cf237bd5fcbb0c39f5204568b94ce0e05 Mon Sep 17 00:00:00 2001 From: sheli00 Date: Sat, 8 Jun 2024 21:35:45 +0800 Subject: [PATCH 3/6] chore: resolve duplicate module --- hugegraph-store/pom.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/hugegraph-store/pom.xml b/hugegraph-store/pom.xml index 2a7e3f3e7a..ea0d3dc8e2 100644 --- a/hugegraph-store/pom.xml +++ b/hugegraph-store/pom.xml @@ -41,7 +41,6 @@ hg-store-rocksdb hg-store-core - hg-store-core hg-store-node From dc04c45c1d9ba90370dc66bd0d597f8bc80998b2 Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Sun, 9 Jun 2024 16:40:08 +0800 Subject: [PATCH 4/6] reformat store-node --- .../hugegraph/store/node/AppConfig.java | 9 ++- .../controller/HgStoreMetricsController.java | 2 +- .../node/controller/HgTestController.java | 2 +- .../store/node/controller/IndexAPI.java | 5 +- .../store/node/controller/PartitionAPI.java | 4 +- .../store/node/entry/RestResult.java | 1 + .../store/node/grpc/BatchGrpcClosure.java | 2 +- .../store/node/grpc/BatchScanIterator.java | 1 + .../store/node/grpc/FusingScanIterator.java | 1 + .../store/node/grpc/GRpcServerConfig.java | 1 + 
.../store/node/grpc/GrpcClosure.java | 1 + .../store/node/grpc/HgStoreNodeService.java | 3 +- .../store/node/grpc/HgStoreSessionImpl.java | 2 +- .../store/node/grpc/HgStoreStateService.java | 1 - .../store/node/grpc/HgStoreStateSubject.java | 2 +- .../store/node/grpc/HgStoreWrapperEx.java | 4 +- .../store/node/grpc/ParallelScanIterator.java | 1 + .../store/node/grpc/QueryCondition.java | 1 + .../node/grpc/ScanBatchOneShotResponse.java | 2 - .../store/node/grpc/ScanBatchResponse.java | 4 +- .../store/node/grpc/ScanBatchResponse3.java | 5 ++ .../node/grpc/ScanBatchResponseFactory.java | 1 + .../store/node/grpc/ScanOneShotResponse.java | 2 - .../hugegraph/store/node/grpc/ScanQuery.java | 1 + .../store/node/grpc/ScanQueryProducer.java | 4 ++ .../store/node/grpc/ScanStreamResponse.java | 1 + .../hugegraph/store/node/grpc/ScanUtil.java | 4 +- .../store/node/grpc/scan/GraphStoreImpl.java | 1 - .../node/grpc/scan/ScanResponseObserver.java | 67 +++++++++---------- .../store/node/metrics/GRpcExMetrics.java | 2 + .../store/node/metrics/JRaftMetrics.java | 2 + .../store/node/metrics/ProcfsEntry.java | 1 - .../store/node/metrics/ProcfsMetrics.java | 1 - .../store/node/metrics/RocksDBMetrics.java | 4 +- .../node/metrics/RocksDBMetricsConst.java | 1 + .../store/node/metrics/StoreMetrics.java | 3 +- .../store/node/model/HgNodeStatus.java | 1 + .../hugegraph/store/node/util/Base58.java | 1 - .../apache/hugegraph/store/node/util/Err.java | 2 +- .../hugegraph/store/node/util/HgAssert.java | 2 +- .../store/node/util/HgBufferProxy.java | 1 + .../hugegraph/store/node/util/HgChannel.java | 1 - .../hugegraph/store/node/util/HgGrpc.java | 2 +- .../store/node/util/HgRegexUtil.java | 1 - .../store/node/util/HgStoreNodeUtil.java | 1 + .../store/node/util/PropertyUtil.java | 2 +- .../hugegraph/store/node/util/Result.java | 2 +- .../hugegraph/store/node/util/TkEntry.java | 1 + .../store/boot/StoreNodeServer01.java | 1 - .../store/node/metrics/JraftMetricsTest.java | 2 - 50 files changed, 96 
insertions(+), 73 deletions(-) diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java index 214e196f1c..6c561f4c02 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java @@ -33,6 +33,7 @@ @Data @Component public class AppConfig { + @Value("${pdserver.address}") private String pdServerAddress; @@ -48,7 +49,6 @@ public class AppConfig { @Value("${server.port}") private int restPort; - //内置pd模式,用于单机部署 @Value("${app.data-path: store}") private String dataPath; @@ -134,6 +134,7 @@ public Map getRocksdbConfig() { @Data @Configuration public class ThreadPoolGrpc { + @Value("${thread.pool.grpc.core:600}") private int core; @Value("${thread.pool.grpc.max:1000}") @@ -145,6 +146,7 @@ public class ThreadPoolGrpc { @Data @Configuration public class ThreadPoolScan { + @Value("${thread.pool.scan.core: 128}") private int core; @Value("${thread.pool.scan.max: 1000}") @@ -156,6 +158,7 @@ public class ThreadPoolScan { @Data @Configuration public class Raft { + @Value("${raft.address}") private String address; @@ -185,6 +188,7 @@ public class Raft { @Data @Configuration public class ArthasConfig { + @Value("${arthas.telnetPort:8566}") private String telnetPort; @@ -201,6 +205,7 @@ public class ArthasConfig { @Data @Configuration public class FakePdConfig { + @Value("${fake-pd.store-list:''}") private String storeList; @Value("${fake-pd.peers-list:''}") @@ -215,6 +220,7 @@ public class FakePdConfig { @Configuration @ConfigurationProperties(prefix = "app") public class LabelConfig { + private final Map label = new HashMap<>(); } @@ -222,6 +228,7 @@ public class LabelConfig { @Configuration @ConfigurationProperties(prefix = "") public class RocksdbConfig { + private final Map rocksdb = new HashMap<>(); } 
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreMetricsController.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreMetricsController.java index aa6e175196..a7aea39b1d 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreMetricsController.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgStoreMetricsController.java @@ -37,6 +37,7 @@ @RestController @RequestMapping(value = "/metrics", method = RequestMethod.GET) public class HgStoreMetricsController { + private final SystemMetrics systemMetrics = new SystemMetrics(); private final DriveMetrics driveMetrics = new DriveMetrics(); @Autowired @@ -62,5 +63,4 @@ public Map getRaftMetrics() { return nodeService.getNodeMetrics(); } - } diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java index fac0ccc610..8c23621b54 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/HgTestController.java @@ -40,6 +40,7 @@ @Slf4j @RequestMapping("/test") public class HgTestController { + @Autowired HgStoreNodeService nodeService; @@ -77,7 +78,6 @@ public String deleteRaftNode(@PathVariable(value = "groupId") int groupId) { return "未找到分区"; } - } @GetMapping(value = "/gc", produces = MediaType.APPLICATION_JSON_VALUE) diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/IndexAPI.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/IndexAPI.java index b3a542f794..72005fb649 100644 --- 
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/IndexAPI.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/IndexAPI.java @@ -41,6 +41,7 @@ @Slf4j @RequestMapping("/") public class IndexAPI { + @Autowired HgStoreNodeService nodeService; @@ -61,12 +62,14 @@ public Map okMap(String k, Object v) { @Data class StoreInfo { + private int leaderCount; private int partitionCount; } @Data public class Raft { + private final List partitions = new ArrayList<>(); private int groupId; private String role; @@ -79,6 +82,7 @@ public class Raft { @Data public class PartitionInfo { + private final int id; // region id private final String graphName; // Region key range [startKey, endKey) @@ -89,7 +93,6 @@ public class PartitionInfo { private HgStoreMetric.Partition metric; private String leader; - public PartitionInfo(Partition pt) { id = pt.getId(); graphName = pt.getGraphName(); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java index 3b12dbf388..9247f35c75 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/controller/PartitionAPI.java @@ -55,6 +55,7 @@ @Slf4j @RequestMapping("/v1") public class PartitionAPI { + @Autowired HgStoreNodeService nodeService; @@ -208,6 +209,7 @@ public Map okMap(String k, Object v) { @Data public class Raft { + private final List partitions = new ArrayList<>(); private int groupId; private String role; @@ -222,6 +224,7 @@ public class Raft { @Data public class PartitionInfo { + private final int id; // region id private final String graphName; // Region key range [startKey, endKey) @@ -232,7 +235,6 @@ public class PartitionInfo { private 
HgStoreMetric.Partition metric; private String leader; - public PartitionInfo(Partition pt) { id = pt.getId(); graphName = pt.getGraphName(); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/entry/RestResult.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/entry/RestResult.java index 14e7109bd4..b82b88e520 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/entry/RestResult.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/entry/RestResult.java @@ -23,6 +23,7 @@ @Data public class RestResult implements Serializable { + public static final String OK = "OK"; public static final String ERR = "ERR"; String state; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java index f7ad7ff75a..d4ee59cfaa 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchGrpcClosure.java @@ -50,6 +50,7 @@ */ @Slf4j class BatchGrpcClosure { + private final CountDownLatch countDownLatch; private final List errorStatus; private final List results; @@ -112,7 +113,6 @@ public void run(Status status) { }; } - public PartitionFaultResponse getErrorResponse() { PartitionFaultResponse errorResponse; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchScanIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchScanIterator.java index 56851d8f9a..f7e02481fe 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchScanIterator.java +++ 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/BatchScanIterator.java @@ -35,6 +35,7 @@ */ @NotThreadSafe public final class BatchScanIterator implements ScanIterator { + private final Supplier> batchSupplier; private final Supplier limitSupplier; private final AtomicBoolean closed = new AtomicBoolean(); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java index 01d5778684..4ef0286df3 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java @@ -29,6 +29,7 @@ * 2023/2/8 */ final class FusingScanIterator implements ScanIterator { + public static final byte[] EMPTY_BYTES = new byte[0]; private long max; private long accumulator; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java index 2e3b74c29b..27cb69a1de 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java @@ -30,6 +30,7 @@ */ @Component public class GRpcServerConfig extends GRpcServerBuilderConfigurer { + public final static String EXECUTOR_NAME = "hg-grpc"; @Autowired private AppConfig appConfig; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java index 825a57ef90..785739edde 100644 --- 
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java @@ -31,6 +31,7 @@ */ abstract class GrpcClosure implements RaftClosure { + private final Map leaderMap = new HashMap<>(); private V result; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java index 86239bf3d2..4492f37b2c 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java @@ -56,12 +56,12 @@ @Slf4j @Service public class HgStoreNodeService implements RaftTaskHandler { + public static final byte BATCH_OP = 0x12; public static final byte TABLE_OP = 0x13; public static final byte GRAPH_OP = 0x14; public static final byte CLEAN_OP = 0x15; - public static final byte MAX_OP = 0x59; private final AppConfig appConfig; @Autowired @@ -222,7 +222,6 @@ public void destroy() { storeEngine.shutdown(); } - private String getSerializingExceptionMessage(String target) { return "Serializing " + getClass().getName() diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java index aff054917a..b7766ea230 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java @@ -65,6 +65,7 @@ @Slf4j @GRpcService public class HgStoreSessionImpl extends HgStoreSessionGrpc.HgStoreSessionImplBase { + @Autowired() private AppConfig 
appConfig; @Autowired @@ -458,7 +459,6 @@ public void doTable(int partId, TableReq request, RaftClosure response) { log.debug(" - ended GrpcClosure:setResult "); } - @Override public void graph(GraphReq request, StreamObserver observer) { if (log.isDebugEnabled()) { diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java index 57d5165006..a8dc1c2cac 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java @@ -17,7 +17,6 @@ package org.apache.hugegraph.store.node.grpc; - import org.apache.hugegraph.store.grpc.state.HgStoreStateGrpc; import org.apache.hugegraph.store.grpc.state.NodeStateRes; import org.apache.hugegraph.store.grpc.state.ScanState; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java index 5541b16f58..2a90fa5ba8 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java @@ -33,10 +33,10 @@ */ @Slf4j public final class HgStoreStateSubject { + public final static Map> subObserverHolder = new ConcurrentHashMap<>(); - public static void addObserver(String subId, StreamObserver observer) { HgAssert.isArgumentValid(subId, "subId"); HgAssert.isArgumentNotNull(observer == null, "observer"); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java index 414eaa7de7..78355e1785 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java @@ -59,7 +59,7 @@ public ScanIterator scan(String graph, int partId, String table, byte[] start, b int scanType, byte[] query) { ScanIterator scanIterator = - this.handler.scan(graph, partId, table, start, end, scanType, query); + this.handler.scan(graph, partId, table, start, end, scanType, query); return FilterIterator.of(scanIterator, query); } @@ -74,7 +74,7 @@ public ScanIterator scanPrefix(String graph, int partition, String table, byte[] int scanType, byte[] query) { ScanIterator scanIterator = - this.handler.scanPrefix(graph, partition, table, prefix, scanType); + this.handler.scanPrefix(graph, partition, table, prefix, scanType); return FilterIterator.of(scanIterator, query); } diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java index e5d297b69e..430d466c0d 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java @@ -255,6 +255,7 @@ private long getLimit() { } static class KV { + public int sn; public byte[] key; public byte[] value; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java index efcda92438..a52a6d49d3 100644 --- 
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java @@ -21,6 +21,7 @@ * 2023/2/8 */ public interface QueryCondition { + byte[] getStart(); byte[] getEnd(); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java index e862c88afe..418c810eb3 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java @@ -34,7 +34,6 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; - /** * created on 2022/04/08 * @@ -101,5 +100,4 @@ public static void scanOneShot(ScanStreamBatchReq request, } - } diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java index caac4eeaf5..3712fbd7cc 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java @@ -17,7 +17,6 @@ package org.apache.hugegraph.store.node.grpc; - import static org.apache.hugegraph.store.node.grpc.ScanUtil.getParallelIterator; import java.util.List; @@ -45,6 +44,7 @@ */ @Slf4j public class ScanBatchResponse implements StreamObserver { + static ByteBufferAllocator bfAllocator = new ByteBufferAllocator(ParallelScanIterator.maxBodySize * 3 / 2, 1000); static ByteBufferAllocator alloc = @@ -71,7 +71,6 @@ public class ScanBatchResponse implements StreamObserver { private 
long activeTime; private volatile State state; - public ScanBatchResponse(StreamObserver response, HgStoreWrapperEx wrapper, ThreadPoolExecutor executor) { this.sender = response; @@ -132,7 +131,6 @@ public void onCompleted() { closeQuery(); } - /** * 生成迭代器 * diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java index df81a7bbb4..fac1c35820 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java @@ -53,6 +53,7 @@ */ @Slf4j public class ScanBatchResponse3 { + private final static long DEFAULT_PACKAGE_SIZE = 10_000; private final static int MAX_NOT_RECEIPT = 10; @@ -75,6 +76,7 @@ private enum OrderState { /*** Broker ***/ private static class Broker implements StreamObserver { + private final StreamObserver responseObserver; private final HgStoreWrapperEx wrapper; private final ThreadPoolExecutor executor; @@ -154,6 +156,7 @@ private void makeADeal(ScanQueryRequest request) { @NotThreadSafe private static class OrderManager { + OrderState state = OrderState.NEW; OrderWorker worker; OrderDeliverer deliverer; @@ -196,6 +199,7 @@ synchronized void breakdown() { } private static class OrderDeliverer { + private final StreamObserver responseObserver; private final AtomicBoolean finishFlag = new AtomicBoolean(); private final String delivererId; @@ -249,6 +253,7 @@ void error(String msg, Throwable t) { /*** Worker ***/ private static class OrderWorker { + private final ScanIterator iterator; private final OrderDeliverer deliverer; private final AtomicBoolean pauseFlag = new AtomicBoolean(); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java index 52b3378fc3..9c6dafc776 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java @@ -27,6 +27,7 @@ import io.grpc.stub.StreamObserver; public class ScanBatchResponseFactory { + private final static ScanBatchResponseFactory instance = new ScanBatchResponseFactory(); private final Set streamObservers = new ConcurrentHashSet<>(); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java index 841ca09cab..ae37028a6b 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java @@ -31,7 +31,6 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; - /** * created on 2022/02/17 * @@ -92,5 +91,4 @@ public static void scanOneShot(ScanStreamReq request, } - } diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java index 27508e5e76..3894497cf7 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java @@ -25,6 +25,7 @@ * 2022/2/28 */ class ScanQuery implements QueryCondition { + String graph; String table; ScanMethod method; diff --git 
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java index e5b0d32a62..204c32d9c0 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java @@ -135,6 +135,7 @@ public Iterator groupedIterator() { /*---------------inner classes below--------------------*/ private class GroupedNoConditionsIterator implements Iterator { + private boolean isHasNext = true; @Override @@ -161,6 +162,7 @@ public ScanQuery[] next() { } private class GroupedConditionsIterator implements Iterator { + private final Iterator conditionIterator = ScanQueryProducer.this.conditionList.iterator(); @@ -187,6 +189,7 @@ public ScanQuery[] next() { * TODO: no testing */ private class NoConditionsIterator implements Iterator { + private String tableName; private int tableIndex; @@ -220,6 +223,7 @@ public ScanQuery next() { * TODO: no testing */ private class ConditionsIterator implements Iterator { + private final Iterator conditionIterator = ScanQueryProducer.this.conditionList.iterator(); private ScanCondition condition; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java index 26aaec3175..a4e7369f1d 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java @@ -48,6 +48,7 @@ */ @Slf4j public class ScanStreamResponse implements StreamObserver { + private static final String msg = "to wait for client taking data exceeded max time: [{}] 
seconds,stop scanning."; private final StreamObserver responseObserver; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java index aa97842db0..0148fa0b25 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java @@ -41,7 +41,6 @@ import lombok.extern.slf4j.Slf4j; - /** * created on 2022/02/22 * @@ -52,7 +51,6 @@ class ScanUtil { private final static Map tableKeyMap = new HashMap<>(); - static ScanIterator getIterator(ScanStreamReq request, HgStoreWrapperEx wrapper) { String graph = request.getHeader().getGraph(); String table = request.getTable(); @@ -259,7 +257,6 @@ private void init(ScanQueryRequest request) { this.sqIterator = this.sqs.iterator(); } - //@Override public KVPair get1() { ScanIterator iterator = null; @@ -311,6 +308,7 @@ public KVPair get() { } private class Query2Iterator implements Supplier { + ScanQuery[] queries; int index; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java index 864ba3b889..dcfc0549a8 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java @@ -66,7 +66,6 @@ public ThreadPoolExecutor getExecutor() { return this.storeStream.getExecutor(); } - /** * 流式回复消息,每个消息带有seqNo * 客户端每消费一个消息,应答一个seqNo diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java index 7bf56e4cdf..aee0c6eec3 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java @@ -102,6 +102,10 @@ private boolean readTaskCondition() { private boolean sendCondition() { return nextSeqNo.get() - cltSeqNo.get() < MAX_PAGE; + } + + private boolean sendTaskCondition() { + return sendCondition() && (sendTask == null || sendTask.isDone()); } Runnable rr = new Runnable() { @Override public void run() { @@ -146,10 +150,6 @@ public void run() { } }; - private boolean sendTaskCondition() { - return sendCondition() && (sendTask == null || sendTask.isDone()); - } - private void offer(Iterable data, boolean isVertex) { ScanResponse.Builder builder = ScanResponse.newBuilder(); builder.setHeader(okHeader).setSeqNo(nextSeqNo.get()); @@ -172,34 +172,7 @@ private void startRead() { readLock.unlock(); } } - } Runnable sr = () -> { - while (sendCondition()) { - ScanResponse response; - try { - if (readOver.get()) { - if ((response = packages.poll()) == null) { - sender.onCompleted(); - } else { - sender.onNext(response); - nextSeqNo.incrementAndGet(); - } - } else { - response = packages.poll(10, - TimeUnit.MILLISECONDS); - if (response != null) { - sender.onNext(response); - nextSeqNo.incrementAndGet(); - startRead(); - } else { - break; - } - } - - } catch (InterruptedException e) { - break; - } - } - }; + } private void startSend() { if (sendTaskCondition()) { @@ -230,7 +203,34 @@ public void onNext(ScanPartitionRequest scanReq) { cltSeqNo.getAndIncrement(); startSend(); } - } + } Runnable sr = () -> { + while (sendCondition()) { + ScanResponse response; + try { + if (readOver.get()) { + if ((response = packages.poll()) == null) { + sender.onCompleted(); + } else { + 
sender.onNext(response); + nextSeqNo.incrementAndGet(); + } + } else { + response = packages.poll(10, + TimeUnit.MILLISECONDS); + if (response != null) { + sender.onNext(response); + nextSeqNo.incrementAndGet(); + startRead(); + } else { + break; + } + } + + } catch (InterruptedException e) { + break; + } + } + }; @Override public void onError(Throwable t) { @@ -263,5 +263,4 @@ private void close() { - } diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java index cb5619a8c1..828b324b8c 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java @@ -29,6 +29,7 @@ * 2022/3/8 */ public class GRpcExMetrics { + public final static String PREFIX = "grpc"; private final static ExecutorWrapper wrapper = new ExecutorWrapper(); private static MeterRegistry registry; @@ -66,6 +67,7 @@ private static void registerExecutor() { } private static class ExecutorWrapper { + ThreadPoolExecutor pool; void init() { diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java index 10d5b14791..a9b53de2db 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java @@ -45,6 +45,7 @@ */ @Slf4j public class JRaftMetrics { + public final static String PREFIX = "jraft"; public static final String LABELS = "quantile"; public static final String LABEL_50 = "0.5"; @@ -292,6 +293,7 @@ private static void registerGauge(String group, String name, } private 
static class HistogramWrapper { + private final com.codahale.metrics.Histogram histogram; private Snapshot snapshot; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java index ed32d88df1..ef39fd7720 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java @@ -16,7 +16,6 @@ */ package org.apache.hugegraph.store.node.metrics; - import static org.apache.hugegraph.store.node.metrics.ProcfsReader.ReadResult; import java.io.IOException; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java index 6a3f97f27f..64064158ca 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java @@ -17,7 +17,6 @@ package org.apache.hugegraph.store.node.metrics; - import io.micrometer.core.instrument.Gauge; import io.micrometer.core.instrument.MeterRegistry; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java index 5575fc3b9a..a18048aaef 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java @@ -58,6 +58,7 @@ */ @Slf4j public class RocksDBMetrics { + private final static RocksDBFactory rocksDBFactory = 
RocksDBFactory.getInstance(); private final static AtomicInteger rocks = new AtomicInteger(0); private final static Set graphSet = new HashSet<>(); @@ -111,7 +112,6 @@ private static RocksDBSession getRocksDBSession(String graph) { return rocksDBFactory.queryGraphDB(graph); } - private static synchronized void registerMeter() { Set graphs = getGraphs(); @@ -264,6 +264,7 @@ public int getRefCount() { } private static class MemoryUseWrapper { + Map mems = null; long lastTime = 0; @@ -358,6 +359,7 @@ public HistogramData getHistogramData(HistogramType histogramType) { } private static class HistogramDataWrapper { + private final Supplier supplier; private final HistogramType histogramType; private HistogramData data = new HistogramData(0d, 0d, 0d, 0d, 0d); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java index 79c009a700..92df91861e 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java @@ -24,6 +24,7 @@ * TODO: refer license later, 80% match, maybe refer to pantheon, This file need refactor! 
*/ public final class RocksDBMetricsConst { + public static final String PREFIX = "rocks.stats"; public static final String LABELS = "quantile"; public static final String LABEL_50 = "0.5"; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java index 9fcdbc685e..d2f022e00a 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java @@ -17,7 +17,6 @@ package org.apache.hugegraph.store.node.metrics; - import java.util.Collections; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; @@ -33,6 +32,7 @@ * 2021/12/28 */ public final class StoreMetrics { + public final static String PREFIX = "hg"; private final static HgStoreEngine storeEngine = HgStoreEngine.getInstance(); private final static AtomicInteger graphs = new AtomicInteger(0); @@ -98,6 +98,7 @@ private static Map> getGraphPartitions() { } private static class PartitionsGetter implements Supplier { + private final String graph; PartitionsGetter(String graph) { diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java index 6f70e2d48d..b971e117d0 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java @@ -23,6 +23,7 @@ * created on 2021/11/1 */ public class HgNodeStatus { + private int status; private String text; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java index 12382e1358..dff6a0406e 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java @@ -167,6 +167,5 @@ private static byte[] copyOfRange(byte[] source, int from, int to) { return range; } - } diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Err.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Err.java index c36b658de5..9f156d69a0 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Err.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Err.java @@ -17,8 +17,8 @@ package org.apache.hugegraph.store.node.util; - class Err { + private final String msg; private Err(String msg) { diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java index a6dbff59fc..7df843a956 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java @@ -21,8 +21,8 @@ import java.util.Map; import java.util.function.Supplier; - public final class HgAssert { + @Deprecated public static void isTrue(boolean expression, String message) { if (message == null) { diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgBufferProxy.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgBufferProxy.java index 580989a7ed..1d133182b1 100644 --- 
a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgBufferProxy.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgBufferProxy.java @@ -37,6 +37,7 @@ */ @Slf4j public final class HgBufferProxy { + private final BlockingQueue> queue; private final AtomicBoolean closed = new AtomicBoolean(false); private final ReentrantLock lock = new ReentrantLock(); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgChannel.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgChannel.java index 722ff90ae8..02feb24011 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgChannel.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgChannel.java @@ -81,7 +81,6 @@ public boolean send(T t) { } } - /** * return an item from the chan * diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java index adbc96e1e4..fb824c56aa 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgGrpc.java @@ -32,8 +32,8 @@ import io.grpc.StatusRuntimeException; import jline.internal.Log; - public abstract class HgGrpc { + private static final ResCode OK = ResCode.RES_CODE_OK; public static ResStatus not() { diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgRegexUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgRegexUtil.java index 1336eaf8d1..905e9b5d0c 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgRegexUtil.java +++ 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgRegexUtil.java @@ -80,7 +80,6 @@ public static List getMatchList(String regex, String source) { return list.isEmpty() ? null : list; } - public static void main(String[] args) { List res = toGroupValues("(replicator)(.+?:\\d+)(.*)", "replicator_10.14.139.10:8081_append_entries_times"); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreNodeUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreNodeUtil.java index 2716704bcd..a3eb7fd99c 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreNodeUtil.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgStoreNodeUtil.java @@ -28,6 +28,7 @@ */ @Slf4j public final class HgStoreNodeUtil { + public static String toStr(byte[] b) { if (b == null) { return ""; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/PropertyUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/PropertyUtil.java index 425a9828ee..07a33d03ca 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/PropertyUtil.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/PropertyUtil.java @@ -24,6 +24,7 @@ import org.slf4j.LoggerFactory; public class PropertyUtil { + private static final Logger LOG = LoggerFactory.getLogger(PropertyUtil.class); public static String get(String key) { @@ -57,7 +58,6 @@ public static String get(final String key, String def) { return value; } - public static boolean getBoolean(String key, boolean def) { String value = get(key, Boolean.toString(def)); value = value.trim().toLowerCase(); diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Result.java 
b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Result.java index 5087deb2cc..3c46cf55a0 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Result.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Result.java @@ -17,8 +17,8 @@ package org.apache.hugegraph.store.node.util; - public class Result { + private Err err; private T t; diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/TkEntry.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/TkEntry.java index d089ed61f3..6d75217d64 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/TkEntry.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/TkEntry.java @@ -24,6 +24,7 @@ * Table Key pair. */ public class TkEntry { + private final String table; private final byte[] key; diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer01.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer01.java index ba11facfb8..13efed28e3 100644 --- a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer01.java +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/boot/StoreNodeServer01.java @@ -43,7 +43,6 @@ public static void main(String[] args) { Integer.toString(8 * 1024 * 1024)); } - SpringApplication.run(StoreNodeApplication.class, "--spring.profiles.active=server01"); System.out.println("StoreNodeServer01 started."); } diff --git a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/metrics/JraftMetricsTest.java b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/metrics/JraftMetricsTest.java index f41e0ff4af..ec44123a33 100644 --- 
a/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/metrics/JraftMetricsTest.java +++ b/hugegraph-store/hg-store-node/src/test/java/org/apache/hugegraph/store/node/metrics/JraftMetricsTest.java @@ -23,7 +23,6 @@ import org.apache.hugegraph.store.node.util.HgRegexUtil; import org.junit.Test; - /** * 2022/3/7 */ @@ -42,7 +41,6 @@ public void testRefineLabel() { "replicasdf14-13dasfies-times", }; - Arrays.stream(sources).forEach(e -> { System.out.println("--- " + e + " ---"); List list = HgRegexUtil.toGroupValues(regex, e); From 250b83fd6e47af8aa95828db8b7e9bcf35591bae Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Sun, 9 Jun 2024 16:50:20 +0800 Subject: [PATCH 5/6] reformat ScanResponseObserver.java --- .../node/grpc/scan/ScanResponseObserver.java | 145 +++++++++--------- 1 file changed, 72 insertions(+), 73 deletions(-) diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java index aee0c6eec3..b5b49d0398 100644 --- a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java +++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java @@ -106,49 +106,7 @@ private boolean sendCondition() { private boolean sendTaskCondition() { return sendCondition() && (sendTask == null || sendTask.isDone()); - } Runnable rr = new Runnable() { - @Override - public void run() { - try { - if (readCondition()) { - synchronized (iter) { - while (readCondition()) { - Request r = scanReq.getScanRequest(); - ScanType t = r.getScanType(); - boolean isVertex = t.equals(ScanType.SCAN_VERTEX); - ArrayList data = new ArrayList<>(BATCH_SIZE); - int count = 0; - while (iter.hasNext() && leftCount > -1) { - count++; - leftCount--; - T next = (T) iter.next(); - data.add(next); - if 
(count >= BATCH_SIZE) { - offer(data, isVertex); - // data.clear(); - break; - } - } - if (!(iter.hasNext() && leftCount > -1)) { - if (data.size() > 0 && - data.size() < BATCH_SIZE) { - offer(data, isVertex); - } - readOver.set(true); - data = null; - //log.warn("scan complete , count: {},time: {}", - // sum, System.currentTimeMillis()); - return; - } - } - } - } - } catch (Exception e) { - log.warn("read data with error: ", e); - sender.onError(e); - } - } - }; + } private void offer(Iterable data, boolean isVertex) { ScanResponse.Builder builder = ScanResponse.newBuilder(); @@ -203,34 +161,7 @@ public void onNext(ScanPartitionRequest scanReq) { cltSeqNo.getAndIncrement(); startSend(); } - } Runnable sr = () -> { - while (sendCondition()) { - ScanResponse response; - try { - if (readOver.get()) { - if ((response = packages.poll()) == null) { - sender.onCompleted(); - } else { - sender.onNext(response); - nextSeqNo.incrementAndGet(); - } - } else { - response = packages.poll(10, - TimeUnit.MILLISECONDS); - if (response != null) { - sender.onNext(response); - nextSeqNo.incrementAndGet(); - startRead(); - } else { - break; - } - } - - } catch (InterruptedException e) { - break; - } - } - }; + } @Override public void onError(Throwable t) { @@ -259,8 +190,76 @@ private void close() { } } + Runnable rr = new Runnable() { + @Override + public void run() { + try { + if (readCondition()) { + synchronized (iter) { + while (readCondition()) { + Request r = scanReq.getScanRequest(); + ScanType t = r.getScanType(); + boolean isVertex = t.equals(ScanType.SCAN_VERTEX); + ArrayList data = new ArrayList<>(BATCH_SIZE); + int count = 0; + while (iter.hasNext() && leftCount > -1) { + count++; + leftCount--; + T next = (T) iter.next(); + data.add(next); + if (count >= BATCH_SIZE) { + offer(data, isVertex); + // data.clear(); + break; + } + } + if (!(iter.hasNext() && leftCount > -1)) { + if (data.size() > 0 && + data.size() < BATCH_SIZE) { + offer(data, isVertex); + } + 
readOver.set(true); + data = null; + //log.warn("scan complete , count: {},time: {}", + // sum, System.currentTimeMillis()); + return; + } + } + } + } + } catch (Exception e) { + log.warn("read data with error: ", e); + sender.onError(e); + } + } + }; + Runnable sr = () -> { + while (sendCondition()) { + ScanResponse response; + try { + if (readOver.get()) { + if ((response = packages.poll()) == null) { + sender.onCompleted(); + } else { + sender.onNext(response); + nextSeqNo.incrementAndGet(); + } + } else { + response = packages.poll(10, + TimeUnit.MILLISECONDS); + if (response != null) { + sender.onNext(response); + nextSeqNo.incrementAndGet(); + startRead(); + } else { + break; + } + } - - + } catch (InterruptedException e) { + break; + } + } + }; } From 44fc4f8b3bc35cf593b4e21c0214ebabb60d6bfb Mon Sep 17 00:00:00 2001 From: imbajin Date: Mon, 10 Jun 2024 23:47:44 +0800 Subject: [PATCH 6/6] Apply suggestions from code review --- hugegraph-store/hg-store-node/banner.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hugegraph-store/hg-store-node/banner.txt b/hugegraph-store/hg-store-node/banner.txt index 13f0501ce9..0fdffc4d11 100644 --- a/hugegraph-store/hg-store-node/banner.txt +++ b/hugegraph-store/hg-store-node/banner.txt @@ -2,4 +2,4 @@ | | | |/ ___| / ___|_ _/ _ \| _ \| ____| | \ | |/ _ \| _ \| ____| | |_| | | _ ____\___ \ | || | | | |_) | _| _____| \| | | | | | | | _| | _ | |_| |_____|__) || || |_| | _ <| |__|_____| |\ | |_| | |_| | |___ - |_| |_|\____| |____/ |_| \___/|_| \_\_____| |_| \_|\___/|____/|_____| \ No newline at end of file + |_| |_|\____| |____/ |_| \___/|_| \_\_____| |_| \_|\___/|____/|_____|