T next() {
+ return null;
+ }
+
+ @Override
+ public long count() {
+ return 0;
+ }
+
+ @Override
+ public byte[] position() {
+ return new byte[0];
+ }
+
+ @Override
+ public void close() {
+
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java
new file mode 100644
index 0000000000..4ef0286df3
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/FusingScanIterator.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.NoSuchElementException;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+
+/**
+ * This is a wrapper of the ScanIterator that provides a mechanism
+ * to set a threshold value in order to abort the iterating operation.
+ *
+ * 2023/2/8
+ */
+final class FusingScanIterator implements ScanIterator {
+
+ public static final byte[] EMPTY_BYTES = new byte[0];
+ private long max;
+ private long accumulator;
+ private Supplier<ScanIterator> supplier;
+ private ScanIterator iterator;
+ private byte[] position = EMPTY_BYTES;
+
+ private FusingScanIterator() {
+ }
+
+ public static FusingScanIterator maxOf(long maxThreshold,
+ Supplier<ScanIterator> iteratorSupplier) {
+ FusingScanIterator res = new FusingScanIterator();
+ res.max = maxThreshold;
+ res.supplier = iteratorSupplier;
+ return res;
+ }
+
+ private ScanIterator getIterator() {
+ ScanIterator buf = this.supplier.get();
+ if (buf == null) {
+ return null;
+ }
+ if (!buf.hasNext()) {
+ buf = null;
+ }
+ return buf;
+ }
+
+ private void init() {
+ if (this.iterator == null) {
+ this.iterator = this.getIterator();
+ }
+ }
+
+ @Override
+ public boolean hasNext() {
+ if (this.isThresholdExceeded()) {
+ return false;
+ }
+ if (this.iterator == null) {
+ this.iterator = this.getIterator();
+ }
+ return this.iterator != null;
+ }
+
+ @Override
+ public boolean isValid() {
+ return hasNext();
+ }
+
+ @Override
+ public byte[] position() {
+ return this.position;
+ }
+
+ /**
+ * @return true, when the threshold is exceeded.
+ */
+ private boolean isThresholdExceeded() {
+ return this.accumulator >= this.max;
+ }
+
+ @Override
+ public <T> T next() {
+ if (this.isThresholdExceeded()) {
+ throw new NoSuchElementException();
+ }
+ this.init();
+ if (this.iterator == null) {
+ throw new NoSuchElementException();
+ }
+ T t = this.iterator.next();
+ position = this.iterator.position();
+ this.accumulator++;
+ if (!this.iterator.hasNext() || this.isThresholdExceeded()) {
+ this.iterator.close();
+ this.iterator = null;
+ }
+ return t;
+ }
+
+ @Override
+ public void close() {
+ if (this.iterator != null) {
+ this.iterator.close();
+ }
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java
new file mode 100644
index 0000000000..27cb69a1de
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GRpcServerConfig.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import org.apache.hugegraph.store.node.AppConfig;
+import org.apache.hugegraph.store.node.util.HgExecutorUtil;
+import org.lognet.springboot.grpc.GRpcServerBuilderConfigurer;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+import io.grpc.ServerBuilder;
+
+/**
+ * 2022/3/4
+ */
+@Component
+public class GRpcServerConfig extends GRpcServerBuilderConfigurer {
+
+ public final static String EXECUTOR_NAME = "hg-grpc";
+ @Autowired
+ private AppConfig appConfig;
+
+ @Override
+ public void configure(ServerBuilder<?> serverBuilder) {
+ AppConfig.ThreadPoolGrpc grpc = appConfig.getThreadPoolGrpc();
+ serverBuilder.executor(
+ HgExecutorUtil.createExecutor(EXECUTOR_NAME, grpc.getCore(), grpc.getMax(),
+ grpc.getQueue())
+ );
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java
new file mode 100644
index 0000000000..785739edde
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/GrpcClosure.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.store.grpc.session.FeedbackRes;
+import org.apache.hugegraph.store.raft.RaftClosure;
+
+import io.grpc.stub.StreamObserver;
+
+/**
+ * 2022/1/27
+ */
+
+abstract class GrpcClosure<V> implements RaftClosure {
+
+ private final Map<Integer, Long> leaderMap = new HashMap<>();
+ private V result;
+
+ /**
+ * 设置输出结果给raftClosure,对于Follower来说,raftClosure为空
+ */
+ public static <V> void setResult(RaftClosure raftClosure, V result) {
+ GrpcClosure<V> closure = (GrpcClosure<V>) raftClosure;
+ if (closure != null) {
+ closure.setResult(result);
+ }
+ }
+
+ public static <V> RaftClosure newRaftClosure(StreamObserver<V> observer) {
+ BatchGrpcClosure<V> wrap = new BatchGrpcClosure<>(0);
+ return wrap.newRaftClosure(s -> {
+ wrap.waitFinish(observer, r -> {
+ return (V) wrap.selectError((List) r);
+ }, 0);
+ });
+ }
+
+ public V getResult() {
+ return result;
+ }
+
+ public void setResult(V result) {
+ this.result = result;
+ }
+
+ public Map<Integer, Long> getLeaderMap() {
+ return leaderMap;
+ }
+
+ @Override
+ public void onLeaderChanged(Integer partId, Long storeId) {
+ leaderMap.put(partId, storeId);
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
new file mode 100644
index 0000000000..4492f37b2c
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import static org.apache.hugegraph.store.grpc.common.GraphMethod.GRAPH_METHOD_DELETE;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import javax.annotation.PostConstruct;
+import javax.annotation.PreDestroy;
+
+import org.apache.hugegraph.store.HgStoreEngine;
+import org.apache.hugegraph.store.business.DefaultDataMover;
+import org.apache.hugegraph.store.grpc.session.BatchReq;
+import org.apache.hugegraph.store.grpc.session.CleanReq;
+import org.apache.hugegraph.store.grpc.session.GraphReq;
+import org.apache.hugegraph.store.grpc.session.TableReq;
+import org.apache.hugegraph.store.node.AppConfig;
+import org.apache.hugegraph.store.options.HgStoreEngineOptions;
+import org.apache.hugegraph.store.options.RaftRocksdbOptions;
+import org.apache.hugegraph.store.raft.RaftClosure;
+import org.apache.hugegraph.store.raft.RaftOperation;
+import org.apache.hugegraph.store.raft.RaftTaskHandler;
+import org.apache.hugegraph.store.util.HgRaftError;
+import org.apache.hugegraph.store.util.HgStoreException;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.core.NodeMetrics;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Raft task executor for the store node: serializes requests into raft tasks and dispatches log entries.
+ */
+@Slf4j
+@Service
+public class HgStoreNodeService implements RaftTaskHandler {
+
+ public static final byte BATCH_OP = 0x12;
+ public static final byte TABLE_OP = 0x13;
+ public static final byte GRAPH_OP = 0x14;
+ public static final byte CLEAN_OP = 0x15;
+
+ public static final byte MAX_OP = 0x59;
+ private final AppConfig appConfig;
+ @Autowired
+ HgStoreSessionImpl hgStoreSession;
+ private HgStoreEngine storeEngine;
+
+ public HgStoreNodeService(@Autowired AppConfig appConfig) {
+ this.appConfig = appConfig;
+ }
+
+ public HgStoreEngine getStoreEngine() {
+ return this.storeEngine;
+ }
+
+ @PostConstruct
+ public void init() {
+ log.info("{}", appConfig.toString());
+ HgStoreEngineOptions options = new HgStoreEngineOptions() {{
+ setRaftAddress(appConfig.getRaft().getAddress());
+ setDataPath(appConfig.getDataPath());
+ setRaftPath(appConfig.getRaftPath());
+ setPdAddress(appConfig.getPdServerAddress());
+ setFakePD(appConfig.isFakePd());
+ setRocksdbConfig(appConfig.getRocksdbConfig());
+ setGrpcAddress(appConfig.getStoreServerAddress());
+ setLabels(appConfig.getLabelConfig().getLabel());
+ setRaftOptions(new RaftOptions() {{
+ setMetrics(appConfig.getRaft().isMetrics());
+ setRpcDefaultTimeout(appConfig.getRaft().getRpcTimeOut());
+ setSnapshotLogIndexMargin(appConfig.getRaft().getSnapshotLogIndexMargin());
+ setSnapshotIntervalSecs(appConfig.getRaft().getSnapshotInterval());
+ setDisruptorBufferSize(appConfig.getRaft().getDisruptorBufferSize());
+ setMaxLogSize(appConfig.getRaft().getMaxLogFileSize());
+ setAveLogEntrySizeRatio(appConfig.getRaft().getAveLogEntrySizeRation());
+ setUseRocksDBSegmentLogStorage(appConfig.getRaft()
+ .isUseRocksDBSegmentLogStorage());
+ setMaxSegmentFileSize(appConfig.getRaft().getMaxSegmentFileSize());
+ setMaxReplicatorInflightMsgs(appConfig.getRaft().getMaxReplicatorInflightMsgs());
+ }});
+ setFakePdOptions(new FakePdOptions() {{
+ setStoreList(appConfig.getFakePdConfig().getStoreList());
+ setPeersList(appConfig.getFakePdConfig().getPeersList());
+ setPartitionCount(appConfig.getFakePdConfig().getPartitionCount());
+ setShardCount(appConfig.getFakePdConfig().getShardCount());
+ }});
+ }};
+
+ RaftRocksdbOptions.initRocksdbGlobalConfig(options.getRocksdbConfig());
+
+ options.getLabels().put("rest.port", Integer.toString(appConfig.getRestPort()));
+ log.info("HgStoreEngine init {}", options);
+ options.setTaskHandler(this);
+ options.setDataTransfer(new DefaultDataMover());
+ storeEngine = HgStoreEngine.getInstance();
+ storeEngine.init(options);
+
+ }
+
+ public List<Integer> getGraphLeaderPartitionIds(String graphName) {
+ return storeEngine.getPartitionManager().getLeaderPartitionIds(graphName);
+ }
+
+ /**
+ * 添加raft 任务,转发数据给raft
+ *
+ * @return true 表示数据已被提交,false表示未提交,用于单副本入库减少批次拆分
+ */
+ public <Req extends com.google.protobuf.GeneratedMessageV3>
+ void addRaftTask(byte methodId, String graphName, Integer partitionId, Req req,
+ RaftClosure closure) {
+ if (!storeEngine.isClusterReady()) {
+ closure.run(new Status(HgRaftError.CLUSTER_NOT_READY.getNumber(),
+ "The cluster is not ready, please check active stores number!"));
+ log.error("The cluster is not ready, please check active stores number!");
+ return;
+ }
+ //
+ try {
+ // 序列化,
+ final byte[] buffer = new byte[req.getSerializedSize() + 1];
+ final CodedOutputStream output = CodedOutputStream.newInstance(buffer);
+ output.write(methodId);
+ req.writeTo(output);
+ output.checkNoSpaceLeft();
+ output.flush();
+ // 传送给raft
+ storeEngine.addRaftTask(graphName, partitionId,
+ RaftOperation.create(methodId, buffer, req), closure);
+
+ } catch (Exception e) {
+ closure.run(new Status(HgRaftError.UNKNOWN.getNumber(), e.getMessage()));
+ log.error("addRaftTask {}", e);
+ }
+
+ }
+
+ /**
+ * 来自日志的任务,一般是follower 或者 日志回滚的任务
+ */
+ @Override
+ public boolean invoke(int partId, byte[] request, RaftClosure response) throws
+ HgStoreException {
+ try {
+ CodedInputStream input = CodedInputStream.newInstance(request);
+ byte methodId = input.readRawByte();
+ switch (methodId) {
+ case HgStoreNodeService.BATCH_OP:
+ invoke(partId, methodId, BatchReq.parseFrom(input), response);
+ break;
+ case HgStoreNodeService.TABLE_OP:
+ invoke(partId, methodId, TableReq.parseFrom(input), response);
+ break;
+ case HgStoreNodeService.GRAPH_OP:
+ invoke(partId, methodId, GraphReq.parseFrom(input), response);
+ break;
+ case HgStoreNodeService.CLEAN_OP:
+ invoke(partId, methodId, CleanReq.parseFrom(input), response);
+ break;
+ default:
+ return false; // 未处理
+ }
+ } catch (IOException e) {
+ throw new HgStoreException(e.getMessage(), e);
+ }
+ return true;
+ }
+
+ /**
+ * 处理raft传送过来的数据
+ */
+ @Override
+ public boolean invoke(int partId, byte methodId, Object req, RaftClosure response) throws
+ HgStoreException {
+ switch (methodId) {
+ case HgStoreNodeService.BATCH_OP:
+ hgStoreSession.doBatch(partId, (BatchReq) req, response);
+ break;
+ case HgStoreNodeService.TABLE_OP:
+ hgStoreSession.doTable(partId, (TableReq) req, response);
+ break;
+ case HgStoreNodeService.GRAPH_OP:
+ if (((GraphReq) req).getMethod() == GRAPH_METHOD_DELETE) {
+ storeEngine.deletePartition(partId, ((GraphReq) req).getGraphName());
+ }
+ hgStoreSession.doGraph(partId, (GraphReq) req, response);
+ break;
+ case HgStoreNodeService.CLEAN_OP:
+ hgStoreSession.doClean(partId, (CleanReq) req, response);
+ break;
+ default:
+ return false; // 未处理
+ }
+ return true;
+ }
+
+ @PreDestroy
+ public void destroy() {
+ storeEngine.shutdown();
+ }
+
+ private String getSerializingExceptionMessage(String target) {
+ return "Serializing "
+ + getClass().getName()
+ + " to a "
+ + target
+ + " threw an IOException (should never happen).";
+ }
+
+ public Map<Integer, NodeMetrics> getNodeMetrics() {
+ return storeEngine.getNodeMetrics();
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeState.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeState.java
new file mode 100644
index 0000000000..aecb176878
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeState.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.grpc.state.NodeStateType;
+
+/**
+ * created on 2021/11/3
+ */
+@ThreadSafe
+public final class HgStoreNodeState {
+
+ private static NodeStateType curState = NodeStateType.STARTING;
+
+ public static NodeStateType getState() {
+ return curState;
+ }
+
+ private static void setState(NodeStateType state) {
+ curState = state;
+ change();
+ }
+
+ private static void change() {
+ HgStoreStateSubject.notifyAll(curState);
+ }
+
+ public static void goOnline() {
+ setState(NodeStateType.ONLINE);
+ }
+
+ public static void goStarting() {
+ setState(NodeStateType.STARTING);
+ }
+
+ public static void goStopping() {
+ setState(NodeStateType.STOPPING);
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java
new file mode 100644
index 0000000000..b7766ea230
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreSessionImpl.java
@@ -0,0 +1,551 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Metapb.GraphMode;
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.business.BusinessHandler;
+import org.apache.hugegraph.store.grpc.common.Key;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.common.ResCode;
+import org.apache.hugegraph.store.grpc.common.ResStatus;
+import org.apache.hugegraph.store.grpc.session.Agg;
+import org.apache.hugegraph.store.grpc.session.BatchEntry;
+import org.apache.hugegraph.store.grpc.session.BatchGetReq;
+import org.apache.hugegraph.store.grpc.session.BatchReq;
+import org.apache.hugegraph.store.grpc.session.BatchWriteReq;
+import org.apache.hugegraph.store.grpc.session.CleanReq;
+import org.apache.hugegraph.store.grpc.session.FeedbackRes;
+import org.apache.hugegraph.store.grpc.session.GetReq;
+import org.apache.hugegraph.store.grpc.session.GraphReq;
+import org.apache.hugegraph.store.grpc.session.HgStoreSessionGrpc;
+import org.apache.hugegraph.store.grpc.session.KeyValueResponse;
+import org.apache.hugegraph.store.grpc.session.TableReq;
+import org.apache.hugegraph.store.grpc.session.ValueResponse;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+import org.apache.hugegraph.store.meta.Graph;
+import org.apache.hugegraph.store.meta.GraphManager;
+import org.apache.hugegraph.store.node.AppConfig;
+import org.apache.hugegraph.store.node.util.HgGrpc;
+import org.apache.hugegraph.store.node.util.HgStoreNodeUtil;
+import org.apache.hugegraph.store.pd.PdProvider;
+import org.apache.hugegraph.store.raft.RaftClosure;
+import org.apache.hugegraph.store.util.HgStoreConst;
+import org.lognet.springboot.grpc.GRpcService;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@GRpcService
+public class HgStoreSessionImpl extends HgStoreSessionGrpc.HgStoreSessionImplBase {
+
+ @Autowired()
+ private AppConfig appConfig;
+ @Autowired
+ private HgStoreNodeService storeService;
+ private HgStoreWrapperEx wrapper;
+ private PdProvider pdProvider;
+
+ private HgStoreWrapperEx getWrapper() {
+ if (this.wrapper == null) {
+ synchronized (this) {
+ if (this.wrapper == null) {
+ this.wrapper = new HgStoreWrapperEx(
+ storeService.getStoreEngine().getBusinessHandler());
+ }
+ }
+ }
+ return this.wrapper;
+ }
+
+ private PdProvider getPD() {
+ if (pdProvider == null) {
+ synchronized (this) {
+ if (pdProvider == null) {
+ pdProvider = storeService.getStoreEngine().getPdProvider();
+ }
+ }
+ }
+ return pdProvider;
+ }
+
+ @Override
+ public void get2(GetReq request, StreamObserver<FeedbackRes> responseObserver) {
+ String graph = request.getHeader().getGraph();
+ String table = request.getTk().getTable();
+ byte[] key = request.getTk().getKey().toByteArray();
+ int code = request.getTk().getCode();
+ byte[] value = getWrapper().doGet(graph, code, table, key);
+
+ FeedbackRes.Builder builder = FeedbackRes.newBuilder();
+
+ FeedbackRes res = null;
+ if (value != null) {
+ res = builder.setStatus(HgGrpc.success())
+ .setValueResponse(
+ ValueResponse.newBuilder()
+ .setValue(ByteString.copyFrom(value))
+ ).build();
+
+ } else {
+ res = builder.setStatus(HgGrpc.success())
+ .setStatus(HgGrpc.not())
+ .build();
+ }
+
+ responseObserver.onNext(res);
+ responseObserver.onCompleted();
+ }
+
+ @Override
+ public void clean(CleanReq request,
+ StreamObserver<FeedbackRes> responseObserver) {
+
+ String graph = request.getHeader().getGraph();
+ int partition = request.getPartition();
+ // 发给不同的raft执行
+ BatchGrpcClosure<FeedbackRes> closure = new BatchGrpcClosure<>(1);
+ storeService.addRaftTask(HgStoreNodeService.CLEAN_OP, graph, partition,
+ request,
+ closure.newRaftClosure());
+ // 等待返回结果
+ closure.waitFinish(responseObserver, r -> closure.selectError(r),
+ appConfig.getRaft().getRpcTimeOut());
+ }
+
+ public void doClean(int partId, CleanReq request, RaftClosure response) {
+ String graph = request.getHeader().getGraph();
+ FeedbackRes.Builder builder = FeedbackRes.newBuilder();
+ try {
+ if (getWrapper().doClean(graph, partId)) {
+ builder.setStatus(HgGrpc.success());
+ } else {
+ builder.setStatus(HgGrpc.not());
+ }
+ } catch (Throwable t) {
+ String msg = "Failed to doClean, graph: " + graph + "; partitionId = " + partId;
+ log.error(msg, t);
+ builder.setStatus(HgGrpc.fail(msg));
+ }
+ GrpcClosure.setResult(response, builder.build());
+ }
+
+ @Override
+ public void batchGet2(BatchGetReq request, StreamObserver<FeedbackRes> responseObserver) {
+ String graph = request.getHeader().getGraph();
+ String table = request.getTable();
+ FeedbackRes.Builder builder = FeedbackRes.newBuilder();
+
+ List<Key> keyList = request.getKeyList();
+ if (keyList == null || keyList.isEmpty()) {
+ builder.setStatus(HgGrpc.fail("keys is empty"));
+ responseObserver.onNext(builder.build());
+ responseObserver.onCompleted();
+ return;
+ }
+
+ KeyValueResponse.Builder keyValueBuilder = KeyValueResponse.newBuilder();
+
+ int max = keyList.size() - 1;
+ AtomicInteger count = new AtomicInteger(-1);
+ Kv.Builder kvBuilder = Kv.newBuilder();
+ getWrapper().batchGet(graph, table,
+ () -> {
+ if (count.getAndAdd(1) == max) {
+ return null;
+ }
+
+ Key key = keyList.get(count.get());
+ if (log.isDebugEnabled()) {
+ log.debug("batch-get: " +
+ HgStoreNodeUtil.toStr(
+ key.getKey()
+ .toByteArray()));
+ }
+ return HgGrpc.toHgPair(key);
+ },
+ (
+ pair -> {
+ if (pair.getValue() == null || pair.getKey() == null) {
+ return;
+ }
+ keyValueBuilder.addKv(HgGrpc.toKv(pair, kvBuilder));
+ }
+ )
+
+ );
+
+ builder.setKeyValueResponse(keyValueBuilder.build());
+ responseObserver.onNext(builder.build());
+ responseObserver.onCompleted();
+
+ }
+
+ @Override
+ public void batch(BatchReq request, StreamObserver<FeedbackRes> observer) {
+ String graph = request.getHeader().getGraph();
+ List<BatchEntry> list = request.getWriteReq().getEntryList();
+ PdProvider pd = getPD();
+ try {
+ GraphManager graphManager = pd.getGraphManager();
+ Graph managerGraph = graphManager.getGraph(graph);
+ if (managerGraph != null && graph.endsWith("/g")) {
+ Metapb.Graph g = managerGraph.getProtoObj();
+ if (g == null || g.getGraphState() == null) {
+ g = pd.getPDClient().getGraphWithOutException(graph);
+ managerGraph.setGraph(g);
+ }
+ if (g != null) {
+ Metapb.GraphState graphState = g.getGraphState();
+ if (graphState != null) {
+ GraphMode graphMode = graphState.getMode();
+ if (graphMode != null &&
+ graphMode.getNumber() == GraphMode.ReadOnly_VALUE) {
+ // 状态为只读时从pd获取最新的图状态,图只读状态会在pd的通知中更新
+ Metapb.Graph pdGraph =
+ pd.getPDClient().getGraph(graph);
+ Metapb.GraphState pdGraphState =
+ pdGraph.getGraphState();
+ if (pdGraphState != null &&
+ pdGraphState.getMode() != null &&
+ pdGraphState.getMode().getNumber() ==
+ GraphMode.ReadOnly_VALUE) {
+ // 确认pd中存储的当前状态也是只读,则不允许插入数据
+ throw new PDException(-1,
+ "the graph space size " +
+ "has " +
+ "reached the threshold");
+ }
+ // pd状态与本地缓存不一致,本地缓存更新为pd中的状态
+ managerGraph.setProtoObj(pdGraph);
+ }
+ }
+ }
+ }
+ } catch (PDException e) {
+ ResStatus status = ResStatus.newBuilder()
+ .setCode(ResCode.RES_CODE_EXCESS)
+ .setMsg(e.getMessage())
+ .build();
+ FeedbackRes feedbackRes = FeedbackRes.newBuilder()
+ .setStatus(status)
+ .build();
+ observer.onNext(feedbackRes);
+ observer.onCompleted();
+ return;
+ }
+
+ // 按分区拆分数据
+ Map<Integer, List<BatchEntry>> groups = new HashMap<>();
+ list.forEach((entry) -> {
+ Key startKey = entry.getStartKey();
+ if (startKey.getCode() == HgStoreConst.SCAN_ALL_PARTITIONS_ID) {
+ // 所有Leader分区
+ List<Integer> ids =
+ storeService.getGraphLeaderPartitionIds(graph);
+ ids.forEach(id -> {
+ if (!groups.containsKey(id)) {
+ groups.put(id, new LinkedList<>());
+ }
+ groups.get(id).add(entry);
+ });
+ } else {
+ // 根据keyCode查询所属分区ID,按分区ID分组
+ Integer partitionId =
+ pd.getPartitionByCode(graph, startKey.getCode())
+ .getId();
+ if (!groups.containsKey(partitionId)) {
+ groups.put(partitionId, new LinkedList<>());
+ }
+ groups.get(partitionId).add(entry);
+ }
+ });
+
+ // 发给不同的raft执行
+ BatchGrpcClosure<FeedbackRes> closure =
+ new BatchGrpcClosure<>(groups.size());
+ groups.forEach((partition, entries) -> {
+ storeService.addRaftTask(HgStoreNodeService.BATCH_OP, graph,
+ partition,
+ BatchReq.newBuilder()
+ .setHeader(request.getHeader())
+ .setWriteReq(
+ BatchWriteReq.newBuilder()
+ .addAllEntry(
+ entries))
+ .build(),
+ closure.newRaftClosure());
+ });
+
+ if (!graph.isEmpty()) {
+ log.debug(" batch: waiting raft...");
+ // 等待返回结果
+ closure.waitFinish(observer, r -> closure.selectError(r),
+ appConfig.getRaft().getRpcTimeOut());
+ log.debug(" batch: ended waiting");
+ } else {
+ log.info(" batch: there is none of raft leader, graph = {}.",
+ request.getHeader().getGraph());
+ observer.onNext(
+ FeedbackRes.newBuilder().setStatus(HgGrpc.success())
+ .build());
+ observer.onCompleted();
+ }
+ }
+
+ public void doBatch(int partId, BatchReq request, RaftClosure response) {
+ String graph = request.getHeader().getGraph();
+ String batchId = request.getBatchId();
+ FeedbackRes.Builder builder = FeedbackRes.newBuilder();
+ List<BatchEntry> entries = request.getWriteReq().getEntryList();
+ try {
+ getWrapper().doBatch(graph, partId, entries);
+ builder.setStatus(HgGrpc.success());
+ } catch (Throwable t) {
+ String msg = "Failed to doBatch, graph: " + graph + "; batchId= " + batchId;
+ log.error(msg, t);
+ builder.setStatus(HgGrpc.fail(msg));
+ }
+ GrpcClosure.setResult(response, builder.build());
+ }
+
+ // private static HgBusinessHandler.Batch toBatch(BatchEntry entry) {
+ // return new HgBusinessHandler.Batch() {
+ // @Override
+ // public BatchOpType getOp() {
+ // return BatchOpType.of(entry.getOpType().getNumber());
+ // }
+ //
+ // @Override
+ // public int getKeyCode() {
+ // return entry.getStartKey().getCode();
+ // }
+ //
+ // @Override
+ // public String getTable() {
+ // return entry.getTable();
+ // }
+ //
+ // @Override
+ // public byte[] getStartKey() {
+ // return entry.getStartKey().getKey().toByteArray();
+ // }
+ //
+ // @Override
+ // public byte[] getEndKey() {
+ // return entry.getEndKey().getKey().toByteArray();
+ // }
+ //
+ // @Override
+ // public byte[] getValue() {
+ // return entry.getValue().toByteArray();
+ // }
+ // };
+ //
+ //}
+
+ @Override
+ public void table(TableReq request, StreamObserver<FeedbackRes> observer) {
+ if (log.isDebugEnabled()) {
+ log.debug("table: method = {}, graph = {}, table = {}"
+ , request.getMethod().name()
+ , request.getHeader().getGraph()
+ , request.getTableName()
+ );
+ }
+
+ String graph = request.getHeader().getGraph();
+ // 所有Leader分区
+ List<Integer> ids = storeService.getGraphLeaderPartitionIds(graph);
+ // 按分区拆分数据
+ Map<Integer, TableReq> groups = new HashMap<>();
+ // 按分区拆分数据
+ ids.forEach(id -> {
+ groups.put(id, request);
+ });
+
+ // 发给不同的raft执行
+ BatchGrpcClosure<FeedbackRes> closure = new BatchGrpcClosure<>(groups.size());
+ groups.forEach((partition, entries) -> {
+ storeService.addRaftTask(HgStoreNodeService.TABLE_OP, graph, partition,
+ TableReq.newBuilder(request).build(),
+ closure.newRaftClosure());
+ });
+
+ if (!groups.isEmpty()) {
+ // log.info(" table waiting raft...");
+ // 等待返回结果
+ closure.waitFinish(observer, r -> closure.selectError(r),
+ appConfig.getRaft().getRpcTimeOut());
+ // log.info(" table ended waiting raft");
+ } else {
+ // log.info(" table none leader logic");
+ ResStatus status = null;
+
+ switch (request.getMethod()) {
+ case TABLE_METHOD_EXISTS:
+ status = HgGrpc.not();
+ break;
+ default:
+ status = HgGrpc.success();
+ }
+
+ // log.info(" table none leader status: {}", status.getCode());
+ observer.onNext(FeedbackRes.newBuilder().setStatus(status).build());
+ observer.onCompleted();
+ }
+
+ }
+
+ public void doTable(int partId, TableReq request, RaftClosure response) {
+ if (log.isDebugEnabled()) {
+ log.debug(" - doTable[{}]: graph = {}, table = {}"
+ , request.getMethod().name()
+ , request.getHeader().getGraph()
+ , request.getTableName()
+ );
+ }
+
+ FeedbackRes.Builder builder = FeedbackRes.newBuilder();
+
+ try {
+ log.debug(" - starting wrapper:doTable ");
+ if (getWrapper().doTable(partId,
+ request.getMethod(),
+ request.getHeader().getGraph(),
+ request.getTableName())) {
+ builder.setStatus(HgGrpc.success());
+ } else {
+ builder.setStatus(HgGrpc.not());
+ }
+ log.debug(" - ended wrapper:doTable ");
+ } catch (Throwable t) {
+ String msg = "Failed to invoke doTable[ "
+ + request.getMethod().name() + " ], graph="
+ + request.getHeader().getGraph() + " , table="
+ + request.getTableName();
+ log.error(msg, t);
+ builder.setStatus(HgGrpc.fail(msg));
+ }
+ log.debug(" - starting GrpcClosure:setResult ");
+ GrpcClosure.setResult(response, builder.build());
+ log.debug(" - ended GrpcClosure:setResult ");
+ }
+
+ @Override
+ public void graph(GraphReq request, StreamObserver<FeedbackRes> observer) {
+ if (log.isDebugEnabled()) {
+ log.debug("graph: method = {}, graph = {}, table = {}"
+ , request.getMethod().name()
+ , request.getHeader().getGraph()
+ , request.getGraphName()
+ );
+ }
+
+ String graph = request.getHeader().getGraph();
+ // 所有Leader分区
+ List<Integer> ids = storeService.getGraphLeaderPartitionIds(graph);
+ // 按分区拆分数据
+ Map<Integer, GraphReq> groups = new HashMap<>();
+ // 按分区拆分数据
+ ids.forEach(id -> {
+ groups.put(id, request);
+ });
+
+ // 发给不同的raft执行
+ BatchGrpcClosure<FeedbackRes> closure = new BatchGrpcClosure<>(groups.size());
+ groups.forEach((partition, entries) -> {
+ storeService.addRaftTask(HgStoreNodeService.GRAPH_OP, graph, partition,
+ GraphReq.newBuilder(request).build(),
+ closure.newRaftClosure());
+ });
+
+ if (!groups.isEmpty()) {
+ // 等待返回结果
+ closure.waitFinish(observer, r -> closure.selectError(r),
+ appConfig.getRaft().getRpcTimeOut());
+
+ } else {
+ observer.onNext(FeedbackRes.newBuilder().setStatus(HgGrpc.success()).build());
+ observer.onCompleted();
+ }
+
+ }
+
+ public void doGraph(int partId, GraphReq request, RaftClosure response) {
+ if (log.isDebugEnabled()) {
+ log.debug(" - doGraph[{}]: graph = {}, table = {}"
+ , request.getMethod().name()
+ , request.getHeader().getGraph()
+ , request.getGraphName()
+ );
+ }
+
+ FeedbackRes.Builder builder = FeedbackRes.newBuilder();
+
+ try {
+ if (getWrapper().doGraph(partId,
+ request.getMethod(),
+ request.getHeader().getGraph())) {
+ builder.setStatus(HgGrpc.success());
+ } else {
+ builder.setStatus(HgGrpc.not());
+ }
+ } catch (Throwable t) {
+ String msg = "Failed to invoke doGraph[ "
+ + request.getMethod().name() + " ], graph="
+ + request.getHeader().getGraph();
+ log.error(msg, t);
+ builder.setStatus(HgGrpc.fail(msg));
+ }
+ GrpcClosure.setResult(response, builder.build());
+ }
+
+ @Override
+ public void count(ScanStreamReq request, StreamObserver<Agg> observer) {
+ ScanIterator it = null;
+ try {
+ BusinessHandler handler = storeService.getStoreEngine().getBusinessHandler();
+ long count = handler.count(request.getHeader().getGraph(), request.getTable());
+ observer.onNext(Agg.newBuilder().setCount(count).build());
+ observer.onCompleted();
+ } catch (Exception e) {
+ observer.onError(e);
+ } finally {
+ if (it != null) {
+ try {
+ it.close();
+ } catch (Exception e) {
+
+ }
+ }
+ }
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java
new file mode 100644
index 0000000000..a8dc1c2cac
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateService.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import org.apache.hugegraph.store.grpc.state.HgStoreStateGrpc;
+import org.apache.hugegraph.store.grpc.state.NodeStateRes;
+import org.apache.hugegraph.store.grpc.state.ScanState;
+import org.apache.hugegraph.store.grpc.state.SubStateReq;
+import org.lognet.springboot.grpc.GRpcService;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import com.google.protobuf.Empty;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/11/3
+ */
+@Slf4j
+@GRpcService
+public class HgStoreStateService extends HgStoreStateGrpc.HgStoreStateImplBase {
+
+ @Autowired
+ HgStoreStreamImpl impl;
+
+ @Override
+ public void subState(SubStateReq request, StreamObserver observer) {
+ // Long-lived subscription: the observer is intentionally left open so
+ // HgStoreStateSubject.notifyAll can push state updates to it later.
+ HgStoreStateSubject.addObserver(request.getSubId(), observer);
+ }
+
+ @Override
+ public void unsubState(SubStateReq request, StreamObserver observer) {
+ // NOTE(review): the response observer is never completed here, so the
+ // client-side call may hang — confirm whether onCompleted() is expected.
+ HgStoreStateSubject.removeObserver(request.getSubId());
+ }
+
+ @Override
+ public void getScanState(SubStateReq request, StreamObserver observer) {
+ // Unary call: report the scan thread-pool statistics and close the stream.
+ ScanState state = impl.getState();
+ observer.onNext(state);
+ observer.onCompleted();
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java
new file mode 100644
index 0000000000..2a90fa5ba8
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStateSubject.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hugegraph.store.grpc.state.NodeStateRes;
+import org.apache.hugegraph.store.grpc.state.NodeStateType;
+import org.apache.hugegraph.store.node.util.HgAssert;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/11/3
+ */
+@Slf4j
+public final class HgStoreStateSubject {
+
+ public final static Map> subObserverHolder =
+ new ConcurrentHashMap<>();
+
+ public static void addObserver(String subId, StreamObserver observer) {
+ HgAssert.isArgumentValid(subId, "subId");
+ // Fix: pass the observer itself. The original passed the expression
+ // (observer == null), a non-null Boolean, so the null check could never fail.
+ HgAssert.isArgumentNotNull(observer, "observer");
+
+ subObserverHolder.put(subId, observer);
+ }
+
+ public static void removeObserver(String subId) {
+ // Unregister a subscriber; removing an unknown id is a harmless no-op.
+ HgAssert.isArgumentValid(subId, "subId");
+ subObserverHolder.remove(subId);
+ }
+
+ public static void notifyAll(NodeStateType nodeState) {
+ // Broadcast the node state to every subscriber, evicting any whose
+ // stream throws (e.g. a disconnected client).
+ // Fix: validate the argument itself — the original passed the expression
+ // (nodeState == null), a non-null Boolean, so the check could never fail.
+ HgAssert.isArgumentNotNull(nodeState, "nodeState");
+ NodeStateRes res = NodeStateRes.newBuilder().setState(nodeState).build();
+ Iterator>> iter =
+ subObserverHolder.entrySet().iterator();
+
+ while (iter.hasNext()) {
+ Map.Entry> entry = iter.next();
+
+ try {
+ entry.getValue().onNext(res);
+ } catch (Throwable e) {
+ log.error("Failed to send node-state[" + nodeState + "] to subscriber[" +
+ entry.getKey() + "].", e);
+ // Drop the broken subscriber so we stop failing on it; the stack
+ // trace was already logged above, no need to repeat it.
+ iter.remove();
+ log.info("Removed the subscriber[{}].", entry.getKey());
+ }
+
+ }
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStreamImpl.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStreamImpl.java
new file mode 100644
index 0000000000..7d01fa3db4
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreStreamImpl.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.hugegraph.store.grpc.state.ScanState;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc;
+import org.apache.hugegraph.store.grpc.stream.KvPageRes;
+import org.apache.hugegraph.store.grpc.stream.KvStream;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+import org.apache.hugegraph.store.node.AppConfig;
+import org.apache.hugegraph.store.node.util.HgExecutorUtil;
+import org.lognet.springboot.grpc.GRpcService;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/10/19
+ */
+@Slf4j
+@GRpcService
+public class HgStoreStreamImpl extends HgStoreStreamGrpc.HgStoreStreamImplBase {
+
+ @Autowired
+ private HgStoreNodeService storeService;
+ @Autowired
+ private AppConfig appConfig;
+ private HgStoreWrapperEx wrapper;
+ private ThreadPoolExecutor executor;
+
+ private HgStoreWrapperEx getWrapper() {
+ // Fix: double-checked locking on the non-volatile `wrapper` field is unsafe
+ // under the Java memory model (a thread could observe a partially
+ // constructed object). A plain synchronized lazy-init is correct and this
+ // path is not hot enough to need the racy fast path.
+ synchronized (this) {
+ if (this.wrapper == null) {
+ this.wrapper = new HgStoreWrapperEx(
+ storeService.getStoreEngine().getBusinessHandler());
+ }
+ return this.wrapper;
+ }
+ }
+
+ public ThreadPoolExecutor getRealExecutor() {
+ return executor;
+ }
+
+ public ThreadPoolExecutor getExecutor() {
+ // Fix: same broken double-checked locking as getWrapper() — `executor` is
+ // not volatile, so the unlocked read was racy. Initialize under the monitor.
+ synchronized (this) {
+ if (this.executor == null) {
+ AppConfig.ThreadPoolScan scan = this.appConfig.getThreadPoolScan();
+ this.executor =
+ HgExecutorUtil.createExecutor("hg-scan", scan.getCore(), scan.getMax(),
+ scan.getQueue());
+ }
+ return this.executor;
+ }
+ }
+
+ public ScanState getState() {
+ ThreadPoolExecutor ex = getExecutor();
+ ScanState.Builder builder = ScanState.newBuilder();
+ BlockingQueue queue = ex.getQueue();
+ ScanState state =
+ builder.setActiveCount(ex.getActiveCount()).setTaskCount(ex.getTaskCount())
+ .setCompletedTaskCount(ex.getCompletedTaskCount())
+ .setMaximumPoolSize(ex.getMaximumPoolSize())
+ .setLargestPoolSize(ex.getLargestPoolSize()).setPoolSize(ex.getPoolSize())
+ .setAddress(appConfig.getStoreServerAddress())
+ .setQueueSize(queue.size())
+ .setQueueRemainingCapacity(queue.remainingCapacity())
+ .build();
+ return state;
+ }
+
+ @Override
+ public StreamObserver scan(StreamObserver response) {
+ return ScanStreamResponse.of(response, getWrapper(), getExecutor(), appConfig);
+ }
+
+ @Override
+ public void scanOneShot(ScanStreamReq request, StreamObserver response) {
+ ScanOneShotResponse.scanOneShot(request, response, getWrapper());
+ }
+
+ @Override
+ public StreamObserver scanBatch(StreamObserver response) {
+ return ScanBatchResponse3.of(response, getWrapper(), getExecutor());
+ }
+
+ @Override
+ public StreamObserver scanBatch2(StreamObserver response) {
+ return ScanBatchResponseFactory.of(response, getWrapper(), getExecutor());
+ }
+
+ @Override
+ public void scanBatchOneShot(ScanStreamBatchReq request, StreamObserver response) {
+ ScanBatchOneShotResponse.scanOneShot(request, response, getWrapper());
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java
new file mode 100644
index 0000000000..78355e1785
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreWrapperEx.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import static org.apache.hugegraph.store.grpc.common.GraphMethod.GRAPH_METHOD_DELETE;
+
+import java.util.List;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.business.BusinessHandler;
+import org.apache.hugegraph.store.business.FilterIterator;
+import org.apache.hugegraph.store.grpc.common.GraphMethod;
+import org.apache.hugegraph.store.grpc.common.TableMethod;
+import org.apache.hugegraph.store.grpc.session.BatchEntry;
+import org.apache.hugegraph.store.term.HgPair;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class HgStoreWrapperEx {
+
+ private final BusinessHandler handler;
+
+ public HgStoreWrapperEx(BusinessHandler handler) {
+ this.handler = handler;
+ }
+
+ public byte[] doGet(String graph, int code, String table, byte[] key) {
+ return this.handler.doGet(graph, code, table, key);
+ }
+
+ public boolean doClean(String graph, int partId) {
+ return this.handler.cleanPartition(graph, partId);
+ }
+
+ public ScanIterator scanAll(String graph, String table, byte[] query) {
+ ScanIterator scanIterator = this.handler.scanAll(graph, table, query);
+ return FilterIterator.of(scanIterator, query);
+ }
+
+ public ScanIterator scan(String graph, int partId, String table, byte[] start, byte[] end,
+ int scanType,
+ byte[] query) {
+ ScanIterator scanIterator =
+ this.handler.scan(graph, partId, table, start, end, scanType, query);
+ return FilterIterator.of(scanIterator, query);
+ }
+
+ public void batchGet(String graph, String table, Supplier> s,
+ Consumer> c) {
+ this.handler.batchGet(graph, table, s, (pair -> {
+ c.accept(new HgPair<>(pair.getKey(), pair.getValue()));
+ }));
+ }
+
+ public ScanIterator scanPrefix(String graph, int partition, String table, byte[] prefix,
+ int scanType,
+ byte[] query) {
+ ScanIterator scanIterator =
+ this.handler.scanPrefix(graph, partition, table, prefix, scanType);
+ return FilterIterator.of(scanIterator, query);
+ }
+
+ public void doBatch(String graph, int partId, List entryList) {
+ this.handler.doBatch(graph, partId, entryList);
+ }
+
+ public boolean doTable(int partId, TableMethod method, String graph, String table) {
+ boolean flag;
+ switch (method) {
+ case TABLE_METHOD_EXISTS:
+ flag = this.handler.existsTable(graph, partId, table);
+ break;
+ case TABLE_METHOD_CREATE:
+ this.handler.createTable(graph, partId, table);
+ flag = true;
+ break;
+ case TABLE_METHOD_DELETE:
+ this.handler.deleteTable(graph, partId, table);
+ flag = true;
+ break;
+ case TABLE_METHOD_DROP:
+ this.handler.dropTable(graph, partId, table);
+ flag = true;
+ break;
+ case TABLE_METHOD_TRUNCATE:
+ this.handler.truncate(graph, partId);
+ flag = true;
+ break;
+ default:
+ throw new UnsupportedOperationException("TableMethod: " + method.name());
+ }
+
+ return flag;
+ }
+
+ public boolean doGraph(int partId, GraphMethod method, String graph) {
+ // Only DELETE is accepted here; the actual deletion is executed by raft,
+ // so this method merely acknowledges the request.
+ if (method != GRAPH_METHOD_DELETE) {
+ throw new UnsupportedOperationException("GraphMethod: " + method.name());
+ }
+ return true;
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java
new file mode 100644
index 0000000000..430d466c0d
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ParallelScanIterator.java
@@ -0,0 +1,387 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.rocksdb.access.RocksDBSession;
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.buffer.KVByteBuffer;
+import org.apache.hugegraph.store.grpc.common.ScanOrderType;
+import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest;
+import org.apache.hugegraph.store.node.util.HgAssert;
+import org.apache.hugegraph.store.node.util.PropertyUtil;
+import org.apache.hugegraph.store.term.Bits;
+
+import com.alipay.sofa.jraft.util.Utils;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * 支持平行读取的批量查询迭代器
+ */
+@Slf4j
+public class ParallelScanIterator implements ScanIterator {
+
+ private static final int waitDataMaxTryTimes = 600;
+ protected static int maxBodySize =
+ PropertyUtil.getInt("app.scan.stream.body.size", 1024 * 1024);
+ private final int batchSize = PropertyUtil.getInt("app.scan.stream.entries.size", 20000);
+ private final Supplier> batchSupplier;
+ private final Supplier limitSupplier;
+ private final BlockingQueue> queue;
+ private final ReentrantLock queueLock = new ReentrantLock();
+ final private ThreadPoolExecutor executor;
+ private final ScanQueryRequest query;
+ private final Queue scanners = new LinkedList<>();
+ private final Queue pauseScanners = new LinkedList<>();
+ final private List NO_DATA = new ArrayList<>();
+ private final boolean orderVertex;
+ private final boolean orderEdge;
+ private int maxWorkThreads = Utils.cpus() / 8;
+ private int maxInQueue = maxWorkThreads * 2;
+ private volatile boolean finished;
+ private List current = null;
+
+ private ParallelScanIterator(Supplier> iteratorSupplier,
+ Supplier limitSupplier,
+ ScanQueryRequest query,
+ ThreadPoolExecutor executor) {
+ this.executor = executor;
+ this.batchSupplier = iteratorSupplier;
+ this.limitSupplier = limitSupplier;
+ this.finished = false;
+ this.query = query;
+ orderVertex = query.getOrderType() == ScanOrderType.ORDER_STRICT;
+ orderEdge = query.getOrderType() == ScanOrderType.ORDER_WITHIN_VERTEX;
+ if (orderVertex) {
+ this.maxWorkThreads = 1;
+ } else {
+ this.maxWorkThreads =
+ Math.max(1, Math.min(query.getConditionCount() / 16, maxWorkThreads));
+ }
+ this.maxInQueue = maxWorkThreads * 2;
+ // 边有序需要更大的队列
+ queue = new LinkedBlockingQueue<>(maxInQueue * 2);
+ createScanner();
+ }
+
+ public static ParallelScanIterator of(
+ Supplier> iteratorSupplier,
+ Supplier limitSupplier,
+ ScanQueryRequest query,
+ ThreadPoolExecutor executor) {
+ HgAssert.isArgumentNotNull(iteratorSupplier, "iteratorSupplier");
+ HgAssert.isArgumentNotNull(limitSupplier, "limitSupplier");
+ return new ParallelScanIterator(iteratorSupplier, limitSupplier, query, executor);
+ }
+
+ @Override
+ public boolean hasNext() {
+ int tryTimes = 0;
+ while (current == null && tryTimes < waitDataMaxTryTimes) {
+ try {
+ if (queue.size() != 0 || !finished) {
+ current = queue.poll(100, TimeUnit.MILLISECONDS); //定期检查client是否被关闭了
+ if (current == null && !finished) {
+ wakeUpScanner();
+ }
+ } else {
+ break;
+ }
+ } catch (InterruptedException e) {
+ log.error("hasNext interrupted {}", e);
+ break;
+ }
+ tryTimes++;
+ }
+ if (current == null && tryTimes >= waitDataMaxTryTimes) {
+ log.error("Wait data timeout!!!, scanner is {}/{}", scanners.size(),
+ pauseScanners.size());
+ }
+ return current != null && current != NO_DATA;
+ }
+
+ @Override
+ public boolean isValid() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List next() {
+ List t = current;
+ current = null;
+ if (queue.size() < maxWorkThreads) {
+ wakeUpScanner();
+ }
+ return t;
+ }
+
+ @Override
+ public void close() {
+ finished = true;
+ synchronized (scanners) {
+ scanners.forEach(scanner -> {
+ scanner.close();
+ });
+ }
+ synchronized (pauseScanners) {
+ pauseScanners.forEach(s -> {
+ s.close();
+ });
+ }
+ queue.clear();
+ }
+
+ /**
+ * 创建扫描器
+ */
+ private void createScanner() {
+ synchronized (scanners) {
+ for (int i = 0; i < maxWorkThreads; i++) {
+ scanners.add(new KVScanner());
+ }
+ scanners.forEach(scanner -> {
+ executor.execute(() -> scanner.scanKV());
+ });
+ }
+ }
+
+ /**
+ * 唤醒扫描器
+ */
+ private void wakeUpScanner() {
+ synchronized (pauseScanners) {
+ if (!pauseScanners.isEmpty()) {
+ KVScanner scanner = pauseScanners.poll();
+ if (scanner != null) {
+ executor.execute(() -> scanner.scanKV());
+ }
+ }
+ }
+ }
+
+ /**
+ * 休眠扫描器
+ *
+ * @param scanner
+ */
+ private void suspendScanner(KVScanner scanner) {
+ synchronized (pauseScanners) {
+ pauseScanners.add(scanner);
+ }
+ }
+
+ private void quitScanner(KVScanner scanner) {
+ synchronized (scanners) {
+ scanner.close();
+ scanners.remove(scanner);
+ if (scanners.size() == 0) {
+ putData(NO_DATA);
+ this.finished = true;
+ }
+ }
+ }
+
+ /**
+ * 添加到队列,返回队列是否已满
+ *
+ * @param data
+ * @return false: 队列已满
+ */
+ private boolean putData(List data) {
+ try {
+ this.queue.put(data);
+ } catch (InterruptedException e) {
+ log.error("exception ", e);
+ this.finished = true;
+ return false;
+ }
+ return this.queue.size() < maxInQueue;
+ }
+
+ private boolean putData(List data, boolean hasNext) {
+ try {
+ queueLock.lock();
+ this.queue.put(data);
+ } catch (InterruptedException e) {
+ log.error("exception ", e);
+ this.finished = true;
+ return false;
+ } finally {
+ if (!hasNext) {
+ queueLock.unlock();
+ }
+ }
+ // 数据未结束,线程继续执行
+ return hasNext || this.queue.size() < maxInQueue;
+ }
+
+ private synchronized KVPair getIterator() {
+ return this.batchSupplier.get();
+ }
+
+ private long getLimit() {
+ Long limit = this.limitSupplier.get();
+ if (limit == null || limit <= 0) {
+ limit = Long.valueOf(Integer.MAX_VALUE);
+ }
+ return limit;
+ }
+
+ static class KV {
+
+ public int sn;
+ public byte[] key;
+ public byte[] value;
+
+ public boolean hasSN = false;
+
+ public static KV of(RocksDBSession.BackendColumn col) {
+ KV kv = new KV();
+ kv.key = col.name;
+ kv.value = col.value;
+ return kv;
+ }
+
+ public static KV ofSeparator(int value) {
+ KV kv = new KV();
+ kv.key = new byte[4];
+ Bits.putInt(kv.key, 0, value);
+ return kv;
+ }
+
+ public KV setNo(int sn) {
+ this.sn = sn;
+ hasSN = true;
+ return this;
+ }
+
+ public void write(KVByteBuffer buffer) {
+ if (hasSN) {
+ buffer.putInt(sn);
+ }
+ buffer.put(key);
+ buffer.put(value);
+ }
+
+ public int size() {
+ return this.key.length + this.value.length + 1;
+ }
+ }
+
+ class KVScanner {
+
+ private final ReentrantLock iteratorLock = new ReentrantLock();
+ private ScanIterator iterator = null;
+ private QueryCondition query = null;
+ private long limit;
+ private long counter;
+ private volatile boolean closed = false;
+
+ private ScanIterator getIterator() {
+ // 迭代器没有数据,或该点以达到limit,切换新的迭代器
+ if (iterator == null || !iterator.hasNext() || counter >= limit) {
+ if (iterator != null) {
+ iterator.close();
+ }
+ KVPair pair = ParallelScanIterator.this.getIterator();
+ query = pair.getKey();
+ iterator = pair.getValue();
+ limit = getLimit();
+ counter = 0;
+ }
+ return iterator;
+ }
+
+ public void scanKV() {
+ // Drain iterators into batches and hand them to the shared queue; runs on a
+ // scan-pool thread and either suspends (more data) or quits when done.
+ boolean canNext = true;
+ ArrayList dataList = new ArrayList<>(batchSize);
+ dataList.ensureCapacity(batchSize);
+ iteratorLock.lock();
+ try {
+ long entriesSize = 0, bodySize = 0;
+ while (canNext && !closed) {
+ iterator = this.getIterator();
+ if (iterator == null) {
+ break;
+ }
+ // Fill one batch, bounded by entry count, body bytes and per-query limit.
+ while (iterator.hasNext() && entriesSize < batchSize &&
+ bodySize < maxBodySize &&
+ counter < limit && !closed) {
+ KV kv = KV.of(iterator.next());
+ dataList.add(orderVertex ? kv.setNo(query.getSerialNo()) : kv);
+ bodySize += kv.size();
+ entriesSize++;
+ counter++;
+ }
+ if ((entriesSize >= batchSize || bodySize >= maxBodySize) ||
+ (orderEdge && bodySize >= maxBodySize / 2)) {
+ if (orderEdge) {
+ // Edge ordering: keep all edges of one vertex contiguous by
+ // blocking other producers while this iterator still has data.
+ canNext = putData(dataList, iterator != null && iterator.hasNext());
+ } else {
+ canNext = putData(dataList);
+ }
+ dataList = new ArrayList<>(batchSize);
+ dataList.ensureCapacity(batchSize);
+ entriesSize = bodySize = 0;
+ }
+ }
+ if (!dataList.isEmpty()) {
+ if (orderEdge) {
+ putData(dataList, false);
+ } else {
+ putData(dataList);
+ }
+ }
+ } catch (Exception e) {
+ // Fix: pass the throwable as the last argument (no "{}" placeholder) so
+ // the full stack trace is logged instead of just e.toString().
+ log.error("exception ", e);
+ } finally {
+ iteratorLock.unlock();
+ if (iterator != null && counter < limit && !closed) {
+ suspendScanner(this);
+ } else {
+ quitScanner(this);
+ }
+ }
+ }
+
+ public void close() {
+ closed = true;
+ iteratorLock.lock();
+ try {
+ if (iterator != null) {
+ iterator.close();
+ }
+ } finally {
+ iteratorLock.unlock();
+ }
+ }
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java
new file mode 100644
index 0000000000..a52a6d49d3
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/QueryCondition.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+/**
+ * 2023/2/8
+ */
+public interface QueryCondition {
+
+ // Scan range start key; presumably inclusive — confirm against BusinessHandler.scan.
+ byte[] getStart();
+
+ // Scan range end key; presumably exclusive — confirm against BusinessHandler.scan.
+ byte[] getEnd();
+
+ // Key prefix used by prefix scans (see HgStoreWrapperEx.scanPrefix).
+ byte[] getPrefix();
+
+ int getKeyCode();
+
+ // Scan type flags forwarded to BusinessHandler.scan/scanPrefix.
+ int getScanType();
+
+ // Serialized query condition passed through to FilterIterator.
+ byte[] getQuery();
+
+ byte[] getPosition();
+
+ // Serial number written per KV when results must stay in strict vertex order
+ // (see ParallelScanIterator.KVScanner.scanKV).
+ int getSerialNo();
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java
new file mode 100644
index 0000000000..418c810eb3
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchOneShotResponse.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import static org.apache.hugegraph.store.node.grpc.ScanUtil.getIterator;
+
+import org.apache.hugegraph.rocksdb.access.RocksDBSession;
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.stream.KvPageRes;
+import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;
+import org.apache.hugegraph.store.node.util.HgGrpc;
+import org.apache.hugegraph.store.node.util.HgStoreNodeUtil;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/04/08
+ *
+ * @version 0.1.0
+ */
+@Slf4j
+public class ScanBatchOneShotResponse {
+
+ /**
+ * Handle one-shot batch scan
+ *
+ * @param request
+ * @param responseObserver
+ */
+ public static void scanOneShot(ScanStreamBatchReq request,
+ StreamObserver responseObserver,
+ HgStoreWrapperEx wrapper) {
+
+ String graph = request.getHeader().getGraph();
+ ScanQueryRequest queryRequest = request.getQueryRequest();
+ ScanIterator iterator = getIterator(graph, queryRequest, wrapper);
+
+ KvPageRes.Builder resBuilder = KvPageRes.newBuilder();
+ Kv.Builder kvBuilder = Kv.newBuilder();
+
+ long limit = queryRequest.getLimit();
+
+ if (limit <= 0) {
+ limit = Integer.MAX_VALUE;
+ log.warn("As limit is less than or equals 0, default limit was effective:[ {} ]",
+ Integer.MAX_VALUE);
+ }
+
+ int count = 0;
+
+ try {
+ while (iterator.hasNext()) {
+
+ if (++count > limit) {
+ break;
+ }
+
+ RocksDBSession.BackendColumn col = iterator.next();
+
+ resBuilder.addData(kvBuilder
+ .setKey(ByteString.copyFrom(col.name))
+ .setValue(ByteString.copyFrom(col.value))
+ .setCode(HgStoreNodeUtil.toInt(iterator.position()))
+//position == partition-id.
+ );
+
+ }
+
+ responseObserver.onNext(resBuilder.build());
+ responseObserver.onCompleted();
+
+ } catch (Throwable t) {
+ String msg = "Failed to do oneshot batch scan, scanning was interrupted, cause by:";
+ responseObserver.onError(
+ HgGrpc.toErr(Status.Code.INTERNAL, msg, t));
+ } finally {
+ iterator.close();
+ }
+
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java
new file mode 100644
index 0000000000..3712fbd7cc
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import static org.apache.hugegraph.store.node.grpc.ScanUtil.getParallelIterator;
+
+import java.util.List;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.buffer.ByteBufferAllocator;
+import org.apache.hugegraph.store.buffer.KVByteBuffer;
+import org.apache.hugegraph.store.grpc.stream.KvStream;
+import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;
+import org.apache.hugegraph.store.node.util.HgGrpc;
+import org.apache.hugegraph.store.node.util.PropertyUtil;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
/**
 * Batch query processor: scans data in batches and streams the results back.
 * Flow-control protocol:
 * 1. The server streams data batches to the client.
 * 2. After consuming a batch, the client replies with the sequence number it has read.
 * 3. Based on the acknowledged sequence number the server decides how much more
 *    data to send, keeping the transfer continuous without overrunning the client.
 */
@Slf4j
public class ScanBatchResponse implements StreamObserver {

    // Pooled buffers sized to the largest message body plus 50% headroom.
    static ByteBufferAllocator bfAllocator =
            new ByteBufferAllocator(ParallelScanIterator.maxBodySize * 3 / 2, 1000);
    static ByteBufferAllocator alloc =
            new ByteBufferAllocator(ParallelScanIterator.maxBodySize * 3 / 2, 1000);
    // Maximum number of batches allowed in flight (sent but not yet acknowledged).
    private final int maxInFlightCount = PropertyUtil.getInt("app.scan.stream.inflight", 16);
    private final int activeTimeout = PropertyUtil.getInt("app.scan.stream.timeout", 60); // unit: seconds
    private final StreamObserver sender;
    private final HgStoreWrapperEx wrapper;
    private final ThreadPoolExecutor executor;
    // Guards transitions between IDLE / DOING / DONE.
    private final Object stateLock = new Object();
    // Ensures only one sender task traverses the iterator at a time.
    private final Lock iteratorLock = new ReentrantLock();
    // The iterator currently being traversed
    private ScanIterator iterator;
    // Sequence number of the next batch to send
    private volatile int seqNo;
    // Sequence number already consumed by the client
    private volatile int clientSeqNo;
    // Number of entries already sent
    private volatile long count;
    // Maximum number of entries requested by the client
    private volatile long limit;
    private ScanQueryRequest query;
    // Time of the last data read; used by checkActiveTimeout()
    private long activeTime;
    private volatile State state;

    public ScanBatchResponse(StreamObserver response, HgStoreWrapperEx wrapper,
                             ThreadPoolExecutor executor) {
        this.sender = response;
        this.wrapper = wrapper;
        this.executor = executor;
        this.iterator = null;
        this.seqNo = 1;
        this.state = State.IDLE;
        this.activeTime = System.currentTimeMillis();
    }

    /**
     * Receives a message sent by the client.
     * The server handles it on a separate thread so the network thread is not blocked.
     *
     * @param request the incoming sub-request: query, receipt or cancel
     */
    @Override
    public void onNext(ScanStreamBatchReq request) {
        switch (request.getQueryCase()) {
            case QUERY_REQUEST: // query conditions
                executor.execute(() -> {
                    startQuery(request.getHeader().getGraph(), request.getQueryRequest());
                });
                break;
            case RECEIPT_REQUEST: // asynchronous acknowledgement of consumed batches
                this.clientSeqNo = request.getReceiptRequest().getTimes();
                if (seqNo - clientSeqNo < maxInFlightCount) {
                    synchronized (stateLock) {
                        if (state == State.IDLE) {
                            state = State.DOING;
                            executor.execute(() -> {
                                sendEntries();
                            });
                        } else if (state == State.DONE) {
                            sendNoDataEntries();
                        }
                    }
                }
                break;
            case CANCEL_REQUEST: // close the stream
                closeQuery();
                break;
            default:
                sender.onError(
                        HgGrpc.toErr("Unsupported sub-request: [ " + request + " ]"));
        }
    }

    @Override
    public void onError(Throwable t) {
        log.error("onError ", t);
        closeQuery();
    }

    @Override
    public void onCompleted() {
        closeQuery();
    }

    /**
     * Creates the iterator for the query and kicks off the first send round.
     *
     * @param request the scan query definition
     */
    private void startQuery(String graphName, ScanQueryRequest request) {
        this.query = request;
        this.limit = request.getLimit();
        this.count = 0;
        this.iterator = getParallelIterator(graphName, request, this.wrapper, executor);
        synchronized (stateLock) {
            if (state == State.IDLE) {
                state = State.DOING;
                executor.execute(() -> {
                    sendEntries();
                });
            }
        }
    }

    /**
     * Terminates the query: releases the iterator, completes the stream and
     * deregisters this observer from the factory.
     */
    private void closeQuery() {
        setStateDone();
        try {
            closeIter();
            this.sender.onCompleted();
        } catch (Exception e) {
            log.error("exception ", e);
        }
        int active = ScanBatchResponseFactory.getInstance().removeStreamObserver(this);
        log.info("ScanBatchResponse closeQuery, active count is {}", active);
    }

    // Closes and clears the iterator; close failures are deliberately ignored
    // because this runs on shutdown paths.
    private void closeIter() {
        try {
            if (this.iterator != null) {
                this.iterator.close();
                this.iterator = null;
            }
        } catch (Exception e) {
            // ignored: best-effort close during shutdown
        }
    }

    /**
     * Sends data batches until the in-flight window fills, the limit is reached
     * or the iterator is exhausted. Runs on an executor thread; iteratorLock
     * guarantees a single concurrent sender.
     */
    private void sendEntries() {
        if (state == State.DONE || iterator == null) {
            setStateIdle();
            return;
        }
        iteratorLock.lock();
        try {
            // Re-check under the lock: closeQuery() may have run in between.
            if (state == State.DONE || iterator == null) {
                setStateIdle();
                return;
            }
            KvStream.Builder dataBuilder = KvStream.newBuilder().setVersion(1);
            while (state != State.DONE && iterator.hasNext()
                   && (seqNo - clientSeqNo < maxInFlightCount)
                   && this.count < limit) {
                KVByteBuffer buffer = new KVByteBuffer(alloc.get());
                List dataList = iterator.next();
                dataList.forEach(kv -> {
                    kv.write(buffer);
                    this.count++;
                });
                dataBuilder.setStream(buffer.flip().getBuffer());
                dataBuilder.setSeqNo(seqNo++);
                // Return the pooled buffer once gRPC has serialized the message.
                dataBuilder.complete(e -> alloc.release(buffer.getBuffer()));
                this.sender.onNext(dataBuilder.build());
                this.activeTime = System.currentTimeMillis();
            }
            if (!iterator.hasNext() || this.count >= limit || state == State.DONE) {
                closeIter();
                this.sender.onNext(KvStream.newBuilder().setOver(true).build());
                setStateDone();
            } else {
                setStateIdle();
            }
        } catch (Throwable e) {
            if (this.state != State.DONE) {
                log.error(" send data exception: ", e);
                setStateIdle();
                if (this.sender != null) {
                    try {
                        this.sender.onError(e);
                    } catch (Exception ex) {
                        // ignored: the stream may already be closed
                    }
                }
            }
        } finally {
            iteratorLock.unlock();
        }
    }

    // Tells the client there is no more data; failures are swallowed because
    // the client may already have closed the stream.
    private void sendNoDataEntries() {
        try {
            this.sender.onNext(KvStream.newBuilder().setOver(true).build());
        } catch (Exception e) {
        }
    }

    private State setStateDone() {
        synchronized (this.stateLock) {
            this.state = State.DONE;
        }
        return state;
    }

    // Moves back to IDLE unless the task has already finished (DONE is final).
    private State setStateIdle() {
        synchronized (this.stateLock) {
            if (this.state != State.DONE) {
                this.state = State.IDLE;
            }
        }
        return state;
    }

    /**
     * Liveness check: if the client has not requested data for longer than the
     * configured timeout, the stream is considered inactive and the connection
     * is closed to release resources.
     */
    public void checkActiveTimeout() {
        if ((System.currentTimeMillis() - activeTime) > activeTimeout * 1000L) {
            log.warn("The stream is not closed, and the timeout is forced to close");
            closeQuery();
        }
    }

    /**
     * Task state machine.
     */
    private enum State {
        IDLE,
        DOING,
        DONE,
        ERROR
    }
}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java
new file mode 100644
index 0000000000..fac1c35820
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponse3.java
@@ -0,0 +1,422 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.List;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.rocksdb.access.RocksDBSession;
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.stream.KvPageRes;
+import org.apache.hugegraph.store.grpc.stream.ScanCondition;
+import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;
+import org.apache.hugegraph.store.node.util.Base58;
+import org.apache.hugegraph.store.node.util.HgAssert;
+import org.apache.hugegraph.store.node.util.HgGrpc;
+import org.apache.hugegraph.store.node.util.HgStoreConst;
+import org.apache.hugegraph.store.node.util.HgStoreNodeUtil;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
/**
 * Batch-scan handler (v3 protocol). A {@code Broker} receives the client's
 * sub-requests, an {@code OrderWorker} drains the scan iterator on an executor
 * thread, and an {@code OrderDeliverer} streams result pages back, throttled by
 * the client's receipt acknowledgements.
 *
 * created on 2022/03/27
 *
 * @version 3.6.0
 */
@Slf4j
public class ScanBatchResponse3 {

    // Fallback page size used when the client supplies a non-positive one.
    private final static long DEFAULT_PACKAGE_SIZE = 10_000;
    // Max pages the worker may run ahead of client receipts before pausing.
    private final static int MAX_NOT_RECEIPT = 10;

    /**
     * Creates the request observer for one batch-scan stream.
     *
     * @param responseObserver channel used to stream results back, non-null
     * @param wrapper          store facade used to create iterators, non-null
     * @param executor         pool that runs the scan worker
     * @return the observer receiving the client's sub-requests
     */
    public static StreamObserver of(StreamObserver responseObserver,
                                    HgStoreWrapperEx wrapper, ThreadPoolExecutor executor) {
        HgAssert.isArgumentNotNull(responseObserver, "responseObserver");
        HgAssert.isArgumentNotNull(wrapper, "wrapper");
        return new Broker(responseObserver, wrapper, executor);
    }

    private enum OrderState {
        NEW(0),
        WORKING(1);//, PAUSE(2), COMPLETE(10);
        int value;

        OrderState(int value) {
            this.value = value;
        }
    }

    /*** Broker ***/
    private static class Broker implements StreamObserver {

        private final StreamObserver responseObserver;
        private final HgStoreWrapperEx wrapper;
        private final ThreadPoolExecutor executor;
        private final OrderManager manager = new OrderManager();
        // Graph name captured from the first request header seen.
        private String graph;

        Broker(StreamObserver responseObserver, HgStoreWrapperEx wrapper,
               ThreadPoolExecutor executor) {
            this.responseObserver = responseObserver;
            this.wrapper = wrapper;
            this.executor = executor;
        }

        @Override
        public void onNext(ScanStreamBatchReq request) {
            this.handleHeader(request);
            switch (request.getQueryCase()) {
                case QUERY_REQUEST:
                    this.makeADeal(request.getQueryRequest());
                    break;
                case RECEIPT_REQUEST:
                    this.manager.receipt(request.getReceiptRequest().getTimes());
                    break;
                case CANCEL_REQUEST:
                    this.manager.finished();
                    break;
                default:
                    responseObserver.onError(
                            HgGrpc.toErr("Unsupported sub-request: [ " + request + " ]"));
            }
        }

        @Override
        public void onError(Throwable t) {
            log.warn(t.getMessage());
            this.manager.breakdown();
        }

        @Override
        public void onCompleted() {
            this.manager.finished();
        }

        // Remembers the graph name from the first request that carries it.
        private void handleHeader(ScanStreamBatchReq request) {
            if (this.graph == null) {
                this.graph = request.getHeader().getGraph();
            }
        }

        // Builds the worker + deliverer pair for a query and hands it to the manager.
        private void makeADeal(ScanQueryRequest request) {
            String deliverId = "";
            if (log.isDebugEnabled()) {
                List conditions = request.getConditionList();
                if (conditions.size() > 0) {
                    ScanCondition c = conditions.get(0);
                    if (c.getPrefix() != null && c.getPrefix().size() > 0) {
                        deliverId = Base58.encode(c.getPrefix().toByteArray());
                        // NOTE(review): the message says "prefixLength" but the logged
                        // value is the condition count — confirm which was intended.
                        log.info("[ANALYSIS DEAL] [{}] prefixLength: {}", deliverId,
                                 conditions.size());
                    }

                }
            }

            OrderDeliverer deliverer = new OrderDeliverer(deliverId, this.responseObserver);
            OrderWorker worker = new OrderWorker(
                    request.getLimit(),
                    request.getPageSize(),
                    ScanUtil.getIterator(this.graph, request, this.wrapper),
                    deliverer,
                    this.executor);

            this.manager.deal(worker, deliverer);
        }

    }

    @NotThreadSafe
    private static class OrderManager {

        OrderState state = OrderState.NEW;
        OrderWorker worker;
        OrderDeliverer deliverer;

        // Accepts a new query; only the first deal per stream starts a worker.
        synchronized void deal(OrderWorker worker, OrderDeliverer deliverer) {
            if (log.isDebugEnabled()) {
                log.debug("Receiving query request.");
            }
            if (this.state == OrderState.NEW) {
                this.worker = worker;
                this.deliverer = deliverer;
                this.worker.hereWeGo();
                this.state = OrderState.WORKING;
            }
        }

        // Forwards the client's acknowledgement to the worker, possibly resuming it.
        synchronized void receipt(int receiptTimes) {
            if (log.isDebugEnabled()) {
                log.debug("Receiving receipt request.");
            }
            this.worker.setReceipt(receiptTimes);
        }

        synchronized void finished() {
            if (log.isDebugEnabled()) {
                log.debug("Receiving finished request.");
            }
/* if (this.state.value > OrderState.NEW.value
 && this.state.value < OrderState.COMPLETE.value) {
 this.state = OrderState.COMPLETE;
 }*/
            this.breakdown();
        }

        // Asks the worker to abort; safe to call before any worker exists.
        synchronized void breakdown() {
            if (this.worker != null) {
                this.worker.breakdown();
            }
        }
    }

    private static class OrderDeliverer {

        private final StreamObserver responseObserver;
        // Set once the stream has been completed or errored; later calls are no-ops.
        private final AtomicBoolean finishFlag = new AtomicBoolean();
        private final String delivererId;
        // Total entries delivered, for debug accounting only.
        private final AtomicLong count = new AtomicLong();

        OrderDeliverer(String delivererId, StreamObserver responseObserver) {
            this.responseObserver = responseObserver;
            this.delivererId = delivererId;
        }

        /**
         * Sends one page to the client; completes the stream when isOver is true.
         *
         * @param dataBuilder page under construction
         * @param times       sequence number of this page
         * @param isOver      whether this is the final page
         */
        void deliver(KvPageRes.Builder dataBuilder, int times, boolean isOver) {
            if (this.finishFlag.get()) {
                return;
            }
            count.addAndGet(dataBuilder.getDataCount());
            this.responseObserver.onNext(dataBuilder.setOver(isOver).setTimes(times).build());
            if (log.isDebugEnabled()) {
                log.debug("deliver times : {}, over: {}", times, isOver);
            }

            if (isOver) {
                if (log.isDebugEnabled()) {
                    if (delivererId != null && !delivererId.isEmpty()) {
                        log.debug("[ANALYSIS OVER] [{}] count: {}, times: {}", delivererId, count,
                                  times);
                    }
                }
                this.finish();
            }
        }

        void finish() {
            if (!finishFlag.getAndSet(true)) {
                this.responseObserver.onCompleted();
            }
        }

        void error(String msg) {
            if (!finishFlag.getAndSet(true)) {
                this.responseObserver.onError(HgGrpc.toErr(msg));
            }
        }

        void error(String msg, Throwable t) {
            if (!finishFlag.getAndSet(true)) {
                this.responseObserver.onError(HgGrpc.toErr(Status.INTERNAL,
                                                           msg, t));
            }
        }
    }

    /*** Worker ***/
    private static class OrderWorker {

        private final ScanIterator iterator;
        private final OrderDeliverer deliverer;
        private final AtomicBoolean pauseFlag = new AtomicBoolean();
        private final AtomicBoolean completeFlag = new AtomicBoolean();
        private final ReentrantLock workingLock = new ReentrantLock();
        private final AtomicBoolean isWorking = new AtomicBoolean();
        // Set when the client cancels or the stream errors; checked inside the loop.
        private final AtomicBoolean breakdown = new AtomicBoolean();
        // Highest page number the client has acknowledged.
        private final AtomicInteger receiptTimes = new AtomicInteger();
        // Page number currently being produced.
        private final AtomicInteger curTimes = new AtomicInteger();
        private final ThreadPoolExecutor executor;
        private final long limit;
        private long packageSize;
        private long counter;

        OrderWorker(long limit, long packageSize, ScanIterator iterator, OrderDeliverer deliverer,
                    ThreadPoolExecutor executor) {
            this.limit = limit;
            this.packageSize = packageSize;
            this.iterator = iterator;
            this.deliverer = deliverer;
            this.executor = executor;

            if (this.packageSize <= 0) {
                this.packageSize = DEFAULT_PACKAGE_SIZE;
                log.warn(
                        "As page-Size is less than or equals 0, default package-size was " +
                        "effective.[ {} ]",
                        DEFAULT_PACKAGE_SIZE);
            }

        }

        // Starts the scan loop on the executor, guarding against double starts.
        void hereWeGo() {
            if (this.completeFlag.get()) {
                log.warn("job complete.");
                return;
            }
            if (this.isWorking.get()) {
                log.warn("has been working.");
                return;
            }

            if (this.workingLock.isLocked()) {
                log.warn("working now");
                return;
            }

            executor.execute(() -> working());
            Thread.yield();
        }

        void setReceipt(int times) {
            this.receiptTimes.set(times);
            this.continueWorking();
        }

        // True while the unacknowledged-page window is below MAX_NOT_RECEIPT.
        boolean checkContinue() {
            return (this.curTimes.get() - this.receiptTimes.get() < MAX_NOT_RECEIPT);
        }

        // Wakes the worker blocked in working(); the iterator is used as the monitor.
        void continueWorking() {
            if (this.checkContinue()) {
                synchronized (this.iterator) {
                    this.iterator.notify();
                }
            }
        }

        void breakdown() {
            this.breakdown.set(true);
            synchronized (this.iterator) {
                this.iterator.notify();
            }
        }

        /**
         * Main scan loop: fills pages of packageSize entries, delivers them, and
         * blocks on the iterator monitor when the client falls too far behind.
         * Runs entirely inside the iterator's monitor so wait/notify pairs up
         * with continueWorking()/breakdown().
         */
        private void working() {
            if (this.isWorking.getAndSet(true)) {
                return;
            }

            this.workingLock.lock();

            try {
                synchronized (this.iterator) {
                    KvPageRes.Builder dataBuilder = KvPageRes.newBuilder();
                    Kv.Builder kvBuilder = Kv.newBuilder();
                    long packageCount = 0;

                    while (iterator.hasNext()) {
                        if (++this.counter > limit) {
                            this.completeFlag.set(true);
                            break;
                        }

                        if (++packageCount > packageSize) {

                            if (this.breakdown.get()) {
                                break;
                            }

                            deliverer.deliver(dataBuilder, curTimes.incrementAndGet(), false);
                            Thread.yield();

                            if (!this.checkContinue()) {
                                // Window full: wait for a receipt, bounded by the timeout.
                                long start = System.currentTimeMillis();
                                iterator.wait(
                                        HgStoreConst.SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS *
                                        1000);

                                // Elapsed-time check distinguishes timeout from notify.
                                if (System.currentTimeMillis() - start
                                    >=
                                    HgStoreConst.SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS * 1000) {
                                    throw new TimeoutException("Waiting continue more than "
                                                               +
                                                               HgStoreConst.SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS +
                                                               " seconds.");
                                }

                                if (this.breakdown.get()) {
                                    break;
                                }

                            }

                            packageCount = 1;
                            dataBuilder = KvPageRes.newBuilder();
                        }

                        RocksDBSession.BackendColumn col = iterator.next();

                        dataBuilder.addData(kvBuilder
                                                    .setKey(ByteString.copyFrom(col.name))
                                                    .setValue(ByteString.copyFrom(col.value))
                                                    .setCode(HgStoreNodeUtil.toInt(
                                                            iterator.position()))
//position == partition-id.
                        );

                    }

                    this.completeFlag.set(true);

                    // Flush the (possibly partial) final page and close the stream.
                    deliverer.deliver(dataBuilder, curTimes.incrementAndGet(), true);

                }

            } catch (InterruptedException e) {
                log.error("Interrupted waiting of iterator, canceled while.", e);
                this.deliverer.error("Failed to finish scanning, cause by InterruptedException.");
            } catch (TimeoutException t) {
                log.info(t.getMessage());
                this.deliverer.error("Sever waiting exceeded ["
                                     + HgStoreConst.SCAN_WAIT_CLIENT_TAKING_TIME_OUT_SECONDS +
                                     "] seconds.");
            } catch (Throwable t) {
                log.error("Failed to do while for scanning, cause by:", t);
                this.deliverer.error("Failed to finish scanning ", t);
            } finally {
                this.workingLock.unlock();
                this.iterator.close();
            }
        }

    }

}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java
new file mode 100644
index 0000000000..9c6dafc776
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanBatchResponseFactory.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.Set;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.hugegraph.store.grpc.stream.KvStream;
+
+import com.alipay.sofa.jraft.util.concurrent.ConcurrentHashSet;
+
+import io.grpc.stub.StreamObserver;
+
+public class ScanBatchResponseFactory {
+
+ private final static ScanBatchResponseFactory instance = new ScanBatchResponseFactory();
+ private final Set streamObservers = new ConcurrentHashSet<>();
+
+ public static ScanBatchResponseFactory getInstance() {
+ return instance;
+ }
+
+ public static StreamObserver of(StreamObserver responseObserver,
+ HgStoreWrapperEx wrapper, ThreadPoolExecutor executor) {
+ StreamObserver observer = new ScanBatchResponse(responseObserver, wrapper, executor);
+ getInstance().addStreamObserver(observer);
+ getInstance().checkStreamActive();
+ return observer;
+ }
+
+ public int addStreamObserver(StreamObserver observer) {
+ streamObservers.add(observer);
+ return streamObservers.size();
+ }
+
+ public int removeStreamObserver(StreamObserver observer) {
+ streamObservers.remove(observer);
+ return streamObservers.size();
+ }
+
+ /**
+ * 检查是否Stream是否活跃,超时的Stream及时关闭
+ */
+ public void checkStreamActive() {
+ streamObservers.forEach(streamObserver -> {
+ ((ScanBatchResponse) streamObserver).checkActiveTimeout();
+ });
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java
new file mode 100644
index 0000000000..ae37028a6b
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanOneShotResponse.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import org.apache.hugegraph.rocksdb.access.RocksDBSession;
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.stream.KvPageRes;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+import org.apache.hugegraph.store.node.util.HgGrpc;
+import org.apache.hugegraph.store.node.util.HgStoreNodeUtil;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/02/17
+ *
+ * @version 3.6.0
+ */
+@Slf4j
+public class ScanOneShotResponse {
+
+ /**
+ * Handle one-shot scan
+ *
+ * @param request
+ * @param responseObserver
+ */
+ public static void scanOneShot(ScanStreamReq request,
+ StreamObserver responseObserver,
+ HgStoreWrapperEx wrapper) {
+ KvPageRes.Builder resBuilder = KvPageRes.newBuilder();
+ Kv.Builder kvBuilder = Kv.newBuilder();
+ ScanIterator iterator = ScanUtil.getIterator(ScanUtil.toSq(request), wrapper);
+
+ long limit = request.getLimit();
+
+ if (limit <= 0) {
+ responseObserver.onError(HgGrpc.toErr("limit<=0, please to invoke stream scan."));
+ return;
+ }
+
+ int count = 0;
+
+ try {
+ while (iterator.hasNext()) {
+
+ if (++count > limit) {
+ break;
+ }
+
+ RocksDBSession.BackendColumn col = iterator.next();
+
+ resBuilder.addData(kvBuilder
+ .setKey(ByteString.copyFrom(col.name))
+ .setValue(ByteString.copyFrom(col.value))
+ .setCode(HgStoreNodeUtil.toInt(iterator.position()))
+//position == partition-id.
+ );
+
+ }
+
+ responseObserver.onNext(resBuilder.build());
+ responseObserver.onCompleted();
+
+ } catch (Throwable t) {
+ String msg = "an exception occurred during data scanning";
+ responseObserver.onError(HgGrpc.toErr(Status.INTERNAL, msg, t));
+ } finally {
+ iterator.close();
+ }
+
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java
new file mode 100644
index 0000000000..3894497cf7
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQuery.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.Arrays;
+
+import org.apache.hugegraph.store.grpc.common.ScanMethod;
+
+/**
+ * 2022/2/28
+ */
+class ScanQuery implements QueryCondition {
+
+ String graph;
+ String table;
+ ScanMethod method;
+
+ byte[] start;
+ byte[] end;
+ byte[] prefix;
+ int keyCode;
+ int scanType;
+ byte[] query;
+ byte[] position;
+ int serialNo;
+
+ private ScanQuery() {
+ }
+
+ static ScanQuery of() {
+ return new ScanQuery();
+ }
+
+ @Override
+ public byte[] getStart() {
+ return this.start;
+ }
+
+ @Override
+ public byte[] getEnd() {
+ return this.end;
+ }
+
+ @Override
+ public byte[] getPrefix() {
+ return this.prefix;
+ }
+
+ @Override
+ public int getKeyCode() {
+ return this.keyCode;
+ }
+
+ @Override
+ public int getScanType() {
+ return this.scanType;
+ }
+
+ @Override
+ public byte[] getQuery() {
+ return this.query;
+ }
+
+ @Override
+ public byte[] getPosition() {
+ return this.position;
+ }
+
+ @Override
+ public int getSerialNo() {
+ return this.serialNo;
+ }
+
+ @Override
+ public String toString() {
+ return "ScanQuery{" +
+ "graph='" + graph + '\'' +
+ ", table='" + table + '\'' +
+ ", method=" + method +
+ ", start=" + Arrays.toString(start) +
+ ", end=" + Arrays.toString(end) +
+ ", prefix=" + Arrays.toString(prefix) +
+ ", partition=" + keyCode +
+ ", scanType=" + scanType +
+ ", serialNo=" + serialNo +
+ ", query=" + Arrays.toString(query) +
+ ", position=" + Arrays.toString(position) +
+ '}';
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java
new file mode 100644
index 0000000000..204c32d9c0
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanQueryProducer.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.grpc.common.ScanMethod;
+import org.apache.hugegraph.store.grpc.stream.ScanCondition;
+import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest;
+import org.apache.hugegraph.store.node.util.HgAssert;
+
+import lombok.extern.slf4j.Slf4j;
+
/**
 * Buffering the data of ScanQueryRequest and generating ScanQuery.
 * It will not hold the reference of ScanQueryRequest.
 * Each condition in the request is expanded into one ScanQuery per table.
 *
 * 2023/2/8
 */
@NotThreadSafe
@Slf4j
class ScanQueryProducer implements Iterable {

    private String graph;
    // Tables to scan; every condition is expanded once per table.
    private String[] tables;
    private ScanMethod method;
    private int scanType;
    private byte[] query;
    private byte[] position;

    private List conditionList;

    private ScanQueryProducer() {
    }

    /**
     * Builds a producer from a request, copying every field it needs so the
     * request itself can be garbage-collected.
     *
     * @param graph   graph name; must be non-blank
     * @param tables  table names shared by all generated queries; non-null
     * @param request source request; its condition list may be empty
     * @return a new producer
     */
    public static ScanQueryProducer requestOf(String graph, String[] tables,
                                              ScanQueryRequest request) {
        HgAssert.isArgumentValid(graph, "graph");
        HgAssert.isArgumentNotNull(tables, "tables");
        HgAssert.isArgumentNotNull(request, "ScanQueryRequest");

        ScanQueryProducer res = new ScanQueryProducer();
        res.graph = graph;
        res.tables = tables; // a trick that reduce the data-size transferred through network;

        res.method = request.getMethod();
        res.scanType = request.getScanType();
        res.query = request.getQuery().toByteArray();
        res.position = request.getPosition().toByteArray();

        res.conditionList = request.getConditionList();

        if (res.conditionList == null) {
            res.conditionList = Collections.emptyList();
        }

        if (res.conditionList.isEmpty()) {
            log.warn("the condition-list of ScanQueryRequest is empty.");
        }

        return res;
    }

    // Builds one ScanQuery for a table; condition may be null (unconditioned scan).
    private ScanQuery createQuery(String tableName, ScanCondition condition) {
        ScanQuery sq = ScanQuery.of();
        sq.graph = this.graph;
        sq.table = tableName;
        sq.method = this.method;
        sq.scanType = this.scanType;
        sq.query = this.query;
        sq.position = this.position;

        if (condition != null) {
            sq.keyCode = condition.getCode();
            sq.start = condition.getStart().toByteArray();
            sq.end = condition.getEnd().toByteArray();
            sq.prefix = condition.getPrefix().toByteArray();
            sq.serialNo = condition.getSerialNo();
        }

        return sq;
    }

    // Returns the table at the given index, or null when the index is out of range.
    private String getTableName(int tableIndex) {
        if (tableIndex + 1 > this.tables.length) {
            return null;
        }

        return this.tables[tableIndex];
    }

    @Override
    public Iterator iterator() {
        if (this.conditionList.isEmpty()) {
            return new NoConditionsIterator();
        } else {
            return new ConditionsIterator();
        }
    }

    /**
     * Return an Iterator contains Scan-Queries grouped ScanQuery that
     * created by same resource but filled with different tables;
     *
     * @return an iterator of ScanQuery arrays, one array per condition
     */
    public Iterator groupedIterator() {
        if (this.conditionList.isEmpty()) {
            return new GroupedNoConditionsIterator();
        } else {
            return new GroupedConditionsIterator();
        }
    }

    /*---------------inner classes below--------------------*/

    // Yields exactly one array: a condition-less ScanQuery per table.
    private class GroupedNoConditionsIterator implements Iterator {

        private boolean isHasNext = true;

        @Override
        public boolean hasNext() {
            return isHasNext;
        }

        @Override
        public ScanQuery[] next() {
            if (!this.hasNext()) {
                throw new NoSuchElementException();
            }

            ScanQuery[] res = new ScanQuery[ScanQueryProducer.this.tables.length];

            for (int i = 0; i < res.length; i++) {
                res[i] = ScanQueryProducer.this.createQuery(ScanQueryProducer.this.tables[i], null);
            }

            this.isHasNext = false;

            return res;
        }
    }

    // Yields one array per condition, each array holding one query per table.
    private class GroupedConditionsIterator implements Iterator {

        private final Iterator conditionIterator =
                ScanQueryProducer.this.conditionList.iterator();

        @Override
        public boolean hasNext() {
            return conditionIterator.hasNext();
        }

        @Override
        public ScanQuery[] next() {
            ScanCondition condition = this.conditionIterator.next();
            ScanQuery[] res = new ScanQuery[ScanQueryProducer.this.tables.length];

            for (int i = 0; i < res.length; i++) {
                res[i] = ScanQueryProducer.this.createQuery(ScanQueryProducer.this.tables[i],
                                                            condition);
            }

            return res;
        }
    }

    /**
     * Flat iteration over tables with no conditions: one ScanQuery per table.
     * TODO: no testing
     */
    private class NoConditionsIterator implements Iterator {

        // Next table to emit; null once the table list is exhausted.
        private String tableName;
        private int tableIndex;

        @Override
        public boolean hasNext() {
            if (this.tableName != null) {
                return true;
            }

            this.tableName = ScanQueryProducer.this.getTableName(this.tableIndex);

            return this.tableName != null;
        }

        @Override
        public ScanQuery next() {
            if (!this.hasNext()) {
                throw new NoSuchElementException();
            }

            ScanQuery res = ScanQueryProducer.this.createQuery(this.tableName, null);
            this.tableIndex++;
            this.tableName = ScanQueryProducer.this.getTableName(this.tableIndex);

            return res;
        }

    }

    /**
     * Flat iteration over the cross product (condition x table): for each
     * condition, emits one ScanQuery per table before advancing to the next
     * condition.
     * TODO: no testing
     */
    private class ConditionsIterator implements Iterator {

        private final Iterator conditionIterator =
                ScanQueryProducer.this.conditionList.iterator();
        // Condition currently being expanded; null means advance to the next one.
        private ScanCondition condition;
        private String tableName;
        private int tableIndex;

        @Override
        public boolean hasNext() {
            if (this.condition != null) {
                return true;
            }
            return conditionIterator.hasNext();
        }

        @Override
        public ScanQuery next() {
            if (!this.hasNext()) {
                throw new NoSuchElementException();
            }
            if (this.condition == null) {
                this.condition = conditionIterator.next();
            }
            if (this.tableName == null) {
                this.tableName = ScanQueryProducer.this.getTableName(this.tableIndex);
            }

            ScanQuery res = ScanQueryProducer.this.createQuery(this.tableName, this.condition);
            this.tableIndex++;
            this.tableName = ScanQueryProducer.this.getTableName(this.tableIndex);

            // Tables exhausted for this condition: reset to start the next condition.
            if (this.tableName == null) {
                this.condition = null;
                this.tableIndex = 0;
            }

            return res;
        }

    }
}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java
new file mode 100644
index 0000000000..a4e7369f1d
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanStreamResponse.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import static org.apache.hugegraph.store.node.grpc.ScanUtil.getIterator;
+
+import java.util.Collections;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hugegraph.rocksdb.access.RocksDBSession;
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.stream.KvPageRes;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+import org.apache.hugegraph.store.node.AppConfig;
+import org.apache.hugegraph.store.node.util.HgAssert;
+import org.apache.hugegraph.store.node.util.HgChannel;
+import org.apache.hugegraph.store.node.util.HgGrpc;
+import org.apache.hugegraph.store.node.util.HgStoreNodeUtil;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/02/17
+ *
+ * @version 3.6.0
+ */
+@Slf4j
+public class ScanStreamResponse implements StreamObserver {
+
+ private static final String msg =
+ "to wait for client taking data exceeded max time: [{}] seconds,stop scanning."; // SLF4J format; {} is filled with waitTime
+ private final StreamObserver responseObserver; // stream back to the client
+ private final HgStoreWrapperEx wrapper;
+ private final AtomicBoolean finishFlag = new AtomicBoolean(); // true once the response stream was completed or errored
+ private final ThreadPoolExecutor executor; // runs the background scanning loop
+ private final AtomicBoolean isStarted = new AtomicBoolean(); // guards one-time iterator initialization
+ private final AtomicBoolean isStop = new AtomicBoolean(false); // set on client close/onError/onCompleted
+ private final AppConfig config;
+ private final int waitTime; // seconds to wait for the client to take a page
+ private final HgChannel channel; // hand-off channel between the scan thread and the responder
+ private ScanIterator iterator;
+ private long limit = 0; // overall record limit; 0 means unlimited
+ private int times = 0; // number of pages sent so far
+ private long pageSize = 0;
+ private int total = 0; // records read so far
+ private String graph;
+ private String table;
+
+ ScanStreamResponse(StreamObserver responseObserver,
+ HgStoreWrapperEx wrapper,
+ ThreadPoolExecutor executor, AppConfig appConfig) { // package-private: use of(...)
+ this.responseObserver = responseObserver;
+ this.wrapper = wrapper;
+ this.executor = executor;
+ this.config = appConfig;
+ this.waitTime = this.config.getServerWaitTime(); // seconds the scan thread waits for the client
+ this.channel = HgChannel.of(waitTime);
+ }
+
+ public static ScanStreamResponse of(StreamObserver responseObserver,
+ HgStoreWrapperEx wrapper,
+ ThreadPoolExecutor executor, AppConfig appConfig) {
+ HgAssert.isArgumentNotNull(responseObserver, "responseObserver");
+ HgAssert.isArgumentNotNull(wrapper, "wrapper");
+ HgAssert.isArgumentNotNull(executor, "executor"); // NOTE(review): appConfig is not null-checked like the other args — confirm intentional
+ return new ScanStreamResponse(responseObserver, wrapper, executor, appConfig);
+ }
+
+ @Override
+ public void onNext(ScanStreamReq request) { // closeFlag == 1 is the client's explicit stop signal; anything else requests the next page
+ try {
+ if (request.getCloseFlag() == 1) {
+ close();
+ } else {
+ next(request);
+ }
+ } catch (Exception e) {
+ responseObserver.onError(e);
+ }
+ }
+
+ @Override
+ public void onError(Throwable t) { // client-side failure: stop scanning and complete our side of the stream
+ this.isStop.set(true);
+ this.finishServer();
+ log.warn("onError from client [ graph: {} , table: {} ]; Reason: {}", graph, table,
+ t.getMessage());
+ }
+
+ @Override
+ public void onCompleted() { // client finished sending: stop scanning and complete our side of the stream
+ this.isStop.set(true);
+ this.finishServer();
+ }
+
+ private void initIterator(ScanStreamReq request) { // one-shot: builds the iterator and launches the background scanning loop
+ try {
+ if (this.isStarted.getAndSet(true)) { // only the first request initializes; later calls are no-ops
+ return;
+ }
+ this.iterator = getIterator(request, this.wrapper);
+ this.graph = request.getHeader().getGraph();
+ this.table = request.getTable();
+ this.limit = request.getLimit();
+ this.pageSize = request.getPageSize();
+ if (this.pageSize <= 0) {
+ log.warn(
+ "As page-Size is less than or equals 0, no data will be send to the " +
+ "client.");
+ }
+ /*** Start scanning loop ***/
+ Runnable scanning = () ->
+ {
+ // log.debug("Start scanning, graph = {}, table= {}, limit = " +
+ // "{}, page size = {}", this.graph, this.table, this.limit,
+ // this.pageSize);
+ KvPageRes.Builder dataBuilder = KvPageRes.newBuilder();
+ Kv.Builder kvBuilder = Kv.newBuilder();
+ int pageCount = 0;
+ try {
+ while (iterator.hasNext()) {
+ if (limit > 0 && ++this.total > limit) { // overall record limit reached
+ break;
+ }
+ if (++pageCount > pageSize) { // page is full: hand it over to the responder thread
+ long start = System.currentTimeMillis();
+ if (!this.channel.send(dataBuilder)) {
+ if (System.currentTimeMillis() - start >= waitTime * 1000L) {
+ log.warn(msg, waitTime);
+ this.timeoutSever(); // NOTE(review): callee name has a typo ("Sever"); rename together with the method
+ }
+ return;
+ }
+ if (this.isStop.get()) {
+ return;
+ }
+ pageCount = 1; // the record added below is the first of the new page
+ dataBuilder = KvPageRes.newBuilder();
+ }
+ dataBuilder.addData(toKv(kvBuilder, iterator.next(), iterator.position()));
+ }
+ this.channel.send(dataBuilder); // flush the final (possibly partial) page
+ } catch (Throwable t) {
+ String msg = "an exception occurred while scanning data:"; // NOTE(review): shadows the static field "msg"
+ StatusRuntimeException ex =
+ HgGrpc.toErr(Status.INTERNAL, msg + t.getMessage(), t);
+ responseObserver.onError(ex);
+ } finally {
+ try {
+ this.iterator.close();
+ this.channel.close();
+ } catch (Exception e) {
+ // best-effort cleanup; errors deliberately ignored
+ }
+ }
+
+ };
+ this.executor.execute(scanning);
+ } catch (Exception e) {
+ StatusRuntimeException ex = HgGrpc.toErr(Status.INTERNAL, null, e);
+ responseObserver.onError(ex);
+ try {
+ this.iterator.close();
+ this.channel.close();
+ } catch (Exception exception) {
+ // best-effort cleanup; errors deliberately ignored
+ }
+ }
+
+ /*** Scanning loop end ***/
+ }
+
+ private Kv toKv(Kv.Builder kvBuilder, RocksDBSession.BackendColumn col,
+ byte[] position) { // builds one Kv; reusing the shared builder is safe because every field is overwritten
+ return kvBuilder
+ .setKey(ByteString.copyFrom(col.name))
+ .setValue(ByteString.copyFrom(col.value))
+ .setCode(HgStoreNodeUtil.toInt(position))
+ .build();
+ }
+
+ private void close() { // client asked to stop: acknowledge with a final empty page and complete the stream
+ this.isStop.set(true);
+ this.channel.close();
+ if (!this.finishFlag.get()) {
+ responseObserver.onNext(KvPageRes.newBuilder() // final empty page, marked over, so the client can finish cleanly
+ .addAllData(Collections.emptyList()) // emptyList(): type-safe; raw Collections.EMPTY_LIST is an unchecked raw-type use
+ .setOver(true)
+ .setTimes(++times)
+ .build()
+ );
+ }
+
+ this.finishServer();
+ }
+
+ private void next(ScanStreamReq request) { // sends one page per client request
+ this.initIterator(request); // no-op after the first request
+ KvPageRes.Builder resBuilder;
+
+ try {
+ resBuilder = this.channel.receive(); // blocks until the scan thread hands over a page
+ times++;
+ } catch (Exception e) {
+ String msg = "failed to poll a page of data, cause by:";
+ log.error(msg, e);
+ responseObserver.onError(HgGrpc.toErr(msg + e.getMessage()));
+ return;
+ }
+ boolean isOver = false;
+ if (resBuilder == null || resBuilder.getDataList() == null ||
+ resBuilder.getDataList().isEmpty()) {
+ isOver = true;
+ resBuilder = KvPageRes.newBuilder().addAllData(Collections.emptyList()); // emptyList(): avoids raw-typed Collections.EMPTY_LIST
+ }
+ if (!this.finishFlag.get()) {
+ responseObserver.onNext(resBuilder.setOver(isOver).setTimes(times).build());
+ }
+ if (isOver) {
+ this.finishServer();
+ }
+
+ }
+
+ private void finishServer() { // completes the response stream exactly once
+ if (!this.finishFlag.getAndSet(true)) {
+ responseObserver.onCompleted();
+ }
+ }
+
+ private void timeoutSever() { // errors the stream once when the client stalls; NOTE(review): name typo ("Sever" -> "Server"); private, rename together with the call in initIterator
+ if (!this.finishFlag.getAndSet(true)) {
+ String msg = "server wait time exceeds the threshold[" + waitTime +
+ "] seconds.";
+ responseObserver.onError(
+ HgGrpc.toErr(Status.Code.DEADLINE_EXCEEDED, msg));
+ }
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java
new file mode 100644
index 0000000000..0148fa0b25
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/ScanUtil.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.rocksdb.access.ScanIterator;
+import org.apache.hugegraph.store.business.SelectIterator;
+import org.apache.hugegraph.store.grpc.common.ScanMethod;
+import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+import org.apache.hugegraph.store.grpc.stream.SelectParam;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/02/22
+ *
+ * @version 1.0.0
+ */
+@Slf4j
+class ScanUtil {
+
+ private final static Map tableKeyMap = new HashMap<>(); // NOTE(review): never read or written in this class — candidate for removal
+
+ static ScanIterator getIterator(ScanStreamReq request, HgStoreWrapperEx wrapper) { // builds the iterator for a single-table streaming scan
+ String graph = request.getHeader().getGraph();
+ String table = request.getTable();
+ ScanMethod method = request.getMethod();
+ byte[] start = request.getStart().toByteArray();
+ byte[] end = request.getEnd().toByteArray();
+ byte[] prefix = request.getPrefix().toByteArray();
+ int partition = request.getCode();
+ int scanType = request.getScanType();
+ byte[] query = request.getQuery().toByteArray();
+
+ ScanIterator iter = null;
+ switch (method) {
+ case ALL:
+ iter = wrapper.scanAll(graph, table, query);
+ break;
+ case PREFIX:
+ iter = wrapper.scanPrefix(graph, partition, table, prefix, scanType, query);
+ break;
+ case RANGE:
+ iter = wrapper.scan(graph, partition, table, start, end, scanType, query);
+ break;
+ }
+ if (iter == null) { // unknown/unhandled ScanMethod degrades to an empty result
+ log.warn("Failed to create a scanIterator with ScanMethod: [" + method + "]");
+ iter = new EmptyIterator();
+ }
+ SelectParam selects = request.getSelects(); // NOTE(review): protobuf getters never return null; the guard below looks defensive only
+ List properties = null;
+ if (selects != null) {
+ properties = selects.getPropertiesList();
+ }
+ iter = new SelectIterator(iter, properties); // projects each record onto the selected properties
+ iter.seek(request.getPosition().toByteArray()); // resume from the client-supplied position
+ return iter;
+ }
+
+ static ScanIterator getIterator(ScanQuery sq, HgStoreWrapperEx wrapper) { // builds the iterator for one resolved ScanQuery
+ if (log.isDebugEnabled()) {
+ log.debug("{}", sq);
+ }
+
+ ScanIterator iter = null;
+ switch (sq.method) {
+ case ALL:
+ iter = wrapper.scanAll(sq.graph, sq.table, sq.query);
+ break;
+ case PREFIX:
+ iter = wrapper.scanPrefix(sq.graph, sq.keyCode, sq.table, sq.prefix, sq.scanType,
+ sq.query);
+ break;
+ case RANGE:
+ iter = wrapper.scan(sq.graph, sq.keyCode, sq.table, sq.start, sq.end, sq.scanType,
+ sq.query);
+ break;
+ }
+
+ if (iter == null) { // unknown ScanMethod: degrade to an empty iterator rather than NPE downstream
+ log.warn("Failed to create a scanIterator with ScanMethod: [" + sq.method + "]");
+ iter = new EmptyIterator();
+ }
+
+ iter.seek(sq.position); // resume from the recorded position
+
+ return iter;
+
+ }
+
+ static ScanQuery toSq(ScanStreamReq request) { // copies the request fields into a plain ScanQuery value object
+ ScanQuery res = ScanQuery.of();
+
+ res.graph = request.getHeader().getGraph();
+ res.table = request.getTable();
+ res.method = request.getMethod();
+
+ res.keyCode = request.getCode();
+ res.start = request.getStart().toByteArray();
+ res.end = request.getEnd().toByteArray();
+ res.prefix = request.getPrefix().toByteArray();
+ res.scanType = request.getScanType();
+ res.query = request.getQuery().toByteArray();
+ res.position = request.getPosition().toByteArray();
+
+ if (log.isDebugEnabled()) {
+ log.debug("{}", res);
+ }
+ //TODO: removed below.
+
+ return res;
+ }
+
+ static ScanIterator getIterator(String graph, ScanQueryRequest request,
+ HgStoreWrapperEx wrapper) { // sequential (batch) multi-query variant
+ ScanIteratorSupplier supplier = new ScanIteratorSupplier(graph, request, wrapper);
+ return BatchScanIterator.of(supplier, supplier.getLimitSupplier());
+ }
+
+ /**
+ * Multi-iterator that supports reading in parallel.
+ */
+ static ScanIterator getParallelIterator(String graph, ScanQueryRequest request,
+ HgStoreWrapperEx wrapper, ThreadPoolExecutor executor) {
+ ScanIteratorSupplier supplier = new ScanIteratorSupplier(graph, request, wrapper);
+ return ParallelScanIterator.of(supplier, supplier.getLimitSupplier(),
+ request, executor);
+ }
+
+ @NotThreadSafe
+ private static class ScanIteratorSupplier implements
+ Supplier> { // NOTE(review): generic parameters look stripped by tooling; presumably Supplier<KVPair<ScanQuery, ScanIterator>> — confirm against the repository
+
+ private final AtomicBoolean isEmpty = new AtomicBoolean(); // NOTE(review): never used — candidate for removal
+
+ private final String graph;
+ private final long perKeyMax; // per-key ceiling, enforced via FusingScanIterator in get()
+ private final long skipDegree;
+ private final HgStoreWrapperEx wrapper;
+ private long perKeyLimit; // defaulted to Integer.MAX_VALUE when <= 0
+ private List sqs = new LinkedList<>(); // used only by the legacy init()/get1() path
+ private Iterator sqIterator;
+
+ private ScanQueryProducer scanQueryProducer;
+ private Iterator scanQueryIterator;
+
+ ScanIteratorSupplier(String graph, ScanQueryRequest request, HgStoreWrapperEx wrapper) { // captures request limits and prepares the lazy query iterator
+ this.graph = graph;
+ this.perKeyLimit = request.getPerKeyLimit();
+ this.perKeyMax = request.getPerKeyMax();
+ this.skipDegree =
+ request.getSkipDegree() == 0 ? Integer.MAX_VALUE : request.getSkipDegree(); // 0 means "no skip degree"
+ this.wrapper = wrapper;
+
+ if (this.perKeyLimit <= 0) {
+ this.perKeyLimit = Integer.MAX_VALUE;
+ log.warn("as perKeyLimit <=0 so default perKeyLimit was effective: {}",
+ Integer.MAX_VALUE);
+ }
+ //init(request); // legacy eager path, kept for reference
+ init2(request);
+ }
+
+ private void init(ScanQueryRequest request) { // legacy: eagerly builds the cartesian product of tables x conditions (superseded by init2)
+ this.sqs = Arrays.stream(request.getTable().split(","))
+ .map(table -> {
+ if (table == null) {
+ return null;
+ }
+ if (table.isEmpty()) {
+ return null;
+ }
+
+ List list = request.getConditionList()
+ .stream()
+ .map(condition -> {
+ ScanQuery sq =
+ ScanQuery.of();
+ sq.graph = this.graph;
+ sq.table = table;
+ sq.method =
+ request.getMethod();
+ sq.scanType =
+ request.getScanType();
+ sq.query =
+ request.getQuery()
+ .toByteArray();
+ sq.position =
+ request.getPosition()
+ .toByteArray();
+
+ sq.keyCode =
+ condition.getCode();
+ sq.start =
+ condition.getStart()
+ .toByteArray();
+ sq.end = condition.getEnd()
+ .toByteArray();
+ sq.prefix =
+ condition.getPrefix()
+ .toByteArray();
+ sq.serialNo =
+ condition.getSerialNo();
+ return sq;
+ })
+ .filter(e -> e != null)
+ .collect(Collectors.toList());
+
+ if (list == null || list.isEmpty()) {
+ ScanQuery sq = ScanQuery.of();
+ sq.graph = this.graph;
+ sq.table = table;
+ sq.method = request.getMethod();
+ sq.scanType = request.getScanType();
+ sq.query = request.getQuery().toByteArray();
+ sq.position = request.getPosition().toByteArray();
+ list = Collections.singletonList(sq);
+ }
+ return list;
+
+ }
+ )
+ .flatMap(e -> e.stream())
+ .collect(Collectors.toList());
+
+ this.sqIterator = this.sqs.iterator();
+ }
+
+ //@Override
+ public KVPair get1() { // legacy counterpart of get(); only reachable from the disabled init() path
+ ScanIterator iterator = null;
+ ScanQuery query = null;
+ if (this.sqIterator != null && this.sqIterator.hasNext()) {
+ query = this.sqIterator.next();
+ iterator = getIterator(query, this.wrapper);
+ } else {
+ this.sqs.clear();
+ this.sqIterator = null;
+ }
+ return new KVPair<>(query, iterator); // a (null, null) pair signals exhaustion to the caller
+ }
+
+ public Supplier getLimitSupplier() { // per-key record cap: the smaller of perKeyLimit and skipDegree
+ return () -> Math.min(perKeyLimit, skipDegree);
+ }
+
+ /*----------- new -to add max --------------*/
+
+ private void init2(ScanQueryRequest request) { // current path: lazy per-table/per-condition query production via ScanQueryProducer
+ List tableList = Arrays.stream(request.getTable().split(","))
+ .filter(e -> e != null && !e.isEmpty())
+ .collect(Collectors.toList());
+
+ if (tableList.isEmpty()) {
+ throw new RuntimeException("table name is invalid");
+ }
+
+ String[] tables = tableList.toArray(new String[tableList.size()]);
+ this.scanQueryProducer = ScanQueryProducer.requestOf(this.graph, tables, request);
+ this.scanQueryIterator = this.scanQueryProducer.groupedIterator();
+ }
+
+ @Override
+ public KVPair get() { // supplies the next (query, iterator) pair; (null, null) when exhausted
+ ScanIterator iterator = null;
+ ScanQuery query = null;
+
+ if (this.scanQueryIterator != null && this.scanQueryIterator.hasNext()) {
+ ScanQuery[] queries = this.scanQueryIterator.next();
+ query = queries[0]; // the grouped queries share their metadata, so the first entry represents the group
+ iterator = FusingScanIterator.maxOf(this.perKeyMax, new Query2Iterator((queries))); // perKeyMax fuses (aborts) the iteration when exceeded
+ } else {
+ this.scanQueryProducer = null;
+ this.scanQueryIterator = null;
+ }
+ return new KVPair<>(query, iterator);
+ }
+
+ private class Query2Iterator implements Supplier { // lazily turns each grouped query into its ScanIterator
+
+ ScanQuery[] queries;
+ int index;
+
+ Query2Iterator(ScanQuery[] queries) {
+ this.queries = queries;
+ }
+
+ @Override
+ public ScanIterator get() { // returns the iterator for the next query, or null when all are consumed
+ if (index + 1 > queries.length) { // i.e. index >= queries.length
+ return null;
+ }
+ return getIterator(queries[index++], wrapper);
+ }
+ }
+
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java
new file mode 100644
index 0000000000..dcfc0549a8
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/GraphStoreImpl.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc.scan;
+
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.hugegraph.store.business.BusinessHandler;
+import org.apache.hugegraph.store.grpc.GraphStoreGrpc.GraphStoreImplBase;
+import org.apache.hugegraph.store.grpc.Graphpb;
+import org.apache.hugegraph.store.grpc.Graphpb.ResponseHeader;
+import org.apache.hugegraph.store.grpc.Graphpb.ScanPartitionRequest;
+import org.apache.hugegraph.store.grpc.Graphpb.ScanResponse;
+import org.apache.hugegraph.store.node.grpc.HgStoreNodeService;
+import org.apache.hugegraph.store.node.grpc.HgStoreStreamImpl;
+import org.lognet.springboot.grpc.GRpcService;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Implementation of the graphpb.proto service.
+ */
+@Slf4j
+@GRpcService
+public class GraphStoreImpl extends GraphStoreImplBase {
+
+ private final ResponseHeader okHeader = // NOTE(review): currently unused in this class
+ ResponseHeader.newBuilder().setError(
+ Graphpb.Error.newBuilder().setType(Graphpb.ErrorType.OK))
+ .build();
+ volatile BusinessHandler handler; // volatile is required for safe double-checked locking in getHandler()
+ @Autowired
+ private HgStoreNodeService storeService;
+ @Autowired
+ private HgStoreStreamImpl storeStream;
+
+ public BusinessHandler getHandler() { // lazy init via double-checked locking (the field must stay volatile)
+ if (this.handler == null) {
+ synchronized (this) {
+ if (this.handler == null) {
+ this.handler =
+ storeService.getStoreEngine().getBusinessHandler();
+ }
+ }
+ }
+ return this.handler;
+ }
+
+ public ThreadPoolExecutor getExecutor() {
+ return this.storeStream.getExecutor();
+ }
+
+ /**
+ * Streams reply messages; every message carries a seqNo.
+ * For each message it consumes, the client acknowledges that seqNo.
+ * The server decides how many packets to send based on the client's seqNo.
+ *
+ * @param ro
+ * @return
+ */
+ @Override
+ public StreamObserver scanPartition(
+ StreamObserver ro) {
+ return new ScanResponseObserver(ro, getHandler(), getExecutor());
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java
new file mode 100644
index 0000000000..b5b49d0398
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/scan/ScanResponseObserver.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.grpc.scan;
+
+import java.util.ArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hugegraph.store.business.BusinessHandler;
+import org.apache.hugegraph.store.business.GraphStoreIterator;
+import org.apache.hugegraph.store.grpc.Graphpb.Error;
+import org.apache.hugegraph.store.grpc.Graphpb.ErrorType;
+import org.apache.hugegraph.store.grpc.Graphpb.ResponseHeader;
+import org.apache.hugegraph.store.grpc.Graphpb.ScanPartitionRequest;
+import org.apache.hugegraph.store.grpc.Graphpb.ScanPartitionRequest.Request;
+import org.apache.hugegraph.store.grpc.Graphpb.ScanPartitionRequest.ScanType;
+import org.apache.hugegraph.store.grpc.Graphpb.ScanResponse;
+
+import com.google.protobuf.Descriptors;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class ScanResponseObserver implements
+ StreamObserver {
+
+ private static final int BATCH_SIZE = 100000; // records per response packet
+ private static final int MAX_PAGE = 8; // max un-acknowledged packets in flight
+ private static final Error ok = Error.newBuilder().setType(ErrorType.OK).build();
+ private static final ResponseHeader okHeader =
+ ResponseHeader.newBuilder().setError(ok).build();
+ private final BusinessHandler handler;
+ private final AtomicInteger nextSeqNo = new AtomicInteger(0); // seqNo of the next packet to send
+ private final AtomicInteger cltSeqNo = new AtomicInteger(0); // packets acknowledged by the client so far
+ private final ThreadPoolExecutor executor;
+ private final AtomicBoolean readOver = new AtomicBoolean(false); // set once the iterator is exhausted
+ private final LinkedBlockingQueue packages =
+ new LinkedBlockingQueue(MAX_PAGE * 2); // buffer between the reader and the sender
+ private final Descriptors.FieldDescriptor vertexField =
+ ScanResponse.getDescriptor().findFieldByNumber(3); // repeated vertex field of ScanResponse
+ private final Descriptors.FieldDescriptor edgeField =
+ ScanResponse.getDescriptor().findFieldByNumber(4); // repeated edge field of ScanResponse
+ private final ReentrantLock readLock = new ReentrantLock(); // serializes submission of read tasks
+ private final ReentrantLock sendLock = new ReentrantLock(); // serializes submission of send tasks
+ private StreamObserver sender;
+ private ScanPartitionRequest scanReq;
+ private GraphStoreIterator iter;
+ private volatile long leftCount; // records still allowed by the request limit
+ private volatile Future> sendTask;
+ private volatile Future> readTask;
+
+ /*
+ * 2022-11-01
+ * 1. onNext must be handled asynchronously so the gRPC call is not blocked
+ * 2. neither reading the iterator nor sending data should make threads wait
+ * 3. prepare the data to be sent, as far as possible, before sending
+ * */
+
+ /*
+ * 2022-11-02
+ * 1. "read": the thread reading the rocksdb iterator
+ * 2. "offer": the thread converting data and pushing it onto the blocking queue
+ * 3. "send": the thread taking data from the blocking queue and sending it, and waking up the read/send threads when no data was read
+ * */
+
+ public ScanResponseObserver(StreamObserver sender,
+ BusinessHandler handler,
+ ThreadPoolExecutor executor) { // one observer per scanPartition call
+ this.sender = sender;
+ this.handler = handler;
+ this.executor = executor;
+ }
+
+ private boolean readCondition() { // keep reading while the queue has room and the iterator is not exhausted
+ return packages.remainingCapacity() != 0 && !readOver.get();
+ }
+
+ private boolean readTaskCondition() { // additionally require that no read task is currently running
+ return readCondition() && (readTask == null || readTask.isDone());
+ }
+
+ private boolean sendCondition() { // flow control: stay at most MAX_PAGE packets ahead of the client's acks
+ return nextSeqNo.get() - cltSeqNo.get() < MAX_PAGE;
+ }
+
+ private boolean sendTaskCondition() { // additionally require that no send task is currently running
+ return sendCondition() && (sendTask == null || sendTask.isDone());
+ }
+
+ private void offer(Iterable data, boolean isVertex) { // packs one batch into a ScanResponse and queues it for sending
+ ScanResponse.Builder builder = ScanResponse.newBuilder();
+ builder.setHeader(okHeader).setSeqNo(nextSeqNo.get());
+ if (isVertex) {
+ builder = builder.setField(vertexField, data);
+ } else {
+ builder = builder.setField(edgeField, data);
+ }
+ ScanResponse response = builder.build();
+ packages.offer(response); // NOTE(review): return value ignored; capacity was checked in readCondition() and only one reader runs, but confirm a packet can never be dropped here
+ startSend();
+ }
+
+ private void startRead() { // submit a read task unless one is already running (tryLock keeps callers non-blocking)
+ if (readTaskCondition()) {
+ if (readLock.tryLock()) {
+ if (readTaskCondition()) { // re-check under the lock
+ readTask = executor.submit(rr);
+ }
+ readLock.unlock();
+ }
+ }
+ }
+
+ private void startSend() { // submit a send task unless one is already running
+ if (sendTaskCondition()) {
+ if (sendLock.tryLock()) {
+ if (sendTaskCondition()) { // re-check under the lock
+ sendTask = executor.submit(sr);
+ }
+ sendLock.unlock();
+ }
+ }
+ }
+
+ @Override
+ public void onNext(ScanPartitionRequest scanReq) { // first message starts the scan; subsequent messages are seqNo acknowledgements
+ if (scanReq.hasScanRequest() && !scanReq.hasReplyRequest()) {
+ this.scanReq = scanReq;
+ Request request = scanReq.getScanRequest();
+ long rl = request.getLimit();
+ leftCount = rl > 0 ? rl : Long.MAX_VALUE; // limit <= 0 means unlimited
+ iter = handler.scan(scanReq);
+ if (!iter.hasNext()) { // nothing to send: complete immediately
+ close();
+ sender.onCompleted();
+ } else {
+ readTask = executor.submit(rr);
+ }
+ } else {
+ cltSeqNo.getAndIncrement(); // client consumed one packet; widen the send window
+ startSend();
+ }
+ }
+
+ @Override
+ public void onError(Throwable t) { // client failed: tear everything down
+ close();
+ log.warn("receive client error:", t);
+ }
+
+ @Override
+ public void onCompleted() { // client finished: tear everything down
+ close();
+ }
+
+ private void close() { // best-effort teardown of both background tasks and the iterator
+ try {
+ nextSeqNo.set(0);
+ if (sendTask != null) {
+ sendTask.cancel(true);
+ }
+ if (readTask != null) {
+ readTask.cancel(true);
+ }
+ readOver.set(true);
+ if (iter != null) { iter.close(); } // guard: close() can run before any scan request created the iterator
+ } catch (Exception e) {
+ log.warn("on Complete with error:", e);
+ }
+ }
+
+ Runnable rr = new Runnable() { // reader task: drains the iterator into BATCH_SIZE packets
+ @Override
+ public void run() {
+ try {
+ if (readCondition()) {
+ synchronized (iter) { // only one reader may touch the iterator at a time
+ while (readCondition()) {
+ Request r = scanReq.getScanRequest();
+ ScanType t = r.getScanType();
+ boolean isVertex = t.equals(ScanType.SCAN_VERTEX);
+ ArrayList data = new ArrayList<>(BATCH_SIZE);
+ int count = 0;
+ while (iter.hasNext() && leftCount > 0) { // was "leftCount > -1": off-by-one that read one record beyond the requested limit
+ count++;
+ leftCount--;
+ T next = (T) iter.next();
+ data.add(next);
+ if (count >= BATCH_SIZE) {
+ offer(data, isVertex);
+ // data.clear();
+ break;
+ }
+ }
+ if (!(iter.hasNext() && leftCount > 0)) { // keep in sync with the loop condition above
+ if (data.size() > 0 &&
+ data.size() < BATCH_SIZE) {
+ offer(data, isVertex); // flush the final partial batch
+ }
+ readOver.set(true);
+ data = null;
+ //log.warn("scan complete , count: {},time: {}",
+ // sum, System.currentTimeMillis());
+ return;
+ }
+ }
+ }
+ }
+ } catch (Exception e) {
+ log.warn("read data with error: ", e);
+ sender.onError(e);
+ }
+ }
+ };
+
+ Runnable sr = () -> { // sender task: drains the queue to the client within the flow-control window
+ while (sendCondition()) {
+ ScanResponse response;
+ try {
+ if (readOver.get()) {
+ if ((response = packages.poll()) == null) {
+ sender.onCompleted(); // nothing left to read or send
+ } else {
+ sender.onNext(response);
+ nextSeqNo.incrementAndGet();
+ }
+ } else {
+ response = packages.poll(10,
+ TimeUnit.MILLISECONDS);
+ if (response != null) {
+ sender.onNext(response);
+ nextSeqNo.incrementAndGet();
+ startRead(); // a queue slot was freed; let the reader refill it
+ } else {
+ break;
+ }
+ }
+
+ } catch (InterruptedException e) {
+ break;
+ }
+ }
+ };
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/ContextClosedListener.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/ContextClosedListener.java
new file mode 100644
index 0000000000..e990acfe6f
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/ContextClosedListener.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.listener;
+
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.hugegraph.store.node.grpc.HgStoreStreamImpl;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.ApplicationListener;
+import org.springframework.context.event.ContextClosedEvent;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class ContextClosedListener implements ApplicationListener { // stops scan worker threads when the Spring context shuts down
+
+ @Autowired
+ HgStoreStreamImpl storeStream;
+
+ @Override
+ public void onApplicationEvent(ContextClosedEvent event) {
+ try {
+ log.info("closing scan threads....");
+ ThreadPoolExecutor executor = storeStream.getRealExecutor();
+ if (executor != null) {
+ try {
+ executor.shutdownNow(); // interrupts in-flight scan tasks
+ } catch (Exception e) {
+ // deliberately ignored: best-effort shutdown while the context is closing
+ }
+ }
+ } catch (Exception ignored) {
+ // deliberately ignored: shutdown must not fail the context close
+ } finally {
+ log.info("closed scan threads");
+ }
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java
new file mode 100644
index 0000000000..709e7fdb9d
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/listener/PdConfigureListener.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.listener;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.security.SecureRandom;
+import java.util.Iterator;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Properties;
+import java.util.function.Consumer;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hugegraph.pd.client.KvClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.springframework.boot.context.event.ApplicationEnvironmentPreparedEvent;
+import org.springframework.context.ApplicationListener;
+import org.springframework.context.ConfigurableApplicationContext;
+import org.springframework.core.env.MutablePropertySources;
+import org.springframework.core.env.PropertiesPropertySource;
+import org.yaml.snakeyaml.Yaml;
+
+import com.google.common.base.Charsets;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class PdConfigureListener implements
+ ApplicationListener {
+
+ private static final String CONFIG_PREFIX = "S:";
+ private static final String CONFIG_FIX_PREFIX = "S:FS";
+ private static final String TIMESTAMP_KEY = "S:Timestamp";
+ private static final String PD_CONFIG_FILE_NAME = "application-pd.yml";
+ private final String workDir = System.getProperty("user.dir");
+ private final String fileSeparator = System.getProperty("file.separator");
+ private final String configFilePath =
+ workDir + fileSeparator + "conf" + fileSeparator + PD_CONFIG_FILE_NAME;
+ private final String restartShellPath = workDir + fileSeparator + "bin" + fileSeparator
+ + "restart-hugegraph-store.sh";
+ private ConfigurableApplicationContext context;
+ private File pdConfFile;
+ // private String restartPath = workDir + fileSeparator + "lib" + fileSeparator;
+
+ @Override
+ public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) {
+ MutablePropertySources sources = event.getEnvironment().getPropertySources();
+ String pdAddress = event.getEnvironment().getProperty("pdserver.address");
+ pdConfFile = new File(configFilePath);
+ // String[] defaultProfiles = event.getEnvironment().getDefaultProfiles();
+ // String defaultProfile = defaultProfiles[0];
+ // PropertySource> appSource = null;
+ // for (PropertySource> source : sources) {
+ // log.info("source name:{},{}", source.getName(), source.getSource());
+ // boolean applicationConfig = source.getName().contains("application.yml");
+ // if (applicationConfig) {
+ // appSource = source;
+ // break;
+ // }
+ // }
+ // Map appSourceMap = (Map)
+ // appSource
+ // .getSource();
+ // OriginTrackedValue pdTrackedValue = appSourceMap.get("pdserver.address");
+ // String pdAddress = pdTrackedValue.getValue().toString();
+ KvClient client = new KvClient(PDConfig.of(pdAddress));
+ try {
+ ScanPrefixResponse response = client.scanPrefix(CONFIG_PREFIX);
+ Map kvsMap = response.getKvsMap();
+ String pdConfig = kvsMap.get(CONFIG_FIX_PREFIX);
+ if (!StringUtils.isEmpty(pdConfig)) {
+ updatePdConfig(sources, client, pdConfig);
+ } else {
+ // send local application-pd.yml to pd
+ if (pdConfFile.exists()) {
+ String commons = FileUtils.readFileToString(pdConfFile, Charsets.UTF_8);
+ log.info("send local application-pd.yml to pd....{}", commons);
+ client.put(CONFIG_FIX_PREFIX, commons);
+ }
+ }
+ log.info("Start listening for keys :" + TIMESTAMP_KEY);
+ client.listen(TIMESTAMP_KEY, (Consumer) o -> {
+ log.info("receive message to restart :" + o);
+ try {
+ // 优先更新最新配置文件,以免修改像端口之类的参数导致旧文件被优先加载
+ ScanPrefixResponse responseNew = client.scanPrefix(CONFIG_PREFIX);
+ Map kvsMapNew = responseNew.getKvsMap();
+ String config = kvsMapNew.get(CONFIG_FIX_PREFIX);
+ updatePdConfig(sources, client, config);
+ restart();
+ } catch (Exception e) {
+ log.error("start listener with error:", e);
+ }
+ });
+
+ } catch (Exception e) {
+ log.error("start listener with error:", e);
+ }
+
+ }
+
+ private void updatePdConfig(MutablePropertySources sources, KvClient client,
+ String pdConfig) throws
+ PDException,
+ IOException {
+ Properties configs = getYmlConfig(pdConfig);
+ String property = client.get(TIMESTAMP_KEY).getValue();
+ long pdLastModified = 0;
+ if (!StringUtils.isEmpty(property)) {
+ pdLastModified = Long.parseLong(property);
+ }
+ if (!pdConfFile.exists() || pdConfFile.lastModified() <= pdLastModified) {
+ log.info("update local application-pd.yml from pd....{}", pdConfig);
+ writeYml(pdConfig);
+ PropertiesPropertySource source = new PropertiesPropertySource("pd-config", configs);
+ sources.addFirst(source);
+ }
+ }
+
+ private Properties getYmlConfig(String yml) {
+ Yaml yaml = new Yaml();
+ Iterable load = yaml.loadAll(yml);
+ Iterator iterator = load.iterator();
+ Properties properties = new Properties();
+ while (iterator.hasNext()) {
+ Map next = (Map) iterator.next();
+ map2Properties(next, "", properties);
+ }
+ return properties;
+ }
+
+ private void map2Properties(Map map, String prefix, Properties properties) {
+
+ for (Map.Entry entry : map.entrySet()) {
+ String key = entry.getKey();
+ String newPrefix = StringUtils.isEmpty(prefix) ? key : prefix + "." + key;
+ Object value = entry.getValue();
+ if (!(value instanceof Map)) {
+ properties.put(newPrefix, value);
+ } else {
+ map2Properties((Map) value, newPrefix, properties);
+ }
+
+ }
+ }
+
+ public ConfigurableApplicationContext getContext() {
+ return context;
+ }
+
+ public void setContext(ConfigurableApplicationContext context) {
+ this.context = context;
+ }
+
+ // private void restartBySpringBootApplication() {
+ // ApplicationArguments args = context.getBean(ApplicationArguments.class);
+ // Thread thread = new Thread(() -> {
+ // context.close();
+ // try {
+ // Thread.sleep(5000L);
+ // } catch (InterruptedException e) {
+ //
+ // }
+ // StoreNodeApplication.start();
+ // });
+ // thread.setDaemon(false);
+ // thread.start();
+ // }
+
+ private void restart() throws InterruptedException, IOException {
+ ProcessBuilder builder;
+ String os = System.getProperty("os.name");
+ if (os.toLowerCase(Locale.getDefault()).contains("win")) {
+ builder = new ProcessBuilder("cmd", "/c", restartShellPath).inheritIO();
+ } else {
+ log.info("run shell {}", restartShellPath);
+ builder = new ProcessBuilder("sh", "-c", restartShellPath).inheritIO();
+ }
+ SecureRandom random = new SecureRandom();
+ int sleepTime = random.nextInt(60);
+ log.info("app will restart in {} seconds:", sleepTime);
+ Thread.sleep(sleepTime * 1000);
+ Process process = builder.start();
+ log.info("waiting restart.... {}", restartShellPath);
+ process.waitFor();
+ }
+
+ private void writeYml(String yml) throws IOException {
+ FileUtils.writeStringToFile(pdConfFile, yml, Charset.defaultCharset(), false);
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/DriveMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/DriveMetrics.java
new file mode 100644
index 0000000000..8fc578c054
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/DriveMetrics.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
/**
 * Reports per-filesystem-root disk-space figures (in MiB).
 *
 * 2021/11/23
 *
 * @deprecated superseded by the Micrometer-based metrics classes.
 */
@Deprecated
public class DriveMetrics {

    private static final long MIB = 1024 * 1024;

    // TODO: add a cache
    /**
     * Collects total/free/usable space for every filesystem root.
     *
     * @return map keyed by root path (backslashes stripped, so "C:\" becomes
     *         "C:"), each value holding "total_space", "free_space",
     *         "usable_space" (Long, MiB) and "size_unit" ("MB")
     */
    public Map<String, Map<String, Object>> metrics() {
        File[] rootDrive = File.listRoots();

        // listRoots() may return null when the set of roots cannot be determined.
        if (rootDrive == null) {
            return new LinkedHashMap<>(0);
        }

        Map<String, Map<String, Object>> metrics = new HashMap<>();

        for (File d : rootDrive) {
            Map<String, Object> buf = new HashMap<>();
            buf.put("total_space", d.getTotalSpace() / MIB);
            buf.put("free_space", d.getFreeSpace() / MIB);
            buf.put("usable_space", d.getUsableSpace() / MIB);
            buf.put("size_unit", "MB");

            metrics.put(d.getPath().replace("\\", ""), buf);
        }

        return metrics;
    }
}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java
new file mode 100644
index 0000000000..828b324b8c
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/GRpcExMetrics.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.hugegraph.store.node.grpc.GRpcServerConfig;
+import org.apache.hugegraph.store.node.util.HgExecutorUtil;
+
+import io.micrometer.core.instrument.Gauge;
+import io.micrometer.core.instrument.MeterRegistry;
+
+/**
+ * 2022/3/8
+ */
+public class GRpcExMetrics {
+
+ public final static String PREFIX = "grpc";
+ private final static ExecutorWrapper wrapper = new ExecutorWrapper();
+ private static MeterRegistry registry;
+
+ private GRpcExMetrics() {
+ }
+
+ public synchronized static void init(MeterRegistry meterRegistry) {
+ if (registry == null) {
+ registry = meterRegistry;
+ registerMeters();
+ }
+ }
+
+ private static void registerMeters() {
+ registerExecutor();
+
+ }
+
+ private static void registerExecutor() {
+
+ Gauge.builder(PREFIX + ".executor.pool.size", wrapper, (e) -> e.getPoolSize())
+ .description("The current number of threads in the pool.")
+ .register(registry);
+
+ Gauge.builder(PREFIX + ".executor.core.pool.size", wrapper, (e) -> e.getCorePoolSize())
+ .description(
+ "The largest number of threads that have ever simultaneously been in the " +
+ "pool.")
+ .register(registry);
+
+ Gauge.builder(PREFIX + ".executor.active.count", wrapper, (e) -> e.getActiveCount())
+ .description("The approximate number of threads that are actively executing tasks.")
+ .register(registry);
+ }
+
+ private static class ExecutorWrapper {
+
+ ThreadPoolExecutor pool;
+
+ void init() {
+ if (this.pool == null) {
+ pool = HgExecutorUtil.getThreadPoolExecutor(GRpcServerConfig.EXECUTOR_NAME);
+ }
+ }
+
+ double getPoolSize() {
+ init();
+ return this.pool == null ? 0d : this.pool.getPoolSize();
+ }
+
+ int getCorePoolSize() {
+ init();
+ return this.pool == null ? 0 : this.pool.getCorePoolSize();
+ }
+
+ int getActiveCount() {
+ init();
+ return this.pool == null ? 0 : this.pool.getActiveCount();
+ }
+
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java
new file mode 100644
index 0000000000..a9b53de2db
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/JRaftMetrics.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hugegraph.store.HgStoreEngine;
+import org.apache.hugegraph.store.node.util.HgRegexUtil;
+
+import com.alipay.sofa.jraft.core.NodeMetrics;
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
+
+import io.micrometer.core.instrument.Gauge;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.Tag;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * 2022/1/4
+ */
+@Slf4j
+public class JRaftMetrics {
+
+ public final static String PREFIX = "jraft";
+ public static final String LABELS = "quantile";
+ public static final String LABEL_50 = "0.5";
+ public static final String LABEL_75 = "0.75";
+ public static final String LABEL_95 = "0.95";
+ public static final String LABEL_98 = "0.98";
+ public static final String LABEL_99 = "0.99";
+ public static final String LABEL_999 = "0.999";
+ private final static HgStoreEngine storeEngine = HgStoreEngine.getInstance();
+ private final static AtomicInteger groups = new AtomicInteger(0);
+ private final static Tag handleDataTag = Tag.of("handle", "data");
+ // private final static Tag handleTxTag = Tag.of("handle", "tx"); //reservation
+ private final static Set groupSet = new HashSet<>();
+ private final static String REGEX_REFINE_REPLICATOR = "(replicator)(.+?:\\d+)(.*)";
+ private static MeterRegistry registry;
+
+ private JRaftMetrics() {
+ }
+
+ public synchronized static void init(MeterRegistry meterRegistry) {
+ if (registry == null) {
+ registry = meterRegistry;
+ registerMeters();
+ }
+ }
+
+ private static void registerMeters() {
+ Gauge.builder(PREFIX + ".groups", JRaftMetrics::updateGroups)
+ .description("Number of raft-groups, which handled the data of graph.")
+ .tags(Collections.singleton(handleDataTag))
+ .register(registry);
+
+ }
+
+ private static int updateGroups() {
+ int buf = getGroups();
+ if (buf != groups.get()) {
+ groups.set(buf);
+ registerNodeMetrics();
+ }
+ return buf;
+ }
+
+ private static int getGroups() {
+ return storeEngine.getRaftGroupCount();
+ }
+
+ private static Map getRaftGroupMetrics() {
+ Map map = storeEngine.getNodeMetrics();
+
+ if (map == null) {
+ return Collections.emptyMap();
+ }
+
+ return map;
+ }
+
+ private static void registerNodeMetrics() {
+ Map map = getRaftGroupMetrics();
+
+ synchronized (groupSet) {
+ map.forEach((group, metrics) -> {
+ if (!groupSet.add(group)) {
+ return;
+ }
+
+ metrics.getMetricRegistry().getGauges()
+ .forEach((k, v) -> registerGauge(group, k, v));
+ metrics.getMetricRegistry().getMeters()
+ .forEach((k, v) -> registerMeter(group, k, v));
+ metrics.getMetricRegistry().getCounters()
+ .forEach((k, v) -> registerCounter(group, k, v));
+ metrics.getMetricRegistry().getTimers()
+ .forEach((k, v) -> registerTimer(group, k, v));
+ metrics.getMetricRegistry().getHistograms()
+ .forEach((k, v) -> registerHistogram(group, k, v));
+ });
+ }
+
+ }
+
+ private static HistogramWrapper toWrapper(com.codahale.metrics.Histogram histogram) {
+ return new HistogramWrapper(histogram);
+ }
+
+ private static String refineMetrics(String name, List tags) {
+ if (name == null || name.isEmpty()) {
+ return name;
+ }
+
+ List buf = HgRegexUtil.toGroupValues(REGEX_REFINE_REPLICATOR, name);
+ String res = null;
+
+ /*Extracted name of replicator into a tag.*/
+
+ if (buf != null && buf.size() == 4) {
+ res = buf.get(1) + buf.get(3);
+
+ String value = buf.get(2);
+
+ if (value != null && value.startsWith("-")) {
+ value = value.substring(1);
+ }
+
+ tags.add(Tag.of("replicator", value));
+ } else {
+ res = name;
+ }
+
+ return res;
+ }
+
+ private static void registerHistogram(String group, String name,
+ com.codahale.metrics.Histogram histogram) {
+ if (histogram == null) {
+ return;
+ }
+
+ List tags = new LinkedList<>();
+ tags.add(handleDataTag);
+ tags.add(Tag.of("group", group));
+
+ name = refineMetrics(name, tags);
+
+ String baseName = PREFIX + "." + name.toLowerCase();
+
+ HistogramWrapper wrapper = toWrapper(histogram);
+
+ Gauge.builder(baseName + ".median", wrapper, (d) -> d.getSnapshot().getMedian())
+ .tags(tags).register(registry);
+ Gauge.builder(baseName + ".min", wrapper, (d) -> d.getSnapshot().getMin())
+ .tags(tags).register(registry);
+ Gauge.builder(baseName + ".max", wrapper, (d) -> d.getSnapshot().getMax())
+ .tags(tags).register(registry);
+ Gauge.builder(baseName + ".mean", wrapper, (d) -> d.getSnapshot().getMean())
+ .tags(tags).register(registry);
+
+ baseName = baseName + ".summary";
+ Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().getMedian())
+ .tags(tags).tag(LABELS, LABEL_50).register(registry);
+ Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get75thPercentile())
+ .tags(tags).tag(LABELS, LABEL_75).register(registry);
+ Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get95thPercentile())
+ .tags(tags).tag(LABELS, LABEL_95).register(registry);
+ Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get98thPercentile())
+ .tags(tags).tag(LABELS, LABEL_98).register(registry);
+ Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get99thPercentile())
+ .tags(tags).tag(LABELS, LABEL_99).register(registry);
+ Gauge.builder(baseName, wrapper, (d) -> d.getSnapshot().get999thPercentile())
+ .tags(tags).tag(LABELS, LABEL_999).register(registry);
+
+ Gauge.builder(baseName + ".sum", wrapper,
+ (d) -> Arrays.stream(d.getSnapshot().getValues()).sum())
+ .tags(tags).register(registry);
+ Gauge.builder(baseName + ".count", wrapper, (d) -> d.getSnapshot().size())
+ .tags(tags).register(registry);
+
+ }
+
+ private static void registerTimer(String group, String name, com.codahale.metrics.Timer timer) {
+ List tags = new LinkedList<>();
+ tags.add(handleDataTag);
+ tags.add(Tag.of("group", group));
+
+ name = refineMetrics(name, tags);
+
+ String baseName = PREFIX + "." + name.toLowerCase();
+
+ Gauge.builder(baseName + ".count", timer, Timer::getCount)
+ .tags(tags).register(registry);
+
+ Gauge.builder(baseName + ".timer", timer, Timer::getCount)
+ .tags(tags).tag("rate", "1m").register(registry);
+ Gauge.builder(baseName + ".timer", timer, Timer::getCount)
+ .tags(tags).tag("rate", "5m").register(registry);
+ Gauge.builder(baseName + ".timer", timer, Timer::getCount)
+ .tags(tags).tag("rate", "15m").register(registry);
+ Gauge.builder(baseName + ".timer", timer, Timer::getCount)
+ .tags(tags).tag("rate", "mean").register(registry);
+
+ }
+
+ private static void registerMeter(String group, String name, com.codahale.metrics.Meter meter) {
+ List tags = new LinkedList<>();
+ tags.add(handleDataTag);
+ tags.add(Tag.of("group", group));
+
+ name = refineMetrics(name, tags);
+
+ String baseName = PREFIX + "." + name.toLowerCase();
+
+ Gauge.builder(baseName + ".count", meter, Meter::getCount)
+ .tags(tags)
+ .register(registry);
+
+ Gauge.builder(baseName + ".rate", meter, Meter::getCount)
+ .tags(tags).tag("rate", "1m")
+ .register(registry);
+ Gauge.builder(baseName + ".rate", meter, Meter::getCount)
+ .tags(tags).tag("rate", "5m")
+ .register(registry);
+ Gauge.builder(baseName + ".rate", meter, Meter::getCount)
+ .tags(tags).tag("rate", "15m")
+ .register(registry);
+ Gauge.builder(baseName + ".rate", meter, Meter::getCount)
+ .tags(tags).tag("rate", "mean")
+ .register(registry);
+
+ }
+
+ private static void registerCounter(String group, String name,
+ com.codahale.metrics.Counter counter) {
+ List tags = new LinkedList<>();
+ tags.add(handleDataTag);
+ tags.add(Tag.of("group", group));
+
+ name = refineMetrics(name, tags);
+
+ name = name.toLowerCase();
+
+ //Adapted a counter to be a gauge.
+ Gauge.builder(PREFIX + "." + name + ".count", counter, Counter::getCount)
+ .tags(tags).register(registry);
+ }
+
+ private static void registerGauge(String group, String name,
+ com.codahale.metrics.Gauge> gauge) {
+ List tags = new LinkedList<>();
+ tags.add(handleDataTag);
+ tags.add(Tag.of("group", group));
+
+ name = refineMetrics(name, tags);
+
+ name = name.toLowerCase();
+
+ if (gauge.getValue() instanceof Number) {
+ Gauge.builder(PREFIX + "." + name, gauge, (g) -> ((Number) g.getValue()).doubleValue())
+ .tags(tags).register(registry);
+ } else {
+ Gauge.builder(PREFIX + "." + name, () -> 1.0)
+ .tags(tags)
+ .tag("str.gauge", String.valueOf(gauge.getValue())).register(registry);
+ }
+
+ }
+
+ private static class HistogramWrapper {
+
+ private final com.codahale.metrics.Histogram histogram;
+
+ private Snapshot snapshot;
+ private long ts = System.currentTimeMillis();
+
+ HistogramWrapper(com.codahale.metrics.Histogram histogram) {
+ this.histogram = histogram;
+ this.snapshot = this.histogram.getSnapshot();
+ }
+
+ Snapshot getSnapshot() {
+ if (System.currentTimeMillis() - this.ts > 30_000) {
+ this.snapshot = this.histogram.getSnapshot();
+ this.ts = System.currentTimeMillis();
+ }
+ return this.snapshot;
+ }
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsConfig.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsConfig.java
new file mode 100644
index 0000000000..7062ee7eb8
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsConfig.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import io.micrometer.core.instrument.MeterRegistry;
+
+/**
+ * 2021/11/24
+ */
+@Configuration
+public class MetricsConfig {
+
+ @Bean
+ public MeterRegistryCustomizer metricsCommonTags() {
+ return (registry) -> registry.config().commonTags("hg", "store");
+ }
+
+ @Bean
+ public MeterRegistryCustomizer registerMeters() {
+ return (registry) -> {
+ StoreMetrics.init(registry);
+ RocksDBMetrics.init(registry);
+ JRaftMetrics.init(registry);
+ ProcfsMetrics.init(registry);
+ GRpcExMetrics.init(registry);
+ };
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsUtil.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsUtil.java
new file mode 100644
index 0000000000..62c966fbfd
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/MetricsUtil.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+@Deprecated
+public class MetricsUtil {
+
+ private static final MetricRegistry registry = new MetricRegistry();
+
+ public static Gauge registerGauge(Class> clazz, String name,
+ Gauge gauge) {
+ return registry.register(MetricRegistry.name(clazz, name), gauge);
+ }
+
+ public static Counter registerCounter(Class> clazz, String name) {
+ return registry.counter(MetricRegistry.name(clazz, name));
+ }
+
+ public static Histogram registerHistogram(Class> clazz, String name) {
+ return registry.histogram(MetricRegistry.name(clazz, name));
+ }
+
+ public static Meter registerMeter(Class> clazz, String name) {
+ return registry.meter(MetricRegistry.name(clazz, name));
+ }
+
+ public static Timer registerTimer(Class> clazz, String name) {
+ return registry.timer(MetricRegistry.name(clazz, name));
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java
new file mode 100644
index 0000000000..ef39fd7720
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsEntry.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hugegraph.store.node.metrics;
+
+import static org.apache.hugegraph.store.node.metrics.ProcfsReader.ReadResult;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Objects;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// TODO: refer license later, 83% match, maybe refer to metrics-jvm-extras (0.1.3) APL2.0
+abstract class ProcfsEntry {
+
+ private static final Logger log = LoggerFactory.getLogger(ProcfsEntry.class);
+
+ private final Object lock = new Object();
+
+ private final ProcfsReader reader;
+
+ private long lastHandle = -1;
+
+ protected ProcfsEntry(ProcfsReader reader) {
+ this.reader = Objects.requireNonNull(reader);
+ }
+
+ protected final void collect() {
+ synchronized (lock) {
+ try {
+ final ReadResult result = reader.read();
+ if (result != null && (lastHandle == -1 || lastHandle != result.getReadTime())) {
+ reset();
+ handle(result.getLines());
+ lastHandle = result.getReadTime();
+ }
+ } catch (IOException e) {
+ reset();
+ log.warn("Failed reading '" + reader.getEntryPath() + "'!", e);
+ }
+ }
+ }
+
+ protected abstract void reset();
+
+ protected abstract void handle(Collection lines);
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java
new file mode 100644
index 0000000000..64064158ca
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsMetrics.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import io.micrometer.core.instrument.Gauge;
+import io.micrometer.core.instrument.MeterRegistry;
+
+/**
+ * 2022/3/1
+ *
+ * @version 0.1.0
+ */
+public class ProcfsMetrics {
+
+ public final static String PREFIX = "process_memory";
+ private final static ProcfsSmaps smaps = new ProcfsSmaps();
+ private static MeterRegistry registry;
+
+ private ProcfsMetrics() {
+
+ }
+
+ public synchronized static void init(MeterRegistry meterRegistry) {
+ if (registry == null) {
+ registry = meterRegistry;
+ registerMeters();
+ }
+ }
+
+ private static void registerMeters() {
+ registerProcessGauge();
+ }
+
+ private static void registerProcessGauge() {
+ Gauge.builder(PREFIX + ".rss.bytes", () -> smaps.get(ProcfsSmaps.KEY.RSS))
+ .register(registry);
+
+ Gauge.builder(PREFIX + ".pss.bytes", () -> smaps.get(ProcfsSmaps.KEY.PSS))
+ .register(registry);
+
+ Gauge.builder(PREFIX + ".vss.bytes", () -> smaps.get(ProcfsSmaps.KEY.VSS))
+ .register(registry);
+
+ Gauge.builder(PREFIX + ".swap.bytes", () -> smaps.get(ProcfsSmaps.KEY.SWAP))
+ .register(registry);
+
+ Gauge.builder(PREFIX + ".swappss.bytes", () -> smaps.get(ProcfsSmaps.KEY.SWAPPSS))
+ .register(registry);
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsReader.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsReader.java
new file mode 100644
index 0000000000..b378244f9f
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsReader.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hugegraph.store.node.metrics;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+
+// TODO: refer license later, 90% match, maybe refer to metrics-jvm-extras (0.1.3) APL2.0
class ProcfsReader {

    /** How long a read result is served from cache before re-reading the file. */
    /* default */ static final long CACHE_DURATION_MS = 100;
    // One reader instance per procfs entry name.
    private static final Map<String, ProcfsReader> instances = new HashMap<>();
    private static final Object instancesLock = new Object();
    // Cached file contents, keyed by the entry's file name, shared by all instances.
    private static final Map<Path, List<String>> data = new HashMap<>();
    private static final Object dataLock = new Object();
    private static final Path BASE = Paths.get("/proc", "self");
    private final Path entryPath;
    // False on non-Linux platforms: reads then return an empty list instead of failing.
    private final boolean osSupport;
    /* default */ long lastReadTime = -1;

    private ProcfsReader(String entry) {
        this(BASE, entry, false);
    }

    // Test constructor: reads from an arbitrary base dir and forces OS support on.
    /* default */ ProcfsReader(Path base, String entry) {
        this(base, entry, true);
    }

    private ProcfsReader(Path base, String entry, boolean forceOSSupport) {
        Objects.requireNonNull(base);
        Objects.requireNonNull(entry);

        this.entryPath = base.resolve(entry);

        this.osSupport = forceOSSupport
                         || System.getProperty("os.name").toLowerCase(Locale.ENGLISH)
                                  .startsWith("linux");
    }

    /**
     * Returns the shared reader for the given /proc/self entry, creating it on
     * first use.
     */
    /* default */
    static ProcfsReader getInstance(String entry) {
        Objects.requireNonNull(entry);

        synchronized (instancesLock) {
            ProcfsReader reader = instances.get(entry);
            if (reader == null) {
                reader = new ProcfsReader(entry);
                instances.put(entry, reader);
            }
            return reader;
        }
    }

    /* default */ Path getEntryPath() {
        return entryPath;
    }

    /* default */ ReadResult read() throws IOException {
        return read(currentTime());
    }

    /**
     * Reads the entry, serving a cached copy if the last read happened within
     * {@link #CACHE_DURATION_MS} of {@code currentTimeMillis}.
     *
     * @param currentTimeMillis the "now" used for cache decisions (injectable for tests)
     */
    /* default */ ReadResult read(long currentTimeMillis) throws IOException {
        synchronized (dataLock) {
            final Path key = getEntryPath().getFileName();

            final ReadResult readResult;
            if (lastReadTime == -1 || lastReadTime + CACHE_DURATION_MS < currentTimeMillis) {
                final List<String> lines = readPath(entryPath);
                cacheResult(key, lines);
                // Use the injected timestamp (not the wall clock) so that cache
                // behaviour is deterministic when a time is supplied by a caller/test.
                lastReadTime = currentTimeMillis;
                readResult = new ReadResult(lines, lastReadTime);
            } else {
                readResult = new ReadResult(data.get(key), lastReadTime);
            }
            return readResult;
        }
    }

    // Returns the file's lines, or an empty list on unsupported platforms.
    /* default */ List<String> readPath(Path path) throws IOException {
        Objects.requireNonNull(path);

        if (!osSupport) {
            return Collections.emptyList();
        }
        return Files.readAllLines(path);
    }

    /* default */ void cacheResult(Path key, List<String> lines) {
        Objects.requireNonNull(key);
        Objects.requireNonNull(lines);

        data.put(key, lines);
    }

    /* default */ long currentTime() {
        return System.currentTimeMillis();
    }

    /** Immutable snapshot of a read: the lines plus the time they were read. */
    /* default */ static class ReadResult {

        private final List<String> lines;

        private final long readTime;

        /* default */ ReadResult(List<String> lines, long readTime) {
            this.lines = Objects.requireNonNull(lines);
            this.readTime = readTime;
        }

        public long getReadTime() {
            return readTime;
        }

        public List<String> getLines() {
            return lines;
        }

    }

}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsSmaps.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsSmaps.java
new file mode 100644
index 0000000000..69edf52de0
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/ProcfsSmaps.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hugegraph.store.node.metrics;
+
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.LongUnaryOperator;
+
+// TODO: refer license later, 88% match, maybe refer to metrics-jvm-extras (0.1.0) APL2.0
+public class ProcfsSmaps extends ProcfsEntry {
+
+ private static final int KILOBYTE = 1024;
+ private final Map values = new HashMap<>();
+
+ public ProcfsSmaps() {
+ super(ProcfsReader.getInstance("smaps"));
+ }
+
+ /* default */ ProcfsSmaps(ProcfsReader reader) {
+ super(reader);
+ }
+
+ private static long parseKiloBytes(String line) {
+ Objects.requireNonNull(line);
+
+ return Long.parseLong(line.split("\\s+")[1]);
+ }
+
+ @Override
+ protected void reset() {
+ EnumSet.allOf(KEY.class).forEach(key -> values.put(key, new AtomicLong(-1)));
+ }
+
+ @Override
+ protected void handle(Collection lines) {
+ Objects.requireNonNull(lines);
+
+ for (final String line : lines) {
+ if (line.startsWith("Size:")) {
+ inc(KEY.VSS, parseKiloBytes(line) * KILOBYTE);
+ } else if (line.startsWith("Rss:")) {
+ inc(KEY.RSS, parseKiloBytes(line) * KILOBYTE);
+ } else if (line.startsWith("Pss:")) {
+ inc(KEY.PSS, parseKiloBytes(line) * KILOBYTE);
+ } else if (line.startsWith("Swap:")) {
+ inc(KEY.SWAP, parseKiloBytes(line) * KILOBYTE);
+ } else if (line.startsWith("SwapPss:")) {
+ inc(KEY.SWAPPSS, parseKiloBytes(line) * KILOBYTE);
+ }
+ }
+ }
+
+ public Long get(KEY key) {
+ Objects.requireNonNull(key);
+
+ collect();
+ return Long.valueOf(values.get(key).longValue());
+ }
+
+ private void inc(KEY key, long increment) {
+ Objects.requireNonNull(key);
+
+ values.get(key).getAndUpdate(new LongUnaryOperator() {
+
+ @Override
+ public long applyAsLong(long currentValue) {
+ return currentValue + increment + (currentValue == -1 ? 1 : 0);
+ }
+
+ });
+ }
+
+ public enum KEY {
+ /**
+ * Virtual set size
+ */
+ VSS,
+ /**
+ * Resident set size
+ */
+ RSS,
+ /**
+ * Proportional set size
+ */
+ PSS,
+ /**
+ * Paged out memory
+ */
+ SWAP,
+ /**
+ * Paged out memory accounting shared pages. Since Linux 4.3.
+ */
+ SWAPPSS
+ }
+
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java
new file mode 100644
index 0000000000..a18048aaef
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetrics.java
@@ -0,0 +1,421 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.HISTOGRAMS;
+import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.LABELS;
+import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.LABEL_50;
+import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.LABEL_95;
+import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.LABEL_99;
+import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.PREFIX;
+import static org.apache.hugegraph.store.node.metrics.RocksDBMetricsConst.TICKERS;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import org.apache.hugegraph.rocksdb.access.RocksDBFactory;
+import org.apache.hugegraph.rocksdb.access.RocksDBSession;
+import org.apache.hugegraph.store.HgStoreEngine;
+import org.apache.hugegraph.store.node.util.HgAssert;
+import org.rocksdb.HistogramData;
+import org.rocksdb.HistogramType;
+import org.rocksdb.MemoryUsageType;
+import org.rocksdb.Statistics;
+import org.rocksdb.TickerType;
+
+import io.micrometer.core.instrument.Gauge;
+import io.micrometer.core.instrument.Meter;
+import io.micrometer.core.instrument.MeterRegistry;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * 2021/12/30
+ *
+ * @version 1.2.0 on 2022/03/22 added auto meter removing when graph has been closed.
+ */
+@Slf4j
+public class RocksDBMetrics {
+
+ private final static RocksDBFactory rocksDBFactory = RocksDBFactory.getInstance();
+ private final static AtomicInteger rocks = new AtomicInteger(0);
+ private final static Set graphSet = new HashSet<>();
+ private final static HgStoreEngine storeEngine = HgStoreEngine.getInstance();
+ private final static MemoryUseWrapper memoryUseWrapper = new MemoryUseWrapper();
+ private final static Map statisticsHolder = new HashMap<>();
+ private final static Map histogramHolder = new HashMap<>();
+ private final static Map> graphMeterMap = new ConcurrentHashMap<>();
+ private static MeterRegistry registry;
+
+ private RocksDBMetrics() {
+ }
+
+ public static void init(final MeterRegistry meterRegistry) {
+ HgAssert.isArgumentNotNull(meterRegistry, "meterRegistry");
+
+ if (registry != null) {
+ return;
+
+ }
+
+ registry = meterRegistry;
+
+ Gauge.builder("rocks.num", RocksDBMetrics::updateRocks)
+ .description("Number of instance of RocksDB running in this node")
+ .register(registry);
+
+ registerMemoryUse();
+ }
+
+ private static int updateRocks() {
+ int buf = getRocks();
+
+ if (buf != rocks.get()) {
+ rocks.set(buf);
+ registerMeter();
+ }
+
+ return buf;
+ }
+
+ private static int getRocks() {
+ return rocksDBFactory.getSessionSize();
+ }
+
+ private static Set getGraphs() {
+ return rocksDBFactory.getGraphNames();
+ }
+
+ private static RocksDBSession getRocksDBSession(String graph) {
+ return rocksDBFactory.queryGraphDB(graph);
+ }
+
+ private static synchronized void registerMeter() {
+ Set graphs = getGraphs();
+
+ if (graphs == null) {
+ log.error(
+ "Failed to fetch the collection of names of graph, when invoking to register " +
+ "RocksDB gauge.");
+ return;
+ }
+
+ graphs.forEach(g -> {
+ if (!graphSet.add(g)) {
+ return;
+ }
+
+ StatisticsWrapper stats = new StatisticsWrapper(g);
+ statisticsHolder.put(g, stats);
+
+ for (final TickerType ticker : TICKERS) {
+ String gaugeName = PREFIX + "." + ticker.name().toLowerCase();
+
+ saveGraphMeter(g,
+ Gauge.builder(gaugeName, () -> stats.getTickerCount(ticker))
+ .description("RocksDB reported statistics for " + ticker.name())
+ .tag("graph", g)
+ .register(registry)
+ );
+ }
+
+ for (final HistogramType histogram : HISTOGRAMS) {
+ registerHistogram(g, registry, histogram, stats);
+ }
+
+ registrySessionRefNum(g);
+
+ });
+
+ graphSet.removeAll(graphSet.stream().filter(g -> !graphs.contains(g))
+ .peek(g -> removeMeters(g))
+ .collect(Collectors.toList())
+ );
+
+ }
+
+ private static void saveGraphMeter(String g, Meter meter) {
+ graphMeterMap.computeIfAbsent(g, k -> new HashSet<>()).add(meter);
+ }
+
+ private static void removeMeters(String g) {
+ graphMeterMap.getOrDefault(g, Collections.emptySet()).forEach(e -> registry.remove(e));
+ }
+
+ private static void registerHistogram(String graph, MeterRegistry registry, HistogramType
+ histogramType, StatisticsWrapper stats) {
+
+ HistogramDataWrapper histogram = new HistogramDataWrapper(histogramType,
+ () -> stats.getHistogramData(
+ histogramType));
+ histogramHolder.put(histogram, histogramType);
+
+ String baseName = PREFIX + "." + histogramType.name().toLowerCase();
+ saveGraphMeter(graph,
+ Gauge.builder(baseName + ".max", histogram, HistogramDataWrapper::getMax)
+ .tag("graph", graph).register(registry));
+ saveGraphMeter(graph, Gauge.builder(baseName + ".mean", histogram,
+ HistogramDataWrapper::getAverage).tag("graph", graph)
+ .register(registry));
+ saveGraphMeter(graph,
+ Gauge.builder(baseName + ".min", histogram, HistogramDataWrapper::getMin)
+ .tag("graph", graph).register(registry));
+
+ baseName = baseName + ".summary";
+ saveGraphMeter(graph, Gauge.builder(baseName, histogram, HistogramDataWrapper::getMedian)
+ .tags("graph", graph, LABELS, LABEL_50).register(registry));
+ saveGraphMeter(graph,
+ Gauge.builder(baseName, histogram, HistogramDataWrapper::getPercentile95)
+ .tags("graph", graph, LABELS, LABEL_95).register(registry));
+ saveGraphMeter(graph,
+ Gauge.builder(baseName, histogram, HistogramDataWrapper::getPercentile99)
+ .tags("graph", graph, LABELS, LABEL_99).register(registry));
+ saveGraphMeter(graph,
+ Gauge.builder(baseName + ".sum", histogram, HistogramDataWrapper::getSum)
+ .tags("graph", graph).register(registry));
+ saveGraphMeter(graph,
+ Gauge.builder(baseName + ".count", histogram, HistogramDataWrapper::getCount)
+ .tags("graph", graph).register(registry));
+
+ }
+
+ private static void registerMemoryUse() {
+ Gauge.builder(PREFIX + ".table.reader.total", memoryUseWrapper,
+ (e) -> e.getTableReaderTotal())
+ .description("The current number of threads in the pool.")
+ .register(registry);
+ Gauge.builder(PREFIX + ".mem.table.total", memoryUseWrapper, (e) -> e.getMemTableTotal())
+ .description("The current number of threads in the pool.")
+ .register(registry);
+ Gauge.builder(PREFIX + ".mem.table.unFlushed", memoryUseWrapper,
+ (e) -> e.getMemTableUnFlushed())
+ .description("The current number of threads in the pool.")
+ .register(registry);
+ Gauge.builder(PREFIX + ".cache.total", memoryUseWrapper, (e) -> e.getCacheTotal())
+ .description("The current number of threads in the pool.")
+ .register(registry);
+ Gauge.builder(PREFIX + ".block.cache.pinned-usage", memoryUseWrapper,
+ (e) -> e.getProperty("rocksdb.block-cache-pinned-usage"))
+ .description("The current number of threads in the pool.")
+ .register(registry);
+
+ }
+
+ private static void registrySessionRefNum(String graph) {
+
+ SessionWrapper sessionWrapper = new SessionWrapper(graph);
+ saveGraphMeter(graph,
+ Gauge.builder(PREFIX + ".session.ref.count", sessionWrapper,
+ (e) -> e.getRefCount() - 1)
+ .description("The current amount of reference of session")
+ .tag("ref", "self").tag("graph", graph)
+ .strongReference(true)
+ .register(registry)
+ );
+
+ }
+
+ private static T getValue(S stat, Function fun, T defaultValue) {
+ if (stat == null) {
+ return defaultValue;
+ }
+ return fun.apply(stat);
+ }
+
+ private static class SessionWrapper {
+
+ private final String graphName;
+
+ SessionWrapper(String graph) {
+
+ this.graphName = graph;
+ }
+
+ public int getRefCount() {
+ try (RocksDBSession session = getRocksDBSession(graphName)) {
+ if (session != null) {
+ return getValue(session, e -> e.getRefCount(), -1);
+ }
+ return 0;
+ }
+ }
+ }
+
+ private static class MemoryUseWrapper {
+
+ Map mems = null;
+ long lastTime = 0;
+
+ private void loadData() {
+ if (mems == null || System.currentTimeMillis() - lastTime > 30000) {
+ mems = storeEngine.getBusinessHandler().getApproximateMemoryUsageByType(null);
+ lastTime = System.currentTimeMillis();
+ }
+ }
+
+ public Long getTableReaderTotal() {
+ loadData();
+ return mems.get(MemoryUsageType.kTableReadersTotal);
+ }
+
+ public Long getMemTableTotal() {
+ loadData();
+ return mems.get(MemoryUsageType.kMemTableTotal);
+ }
+
+ public Long getCacheTotal() {
+ loadData();
+ return mems.get(MemoryUsageType.kCacheTotal);
+ }
+
+ public Long getMemTableUnFlushed() {
+ loadData();
+ return mems.get(MemoryUsageType.kMemTableUnFlushed);
+ }
+
+ public Long getProperty(String property) {
+ Set graphs = rocksDBFactory.getGraphNames();
+ if (graphs.size() > 0) {
+ try (RocksDBSession session = getRocksDBSession((String) graphs.toArray()[0])) {
+ if (session != null) {
+ return Long.parseLong(session.getProperty(property));
+ }
+ }
+ }
+ return null;
+ }
+ }
+
+ private static class StatisticsWrapper {
+
+ private final String graphName;
+ private final Map tickerCounteMap = new ConcurrentHashMap<>();
+ private final Map histogramDataMap =
+ new ConcurrentHashMap<>();
+ long lastTime = 0;
+
+ StatisticsWrapper(String graph) {
+
+ this.graphName = graph;
+ loadData();
+
+ }
+
+ private void loadData() {
+ if (System.currentTimeMillis() - lastTime < 30000) {
+ return;
+ }
+ lastTime = System.currentTimeMillis();
+ try (RocksDBSession session = getRocksDBSession(graphName)) {
+ if (session == null) {
+ // log.error("Failed to fetch the RocksDBSession with graph's name: [ " +
+ // graph + " ]");
+ return;
+ }
+
+ Statistics statistics = session.getRocksDbStats();
+ for (final TickerType ticker : TICKERS) {
+ tickerCounteMap.put(ticker, statistics.getTickerCount(ticker));
+ }
+
+ for (final HistogramType histogram : HISTOGRAMS) {
+ histogramDataMap.put(histogram, statistics.getHistogramData(histogram));
+ }
+ }
+ }
+
+ public long getTickerCount(TickerType tickerType) {
+ this.loadData();
+ return tickerCounteMap.containsKey(tickerType) ? tickerCounteMap.get(tickerType) : 0;
+ }
+
+ public HistogramData getHistogramData(HistogramType histogramType) {
+ this.loadData();
+ return histogramDataMap.get(histogramType);
+ }
+
+ }
+
+ private static class HistogramDataWrapper {
+
+ private final Supplier supplier;
+ private final HistogramType histogramType;
+ private HistogramData data = new HistogramData(0d, 0d, 0d, 0d, 0d);
+ private long ts = System.currentTimeMillis() - 30_000;
+
+ HistogramDataWrapper(HistogramType histogramType, Supplier supplier) {
+ this.supplier = supplier;
+ this.histogramType = histogramType;
+ }
+
+ private HistogramData getData() {
+ if (System.currentTimeMillis() - this.ts > 30_000) {
+ HistogramData buf = this.supplier.get();
+ if (buf != null) {
+ this.data = buf;
+ this.ts = System.currentTimeMillis();
+ }
+ }
+ return this.data;
+ }
+
+ public double getMedian() {
+ return getData().getMedian();
+ }
+
+ public double getPercentile95() {
+ return getData().getPercentile95();
+ }
+
+ public double getPercentile99() {
+ return getData().getPercentile99();
+ }
+
+ public double getAverage() {
+ return getData().getAverage();
+ }
+
+ public double getStandardDeviation() {
+ return getData().getStandardDeviation();
+ }
+
+ public double getMax() {
+ return getData().getMax();
+ }
+
+ public long getCount() {
+ return getData().getCount();
+ }
+
+ public long getSum() {
+ return getData().getSum();
+ }
+
+ public double getMin() {
+ return getData().getMin();
+ }
+
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java
new file mode 100644
index 0000000000..92df91861e
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/RocksDBMetricsConst.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import org.rocksdb.HistogramType;
+import org.rocksdb.TickerType;
+
+/**
+ * TODO: refer license later, 80% match, maybe refer to pantheon, This file need refactor!
+ */
+public final class RocksDBMetricsConst {
+
+ public static final String PREFIX = "rocks.stats";
+ public static final String LABELS = "quantile";
+ public static final String LABEL_50 = "0.5";
+ public static final String LABEL_95 = "0.95";
+ public static final String LABEL_99 = "0.99";
+
+ // Tickers - RocksDB equivalent of counters
+ public static final TickerType[] TICKERS = {
+ TickerType.BLOCK_CACHE_ADD,
+ TickerType.BLOCK_CACHE_HIT,
+ TickerType.BLOCK_CACHE_ADD_FAILURES,
+ TickerType.BLOCK_CACHE_INDEX_MISS,
+ TickerType.BLOCK_CACHE_INDEX_HIT,
+ TickerType.BLOCK_CACHE_INDEX_ADD,
+ TickerType.BLOCK_CACHE_INDEX_BYTES_INSERT,
+ TickerType.BLOCK_CACHE_INDEX_BYTES_EVICT,
+ TickerType.BLOCK_CACHE_FILTER_MISS,
+ TickerType.BLOCK_CACHE_FILTER_HIT,
+ TickerType.BLOCK_CACHE_FILTER_ADD,
+ TickerType.BLOCK_CACHE_FILTER_BYTES_INSERT,
+ TickerType.BLOCK_CACHE_FILTER_BYTES_EVICT,
+ TickerType.BLOCK_CACHE_DATA_MISS,
+ TickerType.BLOCK_CACHE_DATA_HIT,
+ TickerType.BLOCK_CACHE_DATA_ADD,
+ TickerType.BLOCK_CACHE_DATA_BYTES_INSERT,
+ TickerType.BLOCK_CACHE_BYTES_READ,
+ TickerType.BLOCK_CACHE_BYTES_WRITE,
+ TickerType.BLOOM_FILTER_USEFUL,
+ TickerType.PERSISTENT_CACHE_HIT,
+ TickerType.PERSISTENT_CACHE_MISS,
+ TickerType.SIM_BLOCK_CACHE_HIT,
+ TickerType.SIM_BLOCK_CACHE_MISS,
+ TickerType.MEMTABLE_HIT,
+ TickerType.MEMTABLE_MISS,
+ TickerType.GET_HIT_L0,
+ TickerType.GET_HIT_L1,
+ TickerType.GET_HIT_L2_AND_UP,
+ TickerType.COMPACTION_KEY_DROP_NEWER_ENTRY,
+ TickerType.COMPACTION_KEY_DROP_OBSOLETE,
+ TickerType.COMPACTION_KEY_DROP_RANGE_DEL,
+ TickerType.COMPACTION_KEY_DROP_USER,
+ TickerType.COMPACTION_RANGE_DEL_DROP_OBSOLETE,
+ TickerType.NUMBER_KEYS_WRITTEN,
+ TickerType.NUMBER_KEYS_READ,
+ TickerType.NUMBER_KEYS_UPDATED,
+ TickerType.BYTES_WRITTEN,
+ TickerType.BYTES_READ,
+ TickerType.NUMBER_DB_SEEK,
+ TickerType.NUMBER_DB_NEXT,
+ TickerType.NUMBER_DB_PREV,
+ TickerType.NUMBER_DB_SEEK_FOUND,
+ TickerType.NUMBER_DB_NEXT_FOUND,
+ TickerType.NUMBER_DB_PREV_FOUND,
+ TickerType.ITER_BYTES_READ,
+ TickerType.NO_FILE_CLOSES,
+ TickerType.NO_FILE_OPENS,
+ TickerType.NO_FILE_ERRORS,
+ // TickerType.STALL_L0_SLOWDOWN_MICROS,
+ // TickerType.STALL_MEMTABLE_COMPACTION_MICROS,
+ // TickerType.STALL_L0_NUM_FILES_MICROS,
+ TickerType.STALL_MICROS,
+ TickerType.DB_MUTEX_WAIT_MICROS,
+ TickerType.RATE_LIMIT_DELAY_MILLIS,
+ TickerType.NO_ITERATORS,
+ TickerType.NUMBER_MULTIGET_BYTES_READ,
+ TickerType.NUMBER_MULTIGET_KEYS_READ,
+ TickerType.NUMBER_MULTIGET_CALLS,
+ TickerType.NUMBER_FILTERED_DELETES,
+ TickerType.NUMBER_MERGE_FAILURES,
+ TickerType.BLOOM_FILTER_PREFIX_CHECKED,
+ TickerType.BLOOM_FILTER_PREFIX_USEFUL,
+ TickerType.NUMBER_OF_RESEEKS_IN_ITERATION,
+ TickerType.GET_UPDATES_SINCE_CALLS,
+ TickerType.BLOCK_CACHE_COMPRESSED_MISS,
+ TickerType.BLOCK_CACHE_COMPRESSED_HIT,
+ TickerType.BLOCK_CACHE_COMPRESSED_ADD,
+ TickerType.BLOCK_CACHE_COMPRESSED_ADD_FAILURES,
+ TickerType.WAL_FILE_SYNCED,
+ TickerType.WAL_FILE_BYTES,
+ TickerType.WRITE_DONE_BY_SELF,
+ TickerType.WRITE_DONE_BY_OTHER,
+ TickerType.WRITE_TIMEDOUT,
+ TickerType.WRITE_WITH_WAL,
+ TickerType.COMPACT_READ_BYTES,
+ TickerType.COMPACT_WRITE_BYTES,
+ TickerType.FLUSH_WRITE_BYTES,
+ TickerType.NUMBER_DIRECT_LOAD_TABLE_PROPERTIES,
+ TickerType.NUMBER_SUPERVERSION_ACQUIRES,
+ TickerType.NUMBER_SUPERVERSION_RELEASES,
+ TickerType.NUMBER_SUPERVERSION_CLEANUPS,
+ TickerType.NUMBER_BLOCK_COMPRESSED,
+ TickerType.NUMBER_BLOCK_DECOMPRESSED,
+ TickerType.NUMBER_BLOCK_NOT_COMPRESSED,
+ TickerType.MERGE_OPERATION_TOTAL_TIME,
+ TickerType.FILTER_OPERATION_TOTAL_TIME,
+ TickerType.ROW_CACHE_HIT,
+ TickerType.ROW_CACHE_MISS,
+ TickerType.READ_AMP_ESTIMATE_USEFUL_BYTES,
+ TickerType.READ_AMP_TOTAL_READ_BYTES,
+ TickerType.NUMBER_RATE_LIMITER_DRAINS,
+ TickerType.NUMBER_ITER_SKIP,
+ TickerType.NUMBER_MULTIGET_KEYS_FOUND,
+ };
+
+ // Histograms - treated as prometheus summaries
+ public static final HistogramType[] HISTOGRAMS = {
+ HistogramType.DB_GET,
+ HistogramType.DB_WRITE,
+ HistogramType.COMPACTION_TIME,
+ HistogramType.SUBCOMPACTION_SETUP_TIME,
+ HistogramType.TABLE_SYNC_MICROS,
+ HistogramType.COMPACTION_OUTFILE_SYNC_MICROS,
+ HistogramType.WAL_FILE_SYNC_MICROS,
+ HistogramType.MANIFEST_FILE_SYNC_MICROS,
+ HistogramType.TABLE_OPEN_IO_MICROS,
+ HistogramType.DB_MULTIGET,
+ HistogramType.READ_BLOCK_COMPACTION_MICROS,
+ HistogramType.READ_BLOCK_GET_MICROS,
+ HistogramType.WRITE_RAW_BLOCK_MICROS,
+ HistogramType.STALL_L0_SLOWDOWN_COUNT,
+ HistogramType.STALL_MEMTABLE_COMPACTION_COUNT,
+ HistogramType.STALL_L0_NUM_FILES_COUNT,
+ HistogramType.HARD_RATE_LIMIT_DELAY_COUNT,
+ HistogramType.SOFT_RATE_LIMIT_DELAY_COUNT,
+ HistogramType.NUM_FILES_IN_SINGLE_COMPACTION,
+ HistogramType.DB_SEEK,
+ HistogramType.WRITE_STALL,
+ HistogramType.SST_READ_MICROS,
+ HistogramType.NUM_SUBCOMPACTIONS_SCHEDULED,
+ HistogramType.BYTES_PER_READ,
+ HistogramType.BYTES_PER_WRITE,
+ HistogramType.BYTES_PER_MULTIGET,
+ HistogramType.BYTES_COMPRESSED,
+ HistogramType.BYTES_DECOMPRESSED,
+ HistogramType.COMPRESSION_TIMES_NANOS,
+ HistogramType.DECOMPRESSION_TIMES_NANOS,
+ HistogramType.READ_NUM_MERGE_OPERANDS,
+ };
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java
new file mode 100644
index 0000000000..d2f022e00a
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/StoreMetrics.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.store.HgStoreEngine;
+import org.apache.hugegraph.store.meta.Partition;
+
+import io.micrometer.core.instrument.Gauge;
+import io.micrometer.core.instrument.MeterRegistry;
+
+/**
+ * 2021/12/28
+ */
+public final class StoreMetrics {
+
+ public final static String PREFIX = "hg";
+ private final static HgStoreEngine storeEngine = HgStoreEngine.getInstance();
+ private final static AtomicInteger graphs = new AtomicInteger(0);
+ private static MeterRegistry registry;
+
+ private StoreMetrics() {
+ }
+
+ public synchronized static void init(MeterRegistry meterRegistry) {
+ if (registry == null) {
+ registry = meterRegistry;
+ registerMeters();
+ }
+ }
+
+ private static void registerMeters() {
+ Gauge.builder(PREFIX + ".up", () -> 1).register(registry);
+ Gauge.builder(PREFIX + ".graphs", StoreMetrics::updateGraphs)
+ .description("Number of graphs stored in this node")
+ .register(registry);
+
+ }
+
+ private static int getGraphs() {
+ return getGraphPartitions().size();
+ }
+
+ private static int updateGraphs() {
+ int buf = getGraphs();
+ if (buf != graphs.get()) {
+ graphs.set(buf);
+ registerPartitionGauge();
+ }
+ return buf;
+ }
+
+ private static void registerPartitionGauge() {
+ Map> map = getGraphPartitions();
+
+ map.forEach((k, v) -> Gauge.builder(PREFIX + ".partitions", new PartitionsGetter(k))
+ .description("Number of partitions stored in the node")
+ .tag("graph", k)
+ .register(registry));
+
+ }
+
+ private static int getPartitions(String graph) {
+ Map map = getGraphPartitions().get(graph);
+ if (map == null) {
+ return 0;
+ } else {
+ return map.size();
+ }
+ }
+
+ private static Map> getGraphPartitions() {
+ Map> map =
+ storeEngine.getPartitionManager().getPartitions();
+ if (map == null) {
+ return Collections.emptyMap();
+ }
+ return map;
+ }
+
+ private static class PartitionsGetter implements Supplier {
+
+ private final String graph;
+
+ PartitionsGetter(String graph) {
+ this.graph = graph;
+ }
+
+ @Override
+ public Number get() {
+ return getPartitions(this.graph);
+ }
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/SystemMetrics.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/SystemMetrics.java
new file mode 100644
index 0000000000..1935c90c05
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/metrics/SystemMetrics.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.metrics;
+
+import java.lang.management.ClassLoadingMXBean;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryUsage;
+import java.lang.management.ThreadMXBean;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hugegraph.util.Bytes;
+
/**
 * Collects JVM-level runtime metrics (memory, threads, class loading,
 * garbage collection) from the {@code java.lang.management} MX beans.
 *
 * <p>Metrics are grouped into named sections; memory values are reported
 * in megabytes, GC times in milliseconds.
 *
 * @deprecated superseded by the Micrometer-based metrics in this package
 */
@Deprecated
public class SystemMetrics {

    // Bytes per megabyte. Inlined so this class has no helper dependency;
    // assumes the former Bytes.MB constant was 1024 * 1024 — TODO confirm.
    private static final long MB = 1024 * 1024;

    /**
     * Committed non-heap memory in bytes, or 0 when the MX bean is
     * unavailable. Best-effort by design: metrics collection must never
     * break the caller, so any failure is swallowed.
     */
    private static long totalNonHeapMemory() {
        try {
            return ManagementFactory.getMemoryMXBean()
                                    .getNonHeapMemoryUsage()
                                    .getCommitted();
        } catch (Throwable ignored) {
            // Non-critical telemetry: report 0 rather than propagate.
            return 0;
        }
    }

    /**
     * Normalizes a GC name (e.g. "PS Scavenge") into a stable metric key
     * (e.g. "ps_scavenge").
     */
    private static String formatName(String name) {
        // Locale.ROOT keeps keys identical regardless of the default locale
        // (avoids e.g. the Turkish dotless-i surprise).
        return name.replace(" ", "_").toLowerCase(Locale.ROOT);
    }

    /**
     * Collects every section into one ordered map keyed by section name:
     * basic, heap, nonheap, thread, class_loading, garbage_collector.
     *
     * @return insertion-ordered map of section name to section metrics
     */
    public Map<String, Map<String, Object>> metrics() {
        Map<String, Map<String, Object>> metrics = new LinkedHashMap<>();
        metrics.put("basic", this.getBasicMetrics());
        metrics.put("heap", this.getHeapMetrics());
        metrics.put("nonheap", this.getNonHeapMetrics());
        metrics.put("thread", this.getThreadMetrics());
        metrics.put("class_loading", this.getClassLoadingMetrics());
        metrics.put("garbage_collector", this.getGarbageCollectionMetrics());

        return metrics;
    }

    /** Process-level basics: memory totals (MB), CPUs, uptime, load average. */
    private Map<String, Object> getBasicMetrics() {
        Map<String, Object> metrics = new LinkedHashMap<>();
        Runtime runtime = Runtime.getRuntime();
        // Heap allocated memory (measured in bytes)
        long total = runtime.totalMemory();
        // Heap free memory
        long free = runtime.freeMemory();
        long used = total - free;

        metrics.put("mem", (total + totalNonHeapMemory()) / MB);
        metrics.put("mem_total", total / MB);
        metrics.put("mem_used", used / MB);
        metrics.put("mem_free", free / MB);
        metrics.put("mem_unit", "MB");
        metrics.put("processors", runtime.availableProcessors());
        metrics.put("uptime", ManagementFactory.getRuntimeMXBean().getUptime());
        // May be negative (-1.0) on platforms without load-average support.
        metrics.put("systemload_average",
                    ManagementFactory.getOperatingSystemMXBean()
                                     .getSystemLoadAverage());
        return metrics;
    }

    /** Heap usage snapshot in MB: committed, init, used, max. */
    private Map<String, Object> getHeapMetrics() {
        Map<String, Object> metrics = new LinkedHashMap<>();
        MemoryUsage memoryUsage = ManagementFactory.getMemoryMXBean()
                                                   .getHeapMemoryUsage();
        metrics.put("committed", memoryUsage.getCommitted() / MB);
        metrics.put("init", memoryUsage.getInit() / MB);
        metrics.put("used", memoryUsage.getUsed() / MB);
        metrics.put("max", memoryUsage.getMax() / MB);
        return metrics;
    }

    /** Non-heap (metaspace, code cache, ...) usage snapshot in MB. */
    private Map<String, Object> getNonHeapMetrics() {
        Map<String, Object> metrics = new LinkedHashMap<>();
        MemoryUsage memoryUsage = ManagementFactory.getMemoryMXBean()
                                                   .getNonHeapMemoryUsage();
        metrics.put("committed", memoryUsage.getCommitted() / MB);
        metrics.put("init", memoryUsage.getInit() / MB);
        metrics.put("used", memoryUsage.getUsed() / MB);
        metrics.put("max", memoryUsage.getMax() / MB);
        return metrics;
    }

    /** Thread counts: peak, daemon, total started, and currently live. */
    private Map<String, Object> getThreadMetrics() {
        Map<String, Object> metrics = new LinkedHashMap<>();
        ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
        metrics.put("peak", threadMxBean.getPeakThreadCount());
        metrics.put("daemon", threadMxBean.getDaemonThreadCount());
        metrics.put("total_started", threadMxBean.getTotalStartedThreadCount());
        metrics.put("count", threadMxBean.getThreadCount());
        return metrics;
    }

    /** Class-loading counters: currently loaded, total loaded, unloaded. */
    private Map<String, Object> getClassLoadingMetrics() {
        Map<String, Object> metrics = new LinkedHashMap<>();
        ClassLoadingMXBean classLoadingMxBean = ManagementFactory
                .getClassLoadingMXBean();
        metrics.put("count", classLoadingMxBean.getLoadedClassCount());
        metrics.put("loaded", classLoadingMxBean.getTotalLoadedClassCount());
        metrics.put("unloaded", classLoadingMxBean.getUnloadedClassCount());
        return metrics;
    }

    /**
     * Per-collector GC stats: "&lt;name&gt;_count" collections and
     * "&lt;name&gt;_time" cumulative milliseconds, plus the time unit.
     */
    private Map<String, Object> getGarbageCollectionMetrics() {
        Map<String, Object> metrics = new LinkedHashMap<>();
        List<GarbageCollectorMXBean> gcMxBeans = ManagementFactory
                .getGarbageCollectorMXBeans();
        for (GarbageCollectorMXBean gcMxBean : gcMxBeans) {
            String name = formatName(gcMxBean.getName());
            metrics.put(name + "_count", gcMxBean.getCollectionCount());
            metrics.put(name + "_time", gcMxBean.getCollectionTime());
        }
        metrics.put("time_unit", "ms");
        return metrics;
    }

}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java
new file mode 100644
index 0000000000..b971e117d0
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/model/HgNodeStatus.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.model;
+
+import java.util.Objects;
+
+/**
+ * created on 2021/11/1
+ */
+public class HgNodeStatus {
+
+ private int status;
+ private String text;
+
+ public HgNodeStatus(int status, String text) {
+ this.status = status;
+ this.text = text;
+ }
+
+ public int getStatus() {
+ return status;
+ }
+
+ public HgNodeStatus setStatus(int status) {
+ this.status = status;
+ return this;
+ }
+
+ public String getText() {
+ return text;
+ }
+
+ public HgNodeStatus setText(String text) {
+ this.text = text;
+ return this;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ HgNodeStatus that = (HgNodeStatus) o;
+ return status == that.status && Objects.equals(text, that.text);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(status, text);
+ }
+
+ @Override
+ public String toString() {
+ return "HgNodeStatus{" +
+ "status=" + status +
+ ", text='" + text + '\'' +
+ '}';
+ }
+}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java
new file mode 100644
index 0000000000..dff6a0406e
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Base58.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.util;
+
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
+
/**
 * A minimal Base58 codec using the Bitcoin alphabet, without checksum.
 *
 * <p>Base58 drops the visually ambiguous characters 0, O, I and l from
 * Base62. Leading zero bytes are preserved as leading '1' characters.
 *
 * <p>NOTE(review): this implementation closely matches the well-known
 * bitcoinj Base58 codec (Apache-2.0) — confirm provenance and add proper
 * attribution before release (see the original "78% match" TODO).
 */
public class Base58 {

    /** The Bitcoin Base58 alphabet; index in this array == digit value. */
    public static final char[] ALPHABET =
            "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz".toCharArray();

    /** Maps an ASCII code point to its digit value, or -1 when invalid. */
    private static final int[] INDEXES = new int[128];

    static {
        for (int i = 0; i < INDEXES.length; i++) {
            INDEXES[i] = -1;
        }
        for (int i = 0; i < ALPHABET.length; i++) {
            INDEXES[ALPHABET[i]] = i;
        }
    }

    /**
     * Encodes the given bytes in base58. No checksum is appended.
     *
     * @param input raw bytes; not modified (a working copy is taken)
     * @return the base58 string, empty for empty input
     */
    public static String encode(byte[] input) {
        if (input.length == 0) {
            return "";
        }
        // Copy first: divmod58 destroys its argument in place.
        input = copyOfRange(input, 0, input.length);
        // Count leading zeroes; they carry no numeric weight and are
        // re-added as '1' characters at the end.
        int zeroCount = 0;
        while (zeroCount < input.length && input[zeroCount] == 0) {
            ++zeroCount;
        }
        // Upper bound on output length: base58 expands by ~1.37x, so 2x
        // is always sufficient. Digits are written back-to-front.
        byte[] temp = new byte[input.length * 2];
        int j = temp.length;

        int startAt = zeroCount;
        while (startAt < input.length) {
            byte mod = divmod58(input, startAt);
            if (input[startAt] == 0) {
                // The quotient lost its leading byte; skip it next round.
                ++startAt;
            }
            temp[--j] = (byte) ALPHABET[mod];
        }

        // Strip extra '1' if there are some after encoding.
        while (j < temp.length && temp[j] == ALPHABET[0]) {
            ++j;
        }
        // Add as many leading '1' as there were leading zeros.
        while (--zeroCount >= 0) {
            temp[--j] = (byte) ALPHABET[0];
        }

        byte[] output = copyOfRange(temp, j, temp.length);
        return new String(output, StandardCharsets.US_ASCII);
    }

    /**
     * Decodes a base58 string back into raw bytes. Leading '1' characters
     * become leading zero bytes.
     *
     * @throws IllegalArgumentException if the input contains a character
     *         outside the base58 alphabet
     */
    public static byte[] decode(String input) throws IllegalArgumentException {
        if (input.length() == 0) {
            return new byte[0];
        }
        byte[] input58 = new byte[input.length()];
        // Transform the String to a base58 digit sequence
        for (int i = 0; i < input.length(); ++i) {
            char c = input.charAt(i);

            int digit58 = -1;
            // FIX: dropped the always-true 'c >= 0' (char is unsigned);
            // only the upper bound guards the INDEXES lookup.
            if (c < 128) {
                digit58 = INDEXES[c];
            }
            if (digit58 < 0) {
                throw new IllegalArgumentException("Illegal character " + c + " at " + i);
            }

            input58[i] = (byte) digit58;
        }
        // Count leading zeroes (leading '1' characters).
        int zeroCount = 0;
        while (zeroCount < input58.length && input58[zeroCount] == 0) {
            ++zeroCount;
        }
        // Base58 never expands when converted to base256, so the input
        // length is a safe output bound. Bytes are written back-to-front.
        byte[] temp = new byte[input.length()];
        int j = temp.length;

        int startAt = zeroCount;
        while (startAt < input58.length) {
            byte mod = divmod256(input58, startAt);
            if (input58[startAt] == 0) {
                ++startAt;
            }

            temp[--j] = mod;
        }
        // Do no add extra leading zeroes, move j to first non null byte.
        while (j < temp.length && temp[j] == 0) {
            ++j;
        }

        // Restore exactly zeroCount leading zero bytes.
        return copyOfRange(temp, j - zeroCount, temp.length);
    }

    /** Decodes to a non-negative BigInteger (magnitude interpretation). */
    public static BigInteger decodeToBigInteger(String input) throws IllegalArgumentException {
        return new BigInteger(1, decode(input));
    }

    //
    // number -> number / 58, returns number % 58
    // Operates in place on the base256 digits starting at startAt.
    //
    private static byte divmod58(byte[] number, int startAt) {
        int remainder = 0;
        for (int i = startAt; i < number.length; i++) {
            int digit256 = (int) number[i] & 0xFF;
            int temp = remainder * 256 + digit256;

            number[i] = (byte) (temp / 58);

            remainder = temp % 58;
        }

        return (byte) remainder;
    }

    //
    // number -> number / 256, returns number % 256
    // Operates in place on the base58 digits starting at startAt.
    //
    private static byte divmod256(byte[] number58, int startAt) {
        int remainder = 0;
        for (int i = startAt; i < number58.length; i++) {
            int digit58 = (int) number58[i] & 0xFF;
            int temp = remainder * 58 + digit58;

            number58[i] = (byte) (temp / 256);

            remainder = temp % 256;
        }

        return (byte) remainder;
    }

    // Local equivalent of Arrays.copyOfRange (kept to avoid a new import).
    private static byte[] copyOfRange(byte[] source, int from, int to) {
        byte[] range = new byte[to - from];
        System.arraycopy(source, from, range, 0, range.length);

        return range;
    }

}
+
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Err.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Err.java
new file mode 100644
index 0000000000..9f156d69a0
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/Err.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.util;
+
/**
 * A tiny immutable error wrapper carrying just a message; construct via
 * the {@link #of(String)} factory.
 */
class Err {

    private final String msg;

    private Err(String msg) {
        this.msg = msg;
    }

    /** Static factory for an error with the given message. */
    public static Err of(String msg) {
        return new Err(msg);
    }

    @Override
    public String toString() {
        return "Err{msg='" + this.msg + "'}";
    }
}
diff --git a/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java
new file mode 100644
index 0000000000..7df843a956
--- /dev/null
+++ b/hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/util/HgAssert.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.node.util;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.function.Supplier;
+
+public final class HgAssert {
+
    /**
     * Throws IllegalArgumentException carrying {@code message} when
     * {@code expression} is false; a null message is itself rejected.
     *
     * @deprecated presumably in favor of the Supplier-based overload,
     *             which builds the message lazily — confirm intent
     */
    @Deprecated
    public static void isTrue(boolean expression, String message) {
        if (message == null) {
            throw new IllegalArgumentException("message is null");
        }
        if (!expression) {
            throw new IllegalArgumentException(message);
        }
    }
+
    /**
     * Throws IllegalArgumentException when {@code expression} is false,
     * building the message lazily via {@code msg} (only evaluated on
     * failure); a null supplier is itself rejected.
     *
     * NOTE(review): the Supplier type parameter was lost in extraction —
     * presumably Supplier&lt;String&gt;; confirm against the source.
     */
    public static void isTrue(boolean expression, Supplier msg) {
        if (msg == null) {
            throw new IllegalArgumentException("message supplier is null");
        }
        if (!expression) {
            throw new IllegalArgumentException(msg.get());
        }
    }
+
    /**
     * Throws the caller-supplied exception {@code e} when
     * {@code expression} is false; a null exception is rejected.
     *
     * @deprecated note: the exception is constructed eagerly by the caller
     *             even on the success path — prefer the Supplier overload
     */
    @Deprecated
    public static void isTrue(boolean expression, RuntimeException e) {
        if (e == null) {
            throw new IllegalArgumentException("e is null");
        }
        if (!expression) {
            throw e;
        }
    }
+
    /** Inverse of {@link #isTrue(boolean, String)}: fails when true. */
    public static void isFalse(boolean expression, String message) {
        isTrue(!expression, message);
    }
+
+ public static void isFalse(boolean expression, Supplier