From 83086b1999c4bbf252ddcfec7a9ca4efa0aa1044 Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Fri, 26 Apr 2024 13:48:31 +0800 Subject: [PATCH 1/8] git add hugegraph-server/hugegraph-hstore/ --- hugegraph-server/hugegraph-hstore/pom.xml | 50 ++ .../backend/store/hstore/HstoreFeatures.java | 133 +++ .../backend/store/hstore/HstoreMetrics.java | 44 + .../backend/store/hstore/HstoreOptions.java | 52 ++ .../backend/store/hstore/HstoreProvider.java | 54 ++ .../backend/store/hstore/HstoreSessions.java | 208 +++++ .../store/hstore/HstoreSessionsImpl.java | 802 +++++++++++++++++ .../backend/store/hstore/HstoreStore.java | 825 ++++++++++++++++++ .../backend/store/hstore/HstoreTable.java | 732 ++++++++++++++++ .../backend/store/hstore/HstoreTables.java | 214 +++++ .../backend/store/hstore/fake/IdClient.java | 54 ++ .../backend/store/hstore/fake/PDIdClient.java | 48 + 12 files changed, 3216 insertions(+) create mode 100644 hugegraph-server/hugegraph-hstore/pom.xml create mode 100644 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreFeatures.java create mode 100644 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreMetrics.java create mode 100644 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreOptions.java create mode 100644 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreProvider.java create mode 100755 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessions.java create mode 100755 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java create mode 100644 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreStore.java create mode 100755 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java 
create mode 100644 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTables.java create mode 100644 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/IdClient.java create mode 100644 hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/PDIdClient.java diff --git a/hugegraph-server/hugegraph-hstore/pom.xml b/hugegraph-server/hugegraph-hstore/pom.xml new file mode 100644 index 0000000000..f777eb05ef --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/pom.xml @@ -0,0 +1,50 @@ + + + + + hugegraph-server + org.apache.hugegraph + ${revision} + ../pom.xml + + + 4.0.0 + + hugegraph-hstore + + + + org.apache.hugegraph + hugegraph-core + ${revision} + + + org.apache.hugegraph + hg-store-client + ${revision} + + + org.apache.hugegraph + hg-pd-client + ${revision} + + + + diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreFeatures.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreFeatures.java new file mode 100644 index 0000000000..3af6f803bc --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreFeatures.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.backend.store.hstore; + +import org.apache.hugegraph.backend.store.BackendFeatures; + +public class HstoreFeatures implements BackendFeatures { + + @Override + public boolean supportsScanToken() { + return false; + } + + @Override + public boolean supportsScanKeyPrefix() { + return true; + } + + @Override + public boolean supportsScanKeyRange() { + return true; + } + + @Override + public boolean supportsQuerySchemaByName() { + return false; + } + + @Override + public boolean supportsQueryByLabel() { + return false; + } + + @Override + public boolean supportsQueryWithInCondition() { + return false; + } + + @Override + public boolean supportsQueryWithRangeCondition() { + return true; + } + + @Override + public boolean supportsQuerySortByInputIds() { + return true; + } + + @Override + public boolean supportsQueryWithOrderBy() { + return true; + } + + @Override + public boolean supportsQueryWithContains() { + return false; + } + + @Override + public boolean supportsQueryWithContainsKey() { + return false; + } + + @Override + public boolean supportsQueryByPage() { + return true; + } + + @Override + public boolean supportsDeleteEdgeByLabel() { + return false; + } + + @Override + public boolean supportsUpdateVertexProperty() { + // Vertex properties are stored in a cell(column value) + return false; + } + + @Override + public boolean supportsMergeVertexProperty() { + return false; + } + + @Override + public boolean supportsUpdateEdgeProperty() { + // Edge properties are stored in a cell(column value) + return false; + 
} + + @Override + public boolean supportsTransaction() { + return false; + } + + @Override + public boolean supportsNumberType() { + return false; + } + + @Override + public boolean supportsAggregateProperty() { + return false; + } + + @Override + public boolean supportsTtl() { + return false; + } + + @Override + public boolean supportsOlapProperties() { + return true; + } + + @Override + public boolean supportsTaskAndServerVertex() { return true; } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreMetrics.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreMetrics.java new file mode 100644 index 0000000000..c5f1808876 --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreMetrics.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.backend.store.hstore; + +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.backend.store.BackendMetrics; +import org.apache.hugegraph.util.InsertionOrderUtil; + +public class HstoreMetrics implements BackendMetrics { + + private final List dbs; + private final HstoreSessions.Session session; + + public HstoreMetrics(List dbs, + HstoreSessions.Session session) { + this.dbs = dbs; + this.session = session; + } + + @Override + public Map metrics() { + Map results = InsertionOrderUtil.newMap(); + // TODO(metrics): fetch more metrics from PD + results.put(NODES, session.getActiveStoreSize()); + return results; + } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreOptions.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreOptions.java new file mode 100644 index 0000000000..6de800697c --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreOptions.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.backend.store.hstore; + +import static org.apache.hugegraph.config.OptionChecker.disallowEmpty; + +import org.apache.hugegraph.config.ConfigOption; +import org.apache.hugegraph.config.OptionHolder; + +public class HstoreOptions extends OptionHolder { + + public static final ConfigOption PARTITION_COUNT = new ConfigOption<>( + "hstore.partition_count", + "Number of partitions, which PD controls partitions based on.", + disallowEmpty(), + 0 + ); + public static final ConfigOption SHARD_COUNT = new ConfigOption<>( + "hstore.shard_count", + "Number of copies, which PD controls partition copies based on.", + disallowEmpty(), + 0 + ); + private static volatile HstoreOptions instance; + + private HstoreOptions() { + super(); + } + + public static synchronized HstoreOptions instance() { + if (instance == null) { + instance = new HstoreOptions(); + instance.registerOptions(); + } + return instance; + } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreProvider.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreProvider.java new file mode 100644 index 0000000000..f9d48d36c9 --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreProvider.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.backend.store.hstore; + +import org.apache.hugegraph.backend.store.AbstractBackendStoreProvider; +import org.apache.hugegraph.backend.store.BackendStore; +import org.apache.hugegraph.config.HugeConfig; + +public class HstoreProvider extends AbstractBackendStoreProvider { + + protected String namespace() { + return this.graph(); + } + + @Override + public String type() { + return "hstore"; + } + + @Override + public String driverVersion() { + return "1.13"; + } + + @Override + protected BackendStore newSchemaStore(HugeConfig config, String store) { + return new HstoreStore.HstoreSchemaStore(this, this.namespace(), store); + } + + @Override + protected BackendStore newGraphStore(HugeConfig config, String store) { + return new HstoreStore.HstoreGraphStore(this, this.namespace(), store); + } + + @Override + protected BackendStore newSystemStore(HugeConfig config, String store) { + return null; + } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessions.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessions.java new file mode 100755 index 0000000000..0abb6458b9 --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessions.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.backend.store.hstore; + +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hugegraph.backend.query.Query; +import org.apache.hugegraph.backend.store.BackendEntry; +import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator; +import org.apache.hugegraph.backend.store.BackendSession.AbstractBackendSession; +import org.apache.hugegraph.backend.store.BackendSessionPool; +import org.apache.hugegraph.config.HugeConfig; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.type.define.GraphMode; + +public abstract class HstoreSessions extends BackendSessionPool { + + public HstoreSessions(HugeConfig config, String database, String store) { + super(config, database + "/" + store); + } + + public abstract Set openedTables(); + + public abstract void createTable(String... tables); + + public abstract void dropTable(String... 
tables); + + public abstract boolean existsTable(String table); + + public abstract void truncateTable(String table); + + public abstract void clear(); + + @Override + public abstract Session session(); + + public interface Countable { + + public long count(); + } + + /** + * Session for Hstore + */ + public static abstract class Session extends AbstractBackendSession { + + public static final int SCAN_ANY = 0x80; + public static final int SCAN_PREFIX_BEGIN = 0x01; + public static final int SCAN_PREFIX_END = 0x02; + public static final int SCAN_GT_BEGIN = 0x04; + public static final int SCAN_GTE_BEGIN = 0x0c; + public static final int SCAN_LT_END = 0x10; + public static final int SCAN_LTE_END = 0x30; + public static final int SCAN_KEY_ONLY = 0x40; + public static final int SCAN_HASHCODE = 0x100; + + private HugeConfig conf; + private String graphName; + + public static boolean matchScanType(int expected, int actual) { + return (expected & actual) == expected; + } + + public abstract void createTable(String tableName); + + public abstract void dropTable(String tableName); + + public abstract boolean existsTable(String tableName); + + public abstract void truncateTable(String tableName); + + public abstract void deleteGraph(); + + public abstract Pair keyRange(String table); + + public abstract void put(String table, byte[] ownerKey, + byte[] key, byte[] value); + + public abstract void increase(String table, byte[] ownerKey, + byte[] key, byte[] value); + + public abstract void delete(String table, byte[] ownerKey, byte[] key); + + public abstract void deletePrefix(String table, byte[] ownerKey, + byte[] key); + + public abstract void deleteRange(String table, byte[] ownerKeyFrom, + byte[] ownerKeyTo, byte[] keyFrom, + byte[] keyTo); + + public abstract byte[] get(String table, byte[] key); + + public abstract byte[] get(String table, byte[] ownerKey, byte[] key); + + public abstract BackendColumnIterator scan(String table); + + public abstract BackendColumnIterator 
scan(String table, + byte[] ownerKey, + byte[] prefix); + + public BackendColumnIterator scan(String table, byte[] ownerKeyFrom, + byte[] ownerKeyTo, byte[] keyFrom, + byte[] keyTo) { + return this.scan(table, ownerKeyFrom, ownerKeyTo, keyFrom, keyTo, + SCAN_LT_END); + } + + public abstract List scan(String table, + List keys, + int scanType, + long limit, + byte[] query); + + public abstract BackendEntry.BackendIterator scan(String table, + Iterator keys, + int scanType, + Query queryParam, + byte[] query); + + public abstract BackendColumnIterator scan(String table, + byte[] ownerKeyFrom, + byte[] ownerKeyTo, + byte[] keyFrom, + byte[] keyTo, + int scanType); + + public abstract BackendColumnIterator scan(String table, + byte[] ownerKeyFrom, + byte[] ownerKeyTo, + byte[] keyFrom, + byte[] keyTo, + int scanType, + byte[] query); + + public abstract BackendColumnIterator scan(String table, + byte[] ownerKeyFrom, + byte[] ownerKeyTo, + byte[] keyFrom, + byte[] keyTo, + int scanType, + byte[] query, + byte[] position); + + public abstract BackendColumnIterator scan(String table, + int codeFrom, + int codeTo, + int scanType, + byte[] query); + + public abstract BackendColumnIterator scan(String table, + int codeFrom, + int codeTo, + int scanType, + byte[] query, + byte[] position); + + public abstract BackendColumnIterator getWithBatch(String table, + List keys); + + public abstract void merge(String table, byte[] ownerKey, + byte[] key, byte[] value); + + public abstract void setMode(GraphMode mode); + + public abstract void truncate() throws Exception; + + public abstract BackendColumnIterator scan(String table, + byte[] conditionQueryToByte); + + public HugeConfig getConf() { + return conf; + } + + public void setConf(HugeConfig conf) { + this.conf = conf; + } + + public String getGraphName() { + return graphName; + } + + public void setGraphName(String graphName) { + this.graphName = graphName; + } + + public abstract void beginTx(); + + public abstract int 
getActiveStoreSize(); + } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java new file mode 100755 index 0000000000..e2ddfd97cb --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java @@ -0,0 +1,802 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.backend.store.hstore; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.NotImplementedException; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hugegraph.backend.query.Query; +import org.apache.hugegraph.backend.store.BackendEntry; +import org.apache.hugegraph.backend.store.BackendEntry.BackendColumn; +import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator; +import org.apache.hugegraph.backend.store.BackendEntryIterator; +import org.apache.hugegraph.config.CoreOptions; +import org.apache.hugegraph.config.HugeConfig; +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgScanQuery; +import org.apache.hugegraph.store.HgStoreClient; +import org.apache.hugegraph.store.HgStoreSession; +import org.apache.hugegraph.store.client.grpc.KvCloseableIterator; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.grpc.common.ScanOrderType; +import org.apache.hugegraph.testutil.Assert; +import org.apache.hugegraph.type.define.GraphMode; +import org.apache.hugegraph.util.Bytes; +import org.apache.hugegraph.util.E; +import org.apache.hugegraph.util.StringEncoding; + +public class HstoreSessionsImpl extends HstoreSessions { + + private static final Set 
infoInitializedGraph = + Collections.synchronizedSet(new HashSet<>()); + private static int tableCode = 0; + private static volatile Boolean initializedNode = Boolean.FALSE; + private static volatile PDClient defaultPdClient; + private static volatile HgStoreClient hgStoreClient; + private final HugeConfig config; + private final HstoreSession session; + private final Map tables; + private final AtomicInteger refCount; + private final String graphName; + + public HstoreSessionsImpl(HugeConfig config, String database, + String store) { + super(config, database, store); + this.config = config; + this.graphName = database + "/" + store; + this.initStoreNode(config); + this.session = new HstoreSession(this.config, graphName); + this.tables = new ConcurrentHashMap<>(); + this.refCount = new AtomicInteger(1); + } + + public static HgStoreClient getHgStoreClient() { + return hgStoreClient; + } + + public static PDClient getDefaultPdClient() { + return defaultPdClient; + } + + public static byte[] encode(String string) { + return StringEncoding.encode(string); + } + + public static String decode(byte[] bytes) { + return StringEncoding.decode(bytes); + } + + private void initStoreNode(HugeConfig config) { + if (!initializedNode) { + synchronized (this) { + if (!initializedNode) { + PDConfig pdConfig = + PDConfig.of(config.get(CoreOptions.PD_PEERS)) + .setEnableCache(true); + defaultPdClient = PDClient.create(pdConfig); + hgStoreClient = + HgStoreClient.create(defaultPdClient); + initializedNode = Boolean.TRUE; + } + } + } + } + + @Override + public void open() throws Exception { + if (!infoInitializedGraph.contains(this.graphName)) { + synchronized (infoInitializedGraph) { + if (!infoInitializedGraph.contains(this.graphName)) { + Integer partitionCount = + this.config.get(HstoreOptions.PARTITION_COUNT); + Assert.assertTrue("The value of hstore.partition_count" + + " cannot be less than 0.", + partitionCount > -1); + defaultPdClient.setGraph(Metapb.Graph.newBuilder() + 
.setGraphName( + this.graphName) + .setPartitionCount( + partitionCount) + .build()); + infoInitializedGraph.add(this.graphName); + } + } + } + this.session.open(); + } + + @Override + protected boolean opened() { + return this.session != null; + } + + @Override + public Set openedTables() { + return this.tables.keySet(); + } + + @Override + public synchronized void createTable(String... tables) { + for (String table : tables) { + this.session.createTable(table); + this.tables.put(table, tableCode++); + } + } + + @Override + public synchronized void dropTable(String... tables) { + for (String table : tables) { + this.session.dropTable(table); + this.tables.remove(table); + } + } + + @Override + public boolean existsTable(String table) { + return this.session.existsTable(table); + } + + @Override + public void truncateTable(String table) { + this.session.truncateTable(table); + } + + @Override + public void clear() { + this.session.deleteGraph(); + try { + hgStoreClient.getPdClient().delGraph(this.graphName); + } catch (PDException e) { + + } + } + + @Override + public final Session session() { + return (Session) super.getOrNewSession(); + } + + @Override + protected final Session newSession() { + return new HstoreSession(this.config(), this.graphName); + } + + @Override + protected synchronized void doClose() { + this.checkValid(); + if (this.refCount != null) { + if (this.refCount.decrementAndGet() > 0) { + return; + } + if (this.refCount.get() != 0) { + return; + } + } + assert this.refCount.get() == 0; + this.tables.clear(); + this.session.close(); + } + + private void checkValid() { + } + + private static class ColumnIterator implements + BackendColumnIterator, + Countable { + + private final T iter; + private final byte[] keyBegin; + private final byte[] keyEnd; + private final int scanType; + private final String table; + private final byte[] value; + private boolean gotNext; + private byte[] position; + + public ColumnIterator(String table, T results) { + 
this(table, results, null, null, 0); + } + + public ColumnIterator(String table, T results, byte[] keyBegin, + byte[] keyEnd, int scanType) { + E.checkNotNull(results, "results"); + this.table = table; + this.iter = results; + this.keyBegin = keyBegin; + this.keyEnd = keyEnd; + this.scanType = scanType; + this.value = null; + if (this.iter.hasNext()) { + this.iter.next(); + this.gotNext = true; + this.position = iter.position(); + } else { + this.gotNext = false; + // QUESTION: Resetting the position may result in the caller being unable to + // retrieve the corresponding position. + this.position = null; + } + if (!ArrayUtils.isEmpty(this.keyBegin) || + !ArrayUtils.isEmpty(this.keyEnd)) { + this.checkArguments(); + } + + } + + public T iter() { + return iter; + } + + private void checkArguments() { + E.checkArgument(!(this.match(Session.SCAN_PREFIX_BEGIN) && + this.match(Session.SCAN_PREFIX_END)), + "Can't set SCAN_PREFIX_WITH_BEGIN and " + + "SCAN_PREFIX_WITH_END at the same time"); + + E.checkArgument(!(this.match(Session.SCAN_PREFIX_BEGIN) && + this.match(Session.SCAN_GT_BEGIN)), + "Can't set SCAN_PREFIX_WITH_BEGIN and " + + "SCAN_GT_BEGIN/SCAN_GTE_BEGIN at the same time"); + + E.checkArgument(!(this.match(Session.SCAN_PREFIX_END) && + this.match(Session.SCAN_LT_END)), + "Can't set SCAN_PREFIX_WITH_END and " + + "SCAN_LT_END/SCAN_LTE_END at the same time"); + + if (this.match(Session.SCAN_PREFIX_BEGIN) && !matchHash()) { + E.checkArgument(this.keyBegin != null, + "Parameter `keyBegin` can't be null " + + "if set SCAN_PREFIX_WITH_BEGIN"); + E.checkArgument(this.keyEnd == null, + "Parameter `keyEnd` must be null " + + "if set SCAN_PREFIX_WITH_BEGIN"); + } + + if (this.match(Session.SCAN_PREFIX_END) && !matchHash()) { + E.checkArgument(this.keyEnd != null, + "Parameter `keyEnd` can't be null " + + "if set SCAN_PREFIX_WITH_END"); + } + + if (this.match(Session.SCAN_GT_BEGIN) && !matchHash()) { + E.checkArgument(this.keyBegin != null, + "Parameter `keyBegin` can't 
be null " + + "if set SCAN_GT_BEGIN or SCAN_GTE_BEGIN"); + } + + if (this.match(Session.SCAN_LT_END) && !matchHash()) { + E.checkArgument(this.keyEnd != null, + "Parameter `keyEnd` can't be null " + + "if set SCAN_LT_END or SCAN_LTE_END"); + } + } + + private boolean matchHash() { + return this.scanType == Session.SCAN_HASHCODE; + } + + private boolean match(int expected) { + return Session.matchScanType(expected, this.scanType); + } + + + @Override + public boolean hasNext() { + if (gotNext) { + this.position = this.iter.position(); + } else { + // QUESTION: Resetting the position may result in the caller being unable to + // retrieve the corresponding position. + this.position = null; + } + return gotNext; + } + + private boolean filter(byte[] key) { + if (this.match(Session.SCAN_PREFIX_BEGIN)) { + /* + * Prefix with `keyBegin`? + * TODO: use custom prefix_extractor instead + * or use ReadOptions.prefix_same_as_start + */ + return Bytes.prefixWith(key, this.keyBegin); + } else if (this.match(Session.SCAN_PREFIX_END)) { + /* + * Prefix with `keyEnd`? + * like the following query for range index: + * key > 'age:20' and prefix with 'age' + */ + assert this.keyEnd != null; + return Bytes.prefixWith(key, this.keyEnd); + } else if (this.match(Session.SCAN_LT_END)) { + /* + * Less (equal) than `keyEnd`? 
+ * NOTE: don't use BytewiseComparator due to signed byte + */ + if ((this.scanType | Session.SCAN_HASHCODE) != 0) { + return true; + } + assert this.keyEnd != null; + if (this.match(Session.SCAN_LTE_END)) { + // Just compare the prefix, can be there are excess tail + key = Arrays.copyOfRange(key, 0, this.keyEnd.length); + return Bytes.compare(key, this.keyEnd) <= 0; + } else { + return Bytes.compare(key, this.keyEnd) < 0; + } + } else { + assert this.match(Session.SCAN_ANY) || this.match(Session.SCAN_GT_BEGIN) || + this.match( + Session.SCAN_GTE_BEGIN) : "Unknown scan type"; + return true; + } + } + + @Override + public BackendColumn next() { + BackendEntryIterator.checkInterrupted(); + if (!this.hasNext()) { + throw new NoSuchElementException(); + } + BackendColumn col = + BackendColumn.of(this.iter.key(), + this.iter.value()); + if (this.iter.hasNext()) { + gotNext = true; + this.iter.next(); + } else { + gotNext = false; + } + return col; + } + + @Override + public long count() { + long count = 0L; + while (this.hasNext()) { + this.next(); + count++; + BackendEntryIterator.checkInterrupted(); + } + return count; + } + + @Override + public byte[] position() { + return this.position; + } + + @Override + public void close() { + if (this.iter != null) { + this.iter.close(); + } + } + } + + /** + * HstoreSession implement for hstore + */ + private final class HstoreSession extends Session { + + private static final boolean TRANSACTIONAL = true; + private final HgStoreSession graph; + int changedSize = 0; + + public HstoreSession(HugeConfig conf, String graphName) { + setGraphName(graphName); + setConf(conf); + this.graph = hgStoreClient.openSession(graphName); + } + + @Override + public void open() { + this.opened = true; + } + + @Override + public void close() { + this.opened = false; + } + + @Override + public boolean closed() { + return !this.opened; + } + + @Override + public void reset() { + if (this.changedSize != 0) { + this.rollback(); + this.changedSize = 
0; + } + } + + /** + * Any change in the session + */ + @Override + public boolean hasChanges() { + return this.changedSize > 0; + } + + /** + * Commit all updates(put/delete) to DB + */ + @Override + public Integer commit() { + if (!this.hasChanges()) { + // TODO: log a message with level WARNING + return 0; + } + int commitSize = this.changedSize; + if (TRANSACTIONAL) { + this.graph.commit(); + } + this.changedSize = 0; + return commitSize; + } + + /** + * Rollback all updates(put/delete) not committed + */ + @Override + public void rollback() { + if (TRANSACTIONAL) { + this.graph.rollback(); + } + this.changedSize = 0; + } + + @Override + public void createTable(String tableName) { + this.graph.createTable(tableName); + } + + @Override + public void dropTable(String tableName) { + this.graph.dropTable(tableName); + } + + @Override + public boolean existsTable(String tableName) { + return this.graph.existsTable(tableName); + } + + @Override + public void truncateTable(String tableName) { + this.graph.deleteTable(tableName); + } + + @Override + public void deleteGraph() { + this.graph.deleteGraph(this.getGraphName()); + } + + @Override + public Pair keyRange(String table) { + return null; + } + + private void prepare() { + if (!this.hasChanges() && TRANSACTIONAL) { + this.graph.beginTx(); + } + this.changedSize++; + } + + /** + * Add a KV record to a table + */ + @Override + public void put(String table, byte[] ownerKey, byte[] key, + byte[] value) { + prepare(); + this.graph.put(table, HgOwnerKey.of(ownerKey, key), value); + } + + @Override + public synchronized void increase(String table, byte[] ownerKey, + byte[] key, byte[] value) { + prepare(); + this.graph.merge(table, HgOwnerKey.of(ownerKey, key), value); + } + + @Override + public void delete(String table, byte[] ownerKey, byte[] key) { + prepare(); + this.graph.delete(table, HgOwnerKey.of(ownerKey, key)); + } + + @Override + public void deletePrefix(String table, byte[] ownerKey, byte[] key) { + 
prepare(); + this.graph.deletePrefix(table, HgOwnerKey.of(ownerKey, key)); + } + + /** + * Delete a range of keys from a table + */ + @Override + public void deleteRange(String table, byte[] ownerKeyFrom, + byte[] ownerKeyTo, byte[] keyFrom, + byte[] keyTo) { + prepare(); + this.graph.deleteRange(table, HgOwnerKey.of(ownerKeyFrom, keyFrom), + HgOwnerKey.of(ownerKeyTo, keyTo)); + } + + @Override + public byte[] get(String table, byte[] key) { + return this.graph.get(table, HgOwnerKey.of( + HgStoreClientConst.ALL_PARTITION_OWNER, key)); + } + + @Override + public byte[] get(String table, byte[] ownerKey, byte[] key) { + byte[] values = this.graph.get(table, HgOwnerKey.of(ownerKey, key)); + return values != null ? values : new byte[0]; + } + + @Override + public void beginTx() { + this.graph.beginTx(); + } + + @Override + public BackendColumnIterator scan(String table) { + assert !this.hasChanges(); + return new ColumnIterator<>(table, this.graph.scanIterator(table)); + } + + @Override + public BackendColumnIterator scan(String table, + byte[] conditionQueryToByte) { + assert !this.hasChanges(); + HgKvIterator results = + this.graph.scanIterator(table, conditionQueryToByte); + return new ColumnIterator<>(table, results); + } + + @Override + public BackendColumnIterator scan(String table, byte[] ownerKey, + byte[] prefix) { + assert !this.hasChanges(); + HgKvIterator result = this.graph.scanIterator(table, + HgOwnerKey.of( + ownerKey, + prefix)); + return new ColumnIterator<>(table, result); + } + + @Override + public List scan(String table, + List keys, + int scanType, long limit, + byte[] query) { + HgScanQuery scanQuery = HgScanQuery.prefixOf(table, keys).builder() + .setScanType(scanType) + .setQuery(query) + .setPerKeyLimit(limit).build(); + List> scanIterators = + this.graph.scanBatch(scanQuery); + LinkedList columnIterators = + new LinkedList<>(); + scanIterators.forEach(item -> { + columnIterators.add( + new ColumnIterator<>(table, item)); + }); + return 
columnIterators; + } + + @Override + public BackendEntry.BackendIterator scan( + String table, + Iterator keys, + int scanType, Query queryParam, byte[] query) { + ScanOrderType orderType; + switch (queryParam.orderType()) { + case ORDER_NONE: + orderType = ScanOrderType.ORDER_NONE; + break; + case ORDER_WITHIN_VERTEX: + orderType = ScanOrderType.ORDER_WITHIN_VERTEX; + break; + case ORDER_STRICT: + orderType = ScanOrderType.ORDER_STRICT; + break; + default: + throw new RuntimeException("not implement"); + } + HgScanQuery scanQuery = HgScanQuery.prefixIteratorOf(table, keys) + .builder() + .setScanType(scanType) + .setQuery(query) + .setPerKeyMax(queryParam.limit()) + .setOrderType(orderType) + .setOnlyKey( + !queryParam.withProperties()) + .setSkipDegree( + queryParam.skipDegree()) + .build(); + KvCloseableIterator> scanIterators = + this.graph.scanBatch2(scanQuery); + return new BackendEntry.BackendIterator() { + @Override + public void close() { + scanIterators.close(); + } + + @Override + public byte[] position() { + throw new NotImplementedException(); + } + + @Override + public boolean hasNext() { + return scanIterators.hasNext(); + } + + @Override + public BackendColumnIterator next() { + return new ColumnIterator(table, + scanIterators.next()); + } + }; + } + + @Override + public BackendColumnIterator scan(String table, byte[] ownerKeyFrom, + byte[] ownerKeyTo, + byte[] keyFrom, byte[] keyTo, + int scanType) { + assert !this.hasChanges(); + HgKvIterator result = this.graph.scanIterator(table, HgOwnerKey.of( + ownerKeyFrom, keyFrom), + HgOwnerKey.of( + ownerKeyTo, + keyTo), 0, + scanType, + null); + return new ColumnIterator<>(table, result, keyFrom, + keyTo, scanType); + } + + @Override + public BackendColumnIterator scan(String table, byte[] ownerKeyFrom, + byte[] ownerKeyTo, + byte[] keyFrom, byte[] keyTo, + int scanType, byte[] query) { + assert !this.hasChanges(); + HgKvIterator result = this.graph.scanIterator(table, + HgOwnerKey.of( + ownerKeyFrom, + 
keyFrom), + HgOwnerKey.of( + ownerKeyTo, + keyTo), + 0, + scanType, + query); + return new ColumnIterator<>(table, result, keyFrom, keyTo, + scanType); + } + + @Override + public BackendColumnIterator scan(String table, byte[] ownerKeyFrom, + byte[] ownerKeyTo, + byte[] keyFrom, byte[] keyTo, + int scanType, byte[] query, + byte[] position) { + assert !this.hasChanges(); + HgKvIterator result = this.graph.scanIterator(table, + HgOwnerKey.of( + ownerKeyFrom, + keyFrom), + HgOwnerKey.of( + ownerKeyTo, + keyTo), + 0, + scanType, + query); + result.seek(position); + return new ColumnIterator<>(table, result, keyFrom, keyTo, + scanType); + } + + @Override + public BackendColumnIterator scan(String table, int codeFrom, + int codeTo, int scanType, + byte[] query) { + assert !this.hasChanges(); + HgKvIterator iterator = + this.graph.scanIterator(table, codeFrom, codeTo, 256, + new byte[0]); + return new ColumnIterator<>(table, iterator, new byte[0], + new byte[0], scanType); + } + + @Override + public BackendColumnIterator scan(String table, int codeFrom, + int codeTo, int scanType, + byte[] query, byte[] position) { + assert !this.hasChanges(); + HgKvIterator iterator = + this.graph.scanIterator(table, codeFrom, codeTo, 256, + new byte[0]); + iterator.seek(position); + return new ColumnIterator<>(table, iterator, new byte[0], + new byte[0], scanType); + } + + @Override + public BackendColumnIterator getWithBatch(String table, + List keys) { + assert !this.hasChanges(); + HgKvIterator kvIterator = + this.graph.batchPrefix(table, keys); + return new ColumnIterator<>(table, kvIterator); + } + + @Override + public void merge(String table, byte[] ownerKey, byte[] key, + byte[] value) { + prepare(); + this.graph.merge(table, HgOwnerKey.of(ownerKey, key), value); + } + + @Override + public void setMode(GraphMode mode) { + // no need to set pd mode + } + + @Override + public void truncate() throws Exception { + this.graph.truncate(); + HstoreSessionsImpl.getDefaultPdClient() + 
.resetIdByKey(this.getGraphName()); + } + + @Override + public int getActiveStoreSize() { + try { + return defaultPdClient.getActiveStores().size(); + } catch (PDException ignore) { + } + return 0; + } + } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreStore.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreStore.java new file mode 100644 index 0000000000..1127d122e5 --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreStore.java @@ -0,0 +1,825 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.backend.store.hstore; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.hugegraph.backend.id.Id; +import org.apache.hugegraph.backend.id.IdGenerator; +import org.apache.hugegraph.backend.query.IdPrefixQuery; +import org.apache.hugegraph.backend.query.IdQuery; +import org.apache.hugegraph.backend.query.Query; +import org.apache.hugegraph.backend.serializer.BinaryBackendEntry; +import org.apache.hugegraph.backend.serializer.BytesBuffer; +import org.apache.hugegraph.backend.serializer.MergeIterator; +import org.apache.hugegraph.backend.store.AbstractBackendStore; +import org.apache.hugegraph.backend.store.BackendAction; +import org.apache.hugegraph.backend.store.BackendEntry; +import org.apache.hugegraph.backend.store.BackendFeatures; +import org.apache.hugegraph.backend.store.BackendMutation; +import org.apache.hugegraph.backend.store.BackendStoreProvider; +import org.apache.hugegraph.backend.store.BackendTable; +import org.apache.hugegraph.backend.store.hstore.HstoreSessions.Session; +import org.apache.hugegraph.config.CoreOptions; +import org.apache.hugegraph.config.HugeConfig; +import org.apache.hugegraph.iterator.CIter; +import org.apache.hugegraph.type.HugeTableType; +import org.apache.hugegraph.type.HugeType; +import org.apache.hugegraph.type.define.Action; +import org.apache.hugegraph.type.define.GraphMode; +import org.apache.hugegraph.util.E; +import org.apache.hugegraph.util.Log; +import org.slf4j.Logger; + +import 
com.google.common.collect.ImmutableSet; + +public abstract class HstoreStore extends AbstractBackendStore { + + private static final Logger LOG = Log.logger(HstoreStore.class); + + private static final Set INDEX_TYPES = ImmutableSet.of( + HugeType.SECONDARY_INDEX, HugeType.VERTEX_LABEL_INDEX, + HugeType.EDGE_LABEL_INDEX, HugeType.RANGE_INT_INDEX, + HugeType.RANGE_FLOAT_INDEX, HugeType.RANGE_LONG_INDEX, + HugeType.RANGE_DOUBLE_INDEX, HugeType.SEARCH_INDEX, + HugeType.SHARD_INDEX, HugeType.UNIQUE_INDEX + ); + + private static final BackendFeatures FEATURES = new HstoreFeatures(); + private final String store, namespace; + + private final BackendStoreProvider provider; + private final Map tables; + private final ReadWriteLock storeLock; + private boolean isGraphStore; + private HstoreSessions sessions; + + public HstoreStore(final BackendStoreProvider provider, + String namespace, String store) { + this.tables = new HashMap<>(); + this.provider = provider; + this.namespace = namespace; + this.store = store; + this.sessions = null; + this.storeLock = new ReentrantReadWriteLock(); + this.registerMetaHandlers(); + LOG.debug("Store loaded: {}", store); + } + + private void registerMetaHandlers() { + Supplier> dbsGet = () -> { + List dbs = new ArrayList<>(); + dbs.add(this.sessions); + return dbs; + }; + this.registerMetaHandler("metrics", (session, meta, args) -> { + HstoreMetrics metrics = new HstoreMetrics(dbsGet.get(), session); + return metrics.metrics(); + }); + this.registerMetaHandler("mode", (session, meta, args) -> { + E.checkArgument(args.length == 1, + "The args count of %s must be 1", meta); + session.setMode((GraphMode) args[0]); + return null; + }); + } + + protected void registerTableManager(HugeTableType type, HstoreTable table) { + this.tables.put((int) type.code(), table); + } + + @Override + protected final HstoreTable table(HugeType type) { + assert type != null; + HugeTableType table; + switch (type) { + case VERTEX: + table = HugeTableType.VERTEX; + 
break; + case EDGE_OUT: + table = HugeTableType.OUT_EDGE; + break; + case EDGE_IN: + table = HugeTableType.IN_EDGE; + break; + case OLAP: + table = HugeTableType.OLAP_TABLE; + break; + case TASK: + table = HugeTableType.TASK_INFO_TABLE; + break; + case SERVER: + table = HugeTableType.SERVER_INFO_TABLE; + break; + case SEARCH_INDEX: + case SHARD_INDEX: + case SECONDARY_INDEX: + case RANGE_INT_INDEX: + case RANGE_LONG_INDEX: + case RANGE_FLOAT_INDEX: + case RANGE_DOUBLE_INDEX: + case EDGE_LABEL_INDEX: + case VERTEX_LABEL_INDEX: + case UNIQUE_INDEX: + table = HugeTableType.ALL_INDEX_TABLE; + break; + default: + throw new AssertionError(String.format( + "Invalid type: %s", type)); + } + return this.tables.get((int) table.code()); + } + + protected List tableNames() { + return this.tables.values().stream() + .map(BackendTable::table) + .collect(Collectors.toList()); + } + + @Override + protected Session session(HugeType type) { + this.checkOpened(); + return this.sessions.session(); + } + + public String namespace() { + return this.namespace; + } + + @Override + public String store() { + return this.store; + } + + @Override + public String database() { + return this.namespace; + } + + @Override + public BackendStoreProvider provider() { + return this.provider; + } + + @Override + public BackendFeatures features() { + return FEATURES; + } + + @Override + public synchronized void open(HugeConfig config) { + E.checkNotNull(config, "config"); + + if (this.sessions == null) { + this.sessions = new HstoreSessionsImpl(config, this.namespace, + this.store); + } + + String graphStore = config.get(CoreOptions.STORE_GRAPH); + this.isGraphStore = this.store.equals(graphStore); + assert this.sessions != null; + if (!this.sessions.closed()) { + LOG.debug("Store {} has been opened before", this.store); + this.sessions.useSession(); + return; + } + + try { + // NOTE: won't throw error even if connection refused + this.sessions.open(); + } catch (Exception e) { + LOG.error("Failed to 
open Hstore '{}':{}", this.store, e); + } + this.sessions.session(); + LOG.debug("Store opened: {}", this.store); + } + + @Override + public void close() { + this.checkOpened(); + this.sessions.close(); + + LOG.debug("Store closed: {}", this.store); + } + + @Override + public boolean opened() { + this.checkConnectionOpened(); + return this.sessions.session().opened(); + } + + @Override + public void mutate(BackendMutation mutation) { + Session session = this.sessions.session(); + assert session.opened(); + Map>> mutations = mutation.mutations(); + Set>>> entries = mutations.entrySet(); + for (Map.Entry>> entry : entries) { + HugeType key = entry.getKey(); + // in order to obtain the owner efficiently, special for edge + boolean isEdge = key.isEdge(); + HstoreTable hTable = this.table(key); + Map> table = entry.getValue(); + Collection> values = table.values(); + for (List items : values) { + for (int i = 0; i < items.size(); i++) { + BackendAction item = items.get(i); + // set to ArrayList, use index to get item + this.mutate(session, item, hTable, isEdge); + } + } + } + } + + private void mutate(Session session, BackendAction item, + HstoreTable hTable, boolean isEdge) { + BackendEntry entry = item.entry(); + HstoreTable table; + if (!entry.olap()) { + // Oltp table + table = hTable; + } else { + if (entry.type().isIndex()) { + // Olap index + table = this.table(entry.type()); + } else { + // Olap vertex + table = this.table(HugeType.OLAP); + } + session = this.session(HugeType.OLAP); + } + + if (item.action().code() == Action.INSERT.code()) { + table.insert(session, entry, isEdge); + } else { + if (item.action().code() == Action.APPEND.code()) { + table.append(session, entry); + } else { + switch (item.action()) { + case DELETE: + table.delete(session, entry); + break; + case ELIMINATE: + table.eliminate(session, entry); + break; + case UPDATE_IF_PRESENT: + table.updateIfPresent(session, entry); + break; + case UPDATE_IF_ABSENT: + table.updateIfAbsent(session, 
entry); + break; + default: + throw new AssertionError(String.format( + "Unsupported mutate action: %s", + item.action())); + } + } + } + } + + private HstoreTable getTableByQuery(Query query) { + HugeType tableType = HstoreTable.tableType(query); + HstoreTable table; + if (query.olap()) { + if (query.resultType().isIndex()) { + // Any index type is ok here + table = this.table(HugeType.SECONDARY_INDEX); + } else { + table = this.table(HugeType.OLAP); + } + } else { + table = this.table(tableType); + } + return table; + } + + @Override + public Iterator query(Query query) { + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + try { + this.checkOpened(); + Session session = this.sessions.session(); + HstoreTable table = getTableByQuery(query); + Iterator entries = table.query(session, query); + // Merge olap results as needed + entries = getBackendEntryIterator(entries, query); + return entries; + } finally { + readLock.unlock(); + } + } + + // TODO: uncomment later - sub edge labels + //@Override + //public Iterator> query(Iterator queries, + // Function queryWriter, + // HugeGraph hugeGraph) { + // if (queries == null || !queries.hasNext()) { + // return Collections.emptyIterator(); + // } + // + // class QueryWrapper implements Iterator { + // Query first; + // final Iterator queries; + // Iterator subEls; + // Query preQuery; + // Iterator queryListIterator; + // + // QueryWrapper(Iterator queries, Query first) { + // this.queries = queries; + // this.first = first; + // } + // + // @Override + // public boolean hasNext() { + // return first != null || (this.subEls != null && this.subEls.hasNext()) + // || (queryListIterator != null && queryListIterator.hasNext()) || + // queries.hasNext(); + // } + // + // @Override + // public IdPrefixQuery next() { + // if (queryListIterator != null && queryListIterator.hasNext()) { + // return queryListIterator.next(); + // } + // + // Query q; + // if (first != null) { + // q = first; + // preQuery = q.copy(); 
+ // first = null; + // } else { + // if (this.subEls == null || !this.subEls.hasNext()) { + // q = queries.next(); + // preQuery = q.copy(); + // } else { + // q = preQuery.copy(); + // } + // } + // + // assert q instanceof ConditionQuery; + // ConditionQuery cq = (ConditionQuery) q; + // ConditionQuery originQuery = (ConditionQuery) q.copy(); + // + // List queryList = Lists.newArrayList(); + // if (hugeGraph != null) { + // for (ConditionQuery conditionQuery : + // ConditionQueryFlatten.flatten(cq)) { + // Id label = conditionQuery.condition(HugeKeys.LABEL); + // /* 父类型 + sortKeys: g.V("V.id").outE("parentLabel").has + // ("sortKey","value")转成 所有子类型 + sortKeys*/ + // if ((this.subEls == null || + // !this.subEls.hasNext()) && label != null && + // hugeGraph.edgeLabel(label).isFather() && + // conditionQuery.condition(HugeKeys.SUB_LABEL) == + // null && + // conditionQuery.condition(HugeKeys.OWNER_VERTEX) != + // null && + // conditionQuery.condition(HugeKeys.DIRECTION) != + // null && + // matchEdgeSortKeys(conditionQuery, false, + // hugeGraph)) { + // this.subEls = + // getSubLabelsOfParentEl( + // hugeGraph.edgeLabels(), + // label); + // } + // + // if (this.subEls != null && + // this.subEls.hasNext()) { + // conditionQuery.eq(HugeKeys.SUB_LABEL, + // subEls.next()); + // } + // + // HugeType hugeType = conditionQuery.resultType(); + // if (hugeType != null && hugeType.isEdge() && + // !conditionQuery.conditions().isEmpty()) { + // IdPrefixQuery idPrefixQuery = + // (IdPrefixQuery) queryWriter.apply( + // conditionQuery); + // idPrefixQuery.setOriginQuery(originQuery); + // queryList.add(idPrefixQuery); + // } + // } + // + // queryListIterator = queryList.iterator(); + // if (queryListIterator.hasNext()) { + // return queryListIterator.next(); + // } + // } + // + // Id ownerId = cq.condition(HugeKeys.OWNER_VERTEX); + // assert ownerId != null; + // BytesBuffer buffer = + // BytesBuffer.allocate(BytesBuffer.BUF_EDGE_ID); + // buffer.writeId(ownerId); + // 
return new IdPrefixQuery(cq, new BinaryBackendEntry.BinaryId( + // buffer.bytes(), ownerId)); + // } + // + // private boolean matchEdgeSortKeys(ConditionQuery query, + // boolean matchAll, + // HugeGraph graph) { + // assert query.resultType().isEdge(); + // Id label = query.condition(HugeKeys.LABEL); + // if (label == null) { + // return false; + // } + // List sortKeys = graph.edgeLabel(label).sortKeys(); + // if (sortKeys.isEmpty()) { + // return false; + // } + // Set queryKeys = query.userpropKeys(); + // for (int i = sortKeys.size(); i > 0; i--) { + // List subFields = sortKeys.subList(0, i); + // if (queryKeys.containsAll(subFields)) { + // if (queryKeys.size() == subFields.size() || !matchAll) { + // /* + // * Return true if: + // * matchAll=true and all queryKeys are in sortKeys + // * or + // * partial queryKeys are in sortKeys + // */ + // return true; + // } + // } + // } + // return false; + // } + // } + // Query first = queries.next(); + // List typeList = getHugeTypes(first); + // QueryWrapper idPrefixQueries = new QueryWrapper(queries, first); + // + // return query(typeList, idPrefixQueries); + //} + + //private Iterator getSubLabelsOfParentEl(Collection allEls, + // Id label) { + // List list = new ArrayList<>(); + // for (EdgeLabel el : allEls) { + // if (el.edgeLabelType().sub() && el.fatherId().equals(label)) { + // list.add(el.id()); + // } + // } + // return list.iterator(); + //} + + public List> query(List typeList, + List queries) { + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + LinkedList> results = new LinkedList<>(); + try { + this.checkOpened(); + Session session = this.sessions.session(); + E.checkState(!CollectionUtils.isEmpty(queries) && + !CollectionUtils.isEmpty(typeList), + "Please check query list or type list."); + HstoreTable table = null; + StringBuilder builder = new StringBuilder(); + for (HugeType type : typeList) { + builder.append((table = this.table(type)).table()).append(","); + } + List> 
iteratorList = + table.query(session, queries, + builder.substring(0, builder.length() - 1)); + for (int i = 0; i < iteratorList.size(); i++) { + Iterator entries = iteratorList.get(i); + // Merge olap results as needed + Query query = queries.get(i); + entries = getBackendEntryIterator(entries, query); + if (entries instanceof CIter) { + results.add((CIter) entries); + } + } + return results; + } finally { + readLock.unlock(); + } + } + + public Iterator> query(List typeList, + Iterator queries) { + Lock readLock = this.storeLock.readLock(); + readLock.lock(); + try { + this.checkOpened(); + Session session = this.sessions.session(); + E.checkState(queries.hasNext() && + !CollectionUtils.isEmpty(typeList), + "Please check query list or type list."); + HstoreTable table = null; + StringBuilder builder = new StringBuilder(); + for (HugeType type : typeList) { + builder.append((table = this.table(type)).table()).append(","); + } + + Iterator> iterators = + table.query(session, queries, + builder.substring(0, builder.length() - 1)); + + return iterators; + } finally { + readLock.unlock(); + } + } + + private Iterator getBackendEntryIterator( + Iterator entries, + Query query) { + HstoreTable table; + Set olapPks = query.olapPks(); + if (this.isGraphStore && !olapPks.isEmpty()) { + List> iterators = new ArrayList<>(); + for (Id pk : olapPks) { + // 构造olap表查询query condition + Query q = this.constructOlapQueryCondition(pk, query); + table = this.table(HugeType.OLAP); + iterators.add(table.queryOlap(this.session(HugeType.OLAP), q)); + } + entries = new MergeIterator<>(entries, iterators, + BackendEntry::mergeable); + } + return entries; + } + + + /** + * 重新构造 查询olap表 query + * 由于 olap合并成一张表, 在写入olap数据, key在后面增加了pk + * 所以在此进行查询的时候,需要重新构造pk前缀 + * 写入参考 BinarySerializer.writeOlapVertex + * + * @param pk + * @param query + * @return + */ + private Query constructOlapQueryCondition(Id pk, Query query) { + if (query instanceof IdQuery && !CollectionUtils.isEmpty((query).ids())) 
{ + IdQuery q = (IdQuery) query.copy(); + Iterator iterator = q.ids().iterator(); + LinkedHashSet linkedHashSet = new LinkedHashSet<>(); + while (iterator.hasNext()) { + Id id = iterator.next(); + if (id instanceof BinaryBackendEntry.BinaryId) { + id = ((BinaryBackendEntry.BinaryId) id).origin(); + } + + // create binary id + BytesBuffer buffer = + BytesBuffer.allocate(1 + pk.length() + 1 + id.length()); + buffer.writeId(pk); + id = new BinaryBackendEntry.BinaryId( + buffer.writeId(id).bytes(), id); + linkedHashSet.add(id); + } + q.resetIds(); + q.query(linkedHashSet); + return q; + } else { + // create binary id + BytesBuffer buffer = BytesBuffer.allocate(1 + pk.length()); + pk = new BinaryBackendEntry.BinaryId( + buffer.writeId(pk).bytes(), pk); + + IdPrefixQuery idPrefixQuery = new IdPrefixQuery(HugeType.OLAP, pk); + return idPrefixQuery; + } + } + + @Override + public Number queryNumber(Query query) { + this.checkOpened(); + + Session session = this.sessions.session(); + HstoreTable table = this.table(HstoreTable.tableType(query)); + return table.queryNumber(session, query); + } + + @Override + public synchronized void init() { + Lock writeLock = this.storeLock.writeLock(); + writeLock.lock(); + try { + // Create tables with main disk + this.sessions.createTable(this.tableNames().toArray(new String[0])); + LOG.debug("Store initialized: {}", this.store); + } finally { + writeLock.unlock(); + } + } + + @Override + public void clear(boolean clearSpace) { + Lock writeLock = this.storeLock.writeLock(); + writeLock.lock(); + try { + // Drop tables with main disk + this.sessions.dropTable(this.tableNames().toArray(new String[0])); + if (clearSpace) { + this.sessions.clear(); + } + LOG.debug("Store cleared: {}", this.store); + } finally { + writeLock.unlock(); + } + } + + @Override + public boolean initialized() { + return true; + } + + @Override + public void truncate() { + try { + this.sessions.session().truncate(); + } catch (Exception e) { + LOG.error("Store 
truncated failed", e); + return; + } + LOG.debug("Store truncated: {}", this.store); + } + + @Override + public void beginTx() { + this.sessions.session().beginTx(); + } + + @Override + public void commitTx() { + this.checkOpened(); + Session session = this.sessions.session(); + session.commit(); + } + + @Override + public void rollbackTx() { + this.checkOpened(); + Session session = this.sessions.session(); + session.rollback(); + } + + private void checkConnectionOpened() { + } + + @Override + public Id nextId(HugeType type) { + long counter = 0L; + counter = this.getCounter(type); + E.checkState(counter != 0L, "Please check whether '%s' is OK", + this.provider().type()); + return IdGenerator.of(counter); + } + + @Override + public void setCounterLowest(HugeType type, long lowest) { + this.increaseCounter(type, lowest); + } + + /***************************** Store defines *****************************/ + + public static class HstoreSchemaStore extends HstoreStore { + + public HstoreSchemaStore(BackendStoreProvider provider, String namespace, String store) { + super(provider, namespace, store); + } + + @Override + public boolean isSchemaStore() { + return true; + } + + @Override + public void increaseCounter(HugeType type, long num) { + throw new UnsupportedOperationException( + "HstoreSchemaStore.increaseCounter()"); + } + + @Override + public long getCounter(HugeType type) { + throw new UnsupportedOperationException( + "HstoreSchemaStore.getCounter()"); + } + } + + public static class HstoreGraphStore extends HstoreStore { + + public HstoreGraphStore(BackendStoreProvider provider, + String namespace, String store) { + super(provider, namespace, store); + + registerTableManager(HugeTableType.VERTEX, + new HstoreTables.Vertex(store)); + registerTableManager(HugeTableType.OUT_EDGE, + HstoreTables.Edge.out(store)); + registerTableManager(HugeTableType.IN_EDGE, + HstoreTables.Edge.in(store)); + registerTableManager(HugeTableType.ALL_INDEX_TABLE, + new 
HstoreTables.IndexTable(store)); + registerTableManager(HugeTableType.OLAP_TABLE, + new HstoreTables.OlapTable(store)); + registerTableManager(HugeTableType.TASK_INFO_TABLE, + new HstoreTables.TaskInfo(store)); + registerTableManager(HugeTableType.SERVER_INFO_TABLE, + new HstoreTables.ServerInfo(store)); + } + + @Override + public boolean isSchemaStore() { + return false; + } + + @Override + public Id nextId(HugeType type) { + throw new UnsupportedOperationException( + "HstoreGraphStore.nextId()"); + } + + @Override + public void increaseCounter(HugeType type, long num) { + throw new UnsupportedOperationException( + "HstoreGraphStore.increaseCounter()"); + } + + @Override + public long getCounter(HugeType type) { + throw new UnsupportedOperationException( + "HstoreGraphStore.getCounter()"); + } + + @Override + public void createOlapTable(Id pkId) { + HstoreTable table = new HstoreTables.OlapTable(this.store()); + LOG.info("Hstore create olap table {}", table.table()); + super.sessions.createTable(table.table()); + LOG.info("Hstore finish create olap table"); + registerTableManager(HugeTableType.OLAP_TABLE, table); + LOG.info("OLAP table {} has been created", table.table()); + } + + @Override + public void checkAndRegisterOlapTable(Id pkId) { + HstoreTable table = new HstoreTables.OlapTable(this.store()); + if (!super.sessions.existsTable(table.table())) { + LOG.error("Found exception: Table '{}' doesn't exist, we'll " + + "recreate it now. 
Please carefully check the recent " + "operation in server and computer, then ensure the " + "integrity of store file.", table.table()); + this.createOlapTable(pkId); + } else { + registerTableManager(HugeTableType.OLAP_TABLE, table); + } + } + + @Override + public void clearOlapTable(Id pkId) { + } + + @Override + public void removeOlapTable(Id pkId) { + } + + @Override + public boolean existOlapTable(Id pkId) { + String tableName = this.olapTableName(pkId); + return super.sessions.existsTable(tableName); + } + } + + @Override + public String storedVersion() { + return "1.13"; + } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java new file mode 100755 index 0000000000..39e24a1d9d --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java @@ -0,0 +1,732 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.hugegraph.backend.store.hstore; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.backend.id.EdgeId; +import org.apache.hugegraph.backend.id.Id; +import org.apache.hugegraph.backend.page.PageState; +import org.apache.hugegraph.backend.query.Aggregate; +import org.apache.hugegraph.backend.query.Aggregate.AggregateFunc; +import org.apache.hugegraph.backend.query.Condition; +import org.apache.hugegraph.backend.query.Condition.Relation; +import org.apache.hugegraph.backend.query.ConditionQuery; +import org.apache.hugegraph.backend.query.IdPrefixQuery; +import org.apache.hugegraph.backend.query.IdRangeQuery; +import org.apache.hugegraph.backend.query.Query; +import org.apache.hugegraph.backend.serializer.BinaryBackendEntry; +import org.apache.hugegraph.backend.serializer.BinaryEntryIterator; +import org.apache.hugegraph.backend.store.BackendEntry; +import org.apache.hugegraph.backend.store.BackendEntry.BackendColumn; +import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator; +import org.apache.hugegraph.backend.store.BackendEntryIterator; +import org.apache.hugegraph.backend.store.BackendTable; +import org.apache.hugegraph.backend.store.Shard; +import org.apache.hugegraph.backend.store.hstore.HstoreSessions.Countable; +import org.apache.hugegraph.backend.store.hstore.HstoreSessions.Session; +import org.apache.hugegraph.exception.NotSupportException; +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.store.HgOwnerKey; +import 
org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.type.HugeType; +import org.apache.hugegraph.type.define.HugeKeys; +import org.apache.hugegraph.util.E; +import org.apache.hugegraph.util.Log; +import org.apache.hugegraph.util.StringEncoding; +import org.apache.tinkerpop.gremlin.util.iterator.IteratorUtils; +import org.slf4j.Logger; + +public class HstoreTable extends BackendTable { + + private static final Logger LOG = Log.logger(HstoreStore.class); + + private final HstoreShardSplitter shardSpliter; + Function ownerDelegate = (entry) -> getOwner(entry); + Function ownerByIdDelegate = (id) -> getOwnerId(id); + BiFunction ownerByQueryDelegate = + (type, id) -> getOwnerId(type, id); + Supplier ownerScanDelegate = + () -> HgStoreClientConst.ALL_PARTITION_OWNER; + + public HstoreTable(String database, String table) { + super(String.format("%s+%s", database, table)); + this.shardSpliter = new HstoreShardSplitter(this.table()); + } + + public static ConditionQuery removeDirectionCondition(ConditionQuery conditionQuery) { + Collection conditions = conditionQuery.conditions(); + List newConditions = new ArrayList<>(); + for (Condition condition : conditions) { + if (!direction(condition)) { + newConditions.add(condition); + } + } + if (newConditions.size() > 0) { + conditionQuery.resetConditions(newConditions); + return conditionQuery; + } else { + return null; + } + } + + private static boolean direction(Condition condition) { + boolean direction = true; + List relations = condition.relations(); + for (Relation r : relations) { + if (!r.key().equals(HugeKeys.DIRECTION)) { + direction = false; + break; + } + } + return direction; + } + + protected static BackendEntryIterator newEntryIterator( + BackendColumnIterator cols, Query query) { + return new BinaryEntryIterator<>(cols, query, (entry, col) -> { + if (entry == null || !entry.belongToMe(col)) { + HugeType type = query.resultType(); + // NOTE: only support BinaryBackendEntry 
currently + entry = new BinaryBackendEntry(type, col.name); + } + entry.columns(col); + return entry; + }); + } + + protected static BackendEntryIterator newEntryIteratorOlap( + BackendColumnIterator cols, Query query, boolean isOlap) { + return new BinaryEntryIterator<>(cols, query, (entry, col) -> { + if (entry == null || !entry.belongToMe(col)) { + HugeType type = query.resultType(); + // NOTE: only support BinaryBackendEntry currently + entry = new BinaryBackendEntry(type, col.name, false, isOlap); + } + entry.columns(col); + return entry; + }); + } + + public static String bytes2String(byte[] bytes) { + StringBuilder result = new StringBuilder(); + for (byte b : bytes) { + String st = String.format("%02x", b); + result.append(st); + } + return result.toString(); + } + + @Override + protected void registerMetaHandlers() { + this.registerMetaHandler("splits", (session, meta, args) -> { + E.checkArgument(args.length == 1, + "The args count of %s must be 1", meta); + long splitSize = (long) args[0]; + return this.shardSpliter.getSplits(session, splitSize); + }); + } + + @Override + public void init(Session session) { + // pass + } + + @Override + public void clear(Session session) { + // pass + } + + public boolean isOlap() { + return false; + } + + private byte[] getOwner(BackendEntry entry) { + if (entry == null) { + return HgStoreClientConst.ALL_PARTITION_OWNER; + } + Id id = entry.type().isIndex() ? 
entry.id() : entry.originId(); + return getOwnerId(id); + } + + public Supplier getOwnerScanDelegate() { + return ownerScanDelegate; + } + + public byte[] getInsertEdgeOwner(BackendEntry entry) { + Id id = entry.originId(); + id = ((EdgeId) id).ownerVertexId(); + return id.asBytes(); + } + + public byte[] getInsertOwner(BackendEntry entry) { + // 为适应label索引散列,不聚焦在一个分区 + if (entry.type().isLabelIndex() && (entry.columns().size() == 1)) { + Iterator iterator = entry.columns().iterator(); + while (iterator.hasNext()) { + BackendColumn next = iterator.next(); + return next.name; + } + } + + Id id = entry.type().isIndex() ? entry.id() : entry.originId(); + return getOwnerId(id); + } + + /** + * 返回Id所属的点ID + * + * @param id + * @return + */ + protected byte[] getOwnerId(Id id) { + if (id instanceof BinaryBackendEntry.BinaryId) { + id = ((BinaryBackendEntry.BinaryId) id).origin(); + } + if (id != null && id.edge()) { + id = ((EdgeId) id).ownerVertexId(); + } + return id != null ? id.asBytes() : + HgStoreClientConst.ALL_PARTITION_OWNER; + } + + /** + * 返回Id所属的点ID + * + * @param id + * @return + */ + protected byte[] getOwnerId(HugeType type, Id id) { + if (type.equals(HugeType.VERTEX) || type.equals(HugeType.EDGE) || + type.equals(HugeType.EDGE_OUT) || type.equals(HugeType.EDGE_IN) || + type.equals(HugeType.COUNTER)) { + return getOwnerId(id); + } else { + return HgStoreClientConst.ALL_PARTITION_OWNER; + } + } + + @Override + public void insert(Session session, BackendEntry entry) { + byte[] owner = entry.type().isEdge() ? getInsertEdgeOwner(entry) : getInsertOwner(entry); + ArrayList columns = new ArrayList<>(entry.columns()); + for (int i = 0; i < columns.size(); i++) { + BackendColumn col = columns.get(i); + session.put(this.table(), owner, col.name, col.value); + } + } + + public void insert(Session session, BackendEntry entry, boolean isEdge) { + byte[] owner = isEdge ? 
getInsertEdgeOwner(entry) : getInsertOwner(entry); + ArrayList columns = new ArrayList<>(entry.columns()); + for (int i = 0; i < columns.size(); i++) { + BackendColumn col = columns.get(i); + session.put(this.table(), owner, col.name, col.value); + } + } + + @Override + public void delete(Session session, BackendEntry entry) { + byte[] ownerKey = ownerDelegate.apply(entry); + if (entry.columns().isEmpty()) { + byte[] idBytes = entry.id().asBytes(); + // LOG.debug("Delete from {} with owner {}, id: {}", + // this.table(), bytes2String(ownerKey), idBytes); + session.delete(this.table(), ownerKey, idBytes); + } else { + for (BackendColumn col : entry.columns()) { + // LOG.debug("Delete from {} with owner {}, id: {}", + // this.table(), bytes2String(ownerKey), + // bytes2String(col.name)); + assert entry.belongToMe(col) : entry; + session.delete(this.table(), ownerKey, col.name); + } + } + } + + @Override + public void append(Session session, BackendEntry entry) { + assert entry.columns().size() == 1; + this.insert(session, entry); + } + + @Override + public void eliminate(Session session, BackendEntry entry) { + assert entry.columns().size() == 1; + this.delete(session, entry); + } + + @Override + public boolean queryExist(Session session, BackendEntry entry) { + Id id = entry.id(); + try (BackendColumnIterator iter = this.queryById(session, id)) { + return iter.hasNext(); + } + } + + @Override + public Number queryNumber(Session session, Query query) { + Aggregate aggregate = query.aggregateNotNull(); + if (aggregate.func() != AggregateFunc.COUNT) { + throw new NotSupportException(aggregate.toString()); + } + + assert aggregate.func() == AggregateFunc.COUNT; + assert query.noLimit(); + Iterator results = this.queryBy(session, query); + if (results instanceof Countable) { + return ((Countable) results).count(); + } + return IteratorUtils.count(results); + } + + @Override + public Iterator query(Session session, Query query) { + if (query.limit() == 0L && 
!query.noLimit()) { + // LOG.debug("Return empty result(limit=0) for query {}", query); + return Collections.emptyIterator(); + } + return newEntryIterator(this.queryBy(session, query), query); + } + + @Override + public Iterator queryOlap(Session session, Query query) { + if (query.limit() == 0L && !query.noLimit()) { + // LOG.debug("Return empty result(limit=0) for query {}", query); + return Collections.emptyIterator(); + } + return newEntryIteratorOlap(this.queryBy(session, query), query, true); + } + + public List> query(Session session, + List queries, + String tableName) { + List queryByPrefixList = + this.queryByPrefixList(session, queries, tableName); + LinkedList> iterators = new LinkedList<>(); + for (int i = 0; i < queryByPrefixList.size(); i++) { + IdPrefixQuery q = queries.get(i).copy(); + q.capacity(Query.NO_CAPACITY); + q.limit(Query.NO_LIMIT); + BackendEntryIterator iterator = + newEntryIterator(queryByPrefixList.get(i), q); + iterators.add(iterator); + } + return iterators; + } + + public BackendEntry.BackendIterator> query(Session session, + Iterator queries, + String tableName) { + final IdPrefixQuery[] first = {queries.next()}; + int type = first[0].withProperties() ? 0 : Session.SCAN_KEY_ONLY; + + IdPrefixQuery queryTmpl = first[0].copy(); + queryTmpl.capacity(Query.NO_CAPACITY); + queryTmpl.limit(Query.NO_LIMIT); + + ConditionQuery originQuery = (ConditionQuery) first[0].originQuery(); + if (originQuery != null) { + originQuery = prepareConditionQueryList(originQuery); + } + byte[] queryBytes = originQuery == null ? null : originQuery.bytes(); + + BackendEntry.BackendIterator it + = session.scan(tableName, new Iterator() { + @Override + public boolean hasNext() { + if (first[0] != null) { + return true; + } + return queries.hasNext(); + } + + @Override + public HgOwnerKey next() { + IdPrefixQuery query = first[0] != null ? 
first[0] : queries.next(); + first[0] = null; + byte[] prefix = ownerByQueryDelegate.apply(query.resultType(), + query.prefix()); + return HgOwnerKey.of(prefix, query.prefix().asBytes()); + } + }, type, first[0], queryBytes); + return new BackendEntry.BackendIterator>() { + @Override + public boolean hasNext() { + return it.hasNext(); + } + + @Override + public Iterator next() { + BackendEntryIterator iterator = newEntryIterator(it.next(), queryTmpl); + return iterator; + } + + @Override + public void close() { + it.close(); + } + + @Override + public byte[] position() { + return new byte[0]; + } + }; + } + + protected BackendColumnIterator queryBy(Session session, Query query) { + // Query all + if (query.empty()) { + return this.queryAll(session, query); + } + + // Query by prefix + if (query instanceof IdPrefixQuery) { + IdPrefixQuery pq = (IdPrefixQuery) query; + return this.queryByPrefix(session, pq); + } + + // Query by range + if (query instanceof IdRangeQuery) { + IdRangeQuery rq = (IdRangeQuery) query; + return this.queryByRange(session, rq); + } + + // Query by id + if (query.conditions().isEmpty()) { + assert !query.ids().isEmpty(); + // 单个id查询 走get接口查询 + if (query.ids().size() == 1) { + return this.getById(session, query.ids().iterator().next()); + } + // NOTE: this will lead to lazy create rocksdb iterator + LinkedList hgOwnerKeys = new LinkedList<>(); + for (Id id : query.ids()) { + hgOwnerKeys.add(HgOwnerKey.of(this.ownerByIdDelegate.apply(id), + id.asBytes())); + } + BackendColumnIterator withBatch = session.getWithBatch(this.table(), + hgOwnerKeys); + return BackendColumnIterator.wrap(withBatch); + } + + // Query by condition (or condition + id) + ConditionQuery cq = (ConditionQuery) query; + return this.queryByCond(session, cq); + } + + protected BackendColumnIterator queryAll(Session session, Query query) { + if (query.paging()) { + PageState page = PageState.fromString(query.page()); + byte[] ownerKey = this.getOwnerScanDelegate().get(); + int 
scanType = Session.SCAN_ANY | + (query.withProperties() ? 0 : Session.SCAN_KEY_ONLY); + byte[] queryBytes = query instanceof ConditionQuery ? + ((ConditionQuery) query).bytes() : null; + // LOG.debug("query {} with ownerKeyFrom: {}, ownerKeyTo: {}, " + + // "keyFrom: null, keyTo: null, scanType: {}, " + + // "conditionQuery: {}, position: {}", + // this.table(), bytes2String(ownerKey), + // bytes2String(ownerKey), scanType, + // queryBytes, page.position()); + return session.scan(this.table(), ownerKey, ownerKey, null, + null, scanType, queryBytes, + page.position()); + } + return session.scan(this.table(), + query instanceof ConditionQuery ? + ((ConditionQuery) query).bytes() : null); + } + + protected BackendColumnIterator queryById(Session session, Id id) { + // TODO: change to get() after vertex and schema don't use id prefix + return session.scan(this.table(), this.ownerByIdDelegate.apply(id), + id.asBytes()); + } + + protected BackendColumnIterator getById(Session session, Id id) { + byte[] value = session.get(this.table(), + this.ownerByIdDelegate.apply(id), + id.asBytes()); + if (value.length == 0) { + return BackendColumnIterator.empty(); + } + BackendColumn col = BackendColumn.of(id.asBytes(), value); + return BackendColumnIterator.iterator(col); + } + + protected BackendColumnIterator queryByPrefix(Session session, + IdPrefixQuery query) { + int type = query.inclusiveStart() ? 
+ Session.SCAN_GTE_BEGIN : Session.SCAN_GT_BEGIN; + type |= Session.SCAN_PREFIX_END; + byte[] position = null; + if (query.paging()) { + position = PageState.fromString(query.page()).position(); + } + ConditionQuery originQuery = (ConditionQuery) query.originQuery(); + if (originQuery != null) { + originQuery = prepareConditionQuery(originQuery); + } + byte[] ownerKeyFrom = this.ownerByQueryDelegate.apply(query.resultType(), + query.start()); + byte[] ownerKeyTo = this.ownerByQueryDelegate.apply(query.resultType(), + query.prefix()); + byte[] keyFrom = query.start().asBytes(); + // 前缀分页查询中, start为最初的位置。因为在不同的分区 都是从start位置开始查询 + if (query.paging()) { + keyFrom = query.prefix().asBytes(); + } + byte[] keyTo = query.prefix().asBytes(); + byte[] queryBytes = originQuery == null ? + null : + originQuery.bytes(); + + // LOG.debug("query {} with ownerKeyFrom: {}, ownerKeyTo: {}," + + // "keyFrom: {}, keyTo: {}, scanType: {}, conditionQuery: {}," + + // "position: {}", + // this.table(), bytes2String(ownerKeyFrom), + // bytes2String(ownerKeyTo), bytes2String(keyFrom), + // bytes2String(keyTo), type, originQuery, position); + + return session.scan(this.table(), ownerKeyFrom, ownerKeyTo, keyFrom, + keyTo, type, queryBytes, position); + } + + protected List queryByPrefixList( + Session session, + List queries, + String tableName) { + E.checkArgument(queries.size() > 0, + "The size of queries must be greater than zero"); + IdPrefixQuery query = queries.get(0); + int type = 0; + LinkedList ownerKey = new LinkedList<>(); + queries.forEach((item) -> { + byte[] prefix = this.ownerByQueryDelegate.apply(item.resultType(), + item.prefix()); + ownerKey.add(HgOwnerKey.of(prefix, item.prefix().asBytes())); + }); + ConditionQuery originQuery = (ConditionQuery) query.originQuery(); + if (originQuery != null) { + originQuery = prepareConditionQueryList(originQuery); + } + byte[] queryBytes = originQuery == null ? 
null : originQuery.bytes(); + + // LOG.debug("query {} with scanType: {}, limit: {}, conditionQuery: + // {}", this.table(), type, query.limit(), queryBytes); + return session.scan(tableName, ownerKey, type, + query.limit(), queryBytes); + } + + /*** + * Prepare ConditionQuery to do operator sinking, because some scenes do not need to be + * preserved + * @param conditionQuery + * @return + */ + private ConditionQuery prepareConditionQuery(ConditionQuery conditionQuery) { + if (CollectionUtils.isEmpty(conditionQuery.userpropConditions())) { + return null; + } + // only userpropConditions can send to store + Collection conditions = conditionQuery.conditions(); + List newConditions = new ArrayList<>(); + for (Condition condition : conditions) { + if (!onlyOwnerVertex(condition)) { + newConditions.add(condition); + } + } + if (newConditions.size() > 0) { + conditionQuery.resetConditions(newConditions); + return conditionQuery; + } else { + return null; + } + } + + /*** + * Prepare ConditionQuery to do operator sinking, because some scenes do not need to be + * preserved + * @param conditionQuery + * @return + */ + private ConditionQuery prepareConditionQueryList(ConditionQuery conditionQuery) { + if (!conditionQuery.containsLabelOrUserpropRelation()) { + return null; + } + // only userpropConditions can send to store + Collection conditions = conditionQuery.conditions(); + List newConditions = new ArrayList<>(); + for (Condition condition : conditions) { + if (!onlyOwnerVertex(condition)) { + newConditions.add(condition); + } + } + if (newConditions.size() > 0) { + conditionQuery.resetConditions(newConditions); + return conditionQuery; + } else { + return null; + } + } + + private boolean onlyOwnerVertex(Condition condition) { + boolean onlyOwnerVertex = true; + List relations = condition.relations(); + for (Relation r : relations) { + if (!r.key().equals(HugeKeys.OWNER_VERTEX)) { + onlyOwnerVertex = false; + break; + } + } + return onlyOwnerVertex; + } + + protected 
BackendColumnIterator queryByRange(Session session, + IdRangeQuery query) { + byte[] start = query.start().asBytes(); + byte[] end = query.end() == null ? null : query.end().asBytes(); + int type = query.inclusiveStart() ? + Session.SCAN_GTE_BEGIN : Session.SCAN_GT_BEGIN; + if (end != null) { + type |= query.inclusiveEnd() ? + Session.SCAN_LTE_END : Session.SCAN_LT_END; + } + ConditionQuery cq; + Query origin = query.originQuery(); + byte[] position = null; + if (query.paging() && !query.page().isEmpty()) { + position = PageState.fromString(query.page()).position(); + } + byte[] ownerStart = this.ownerByQueryDelegate.apply(query.resultType(), + query.start()); + byte[] ownerEnd = this.ownerByQueryDelegate.apply(query.resultType(), + query.end()); + if (origin instanceof ConditionQuery && + (query.resultType().isEdge() || query.resultType().isVertex())) { + cq = (ConditionQuery) query.originQuery(); + + // LOG.debug("query {} with ownerKeyFrom: {}, ownerKeyTo: {}, " + + // "keyFrom: {}, keyTo: {}, " + + // "scanType: {}, conditionQuery: {}", + // this.table(), bytes2String(ownerStart), + // bytes2String(ownerEnd), bytes2String(start), + // bytes2String(end), type, cq.bytes()); + return session.scan(this.table(), ownerStart, + ownerEnd, start, end, type, cq.bytes(), position); + } + return session.scan(this.table(), ownerStart, + ownerEnd, start, end, type, null, position); + } + + protected BackendColumnIterator queryByCond(Session session, + ConditionQuery query) { + if (query.containsScanCondition()) { + E.checkArgument(query.relations().size() == 1, + "Invalid scan with multi conditions: %s", query); + Relation scan = query.relations().iterator().next(); + Shard shard = (Shard) scan.value(); + return this.queryByRange(session, shard, query); + } + // throw new NotSupportException("query: %s", query); + return this.queryAll(session, query); + } + + protected BackendColumnIterator queryByRange(Session session, Shard shard, + ConditionQuery query) { + int type = 
Session.SCAN_GTE_BEGIN; + type |= Session.SCAN_LT_END; + type |= Session.SCAN_HASHCODE; + type |= query.withProperties() ? 0 : Session.SCAN_KEY_ONLY; + + int start = Integer.parseInt(StringUtils.isEmpty(shard.start()) ? + "0" : shard.start()); + int end = Integer.parseInt(StringUtils.isEmpty(shard.end()) ? + "0" : shard.end()); + byte[] queryBytes = query.bytes(); + String page = query.page(); + if (page != null && !page.isEmpty()) { + byte[] position = PageState.fromString(page).position(); + return session.scan(this.table(), start, end, type, queryBytes, + position); + } + return session.scan(this.table(), start, end, type, queryBytes); + } + + private static class HstoreShardSplitter extends ShardSplitter { + + public HstoreShardSplitter(String table) { + super(table); + } + + @Override + public List getSplits(Session session, long splitSize) { + E.checkArgument(splitSize >= MIN_SHARD_SIZE, + "The split-size must be >= %s bytes, but got %s", + MIN_SHARD_SIZE, splitSize); + + List splits = new ArrayList<>(); + try { + PDClient pdClient = HstoreSessionsImpl.getDefaultPdClient(); + List partitions = pdClient.getPartitions(0, + session.getGraphName()); + for (Metapb.Partition partition : partitions) { + String start = String.valueOf(partition.getStartKey()); + String end = String.valueOf(partition.getEndKey()); + splits.add(new Shard(start, end, 0)); + } + } catch (PDException e) { + e.printStackTrace(); + } + + return splits.size() != 0 ? 
+ splits : super.getSplits(session, splitSize); + } + + @Override + public long estimateDataSize(Session session) { + return 1L; + } + + @Override + public long estimateNumKeys(Session session) { + return 1L; + } + + @Override + public byte[] position(String position) { + if (END.equals(position)) { + return null; + } + return StringEncoding.decodeBase64(position); + } + } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTables.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTables.java new file mode 100644 index 0000000000..f4ebf7ebef --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTables.java @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.backend.store.hstore; + +import java.util.List; + +import org.apache.hugegraph.backend.id.Id; +import org.apache.hugegraph.backend.query.Condition; +import org.apache.hugegraph.backend.query.Condition.Relation; +import org.apache.hugegraph.backend.query.ConditionQuery; +import org.apache.hugegraph.backend.serializer.BinarySerializer; +import org.apache.hugegraph.backend.store.BackendEntry; +import org.apache.hugegraph.backend.store.BackendEntry.BackendColumnIterator; +import org.apache.hugegraph.backend.store.hstore.HstoreSessions.Session; +import org.apache.hugegraph.type.HugeTableType; +import org.apache.hugegraph.type.HugeType; +import org.apache.hugegraph.type.define.HugeKeys; +import org.apache.hugegraph.util.E; + +public class HstoreTables { + + public static class Vertex extends HstoreTable { + + public static final String TABLE = HugeTableType.VERTEX.string(); + + public Vertex(String database) { + super(database, TABLE); + } + + @Override + protected BackendColumnIterator queryById(Session session, Id id) { + return this.getById(session, id); + } + } + + /** + * task信息存储表 + */ + public static class TaskInfo extends HstoreTable { + public static final String TABLE = HugeTableType.TASK_INFO_TABLE.string(); + + public TaskInfo(String database) { + super(database, TABLE); + } + + @Override + protected BackendColumnIterator queryById(Session session, Id id) { + return this.getById(session, id); + } + } + + public static class ServerInfo extends HstoreTable { + public static final String TABLE = HugeTableType.SERVER_INFO_TABLE.string(); + + public ServerInfo(String database) { + super(database, TABLE); + } + + @Override + protected BackendColumnIterator queryById(Session session, Id id) { + return this.getById(session, id); + } + } + + public static class Edge extends HstoreTable { + + public static final String TABLE_SUFFIX = HugeType.EDGE.string(); + + public Edge(boolean out, String database) { + // Edge out/in table + 
super(database, (out ? HugeTableType.OUT_EDGE.string() : + HugeTableType.IN_EDGE.string())); + } + + public static Edge out(String database) { + return new Edge(true, database); + } + + public static Edge in(String database) { + return new Edge(false, database); + } + + @Override + protected BackendColumnIterator queryById(Session session, Id id) { + return this.getById(session, id); + } + } + + public static class IndexTable extends HstoreTable { + + public static final String TABLE = HugeTableType.ALL_INDEX_TABLE.string(); + + public IndexTable(String database) { + super(database, TABLE); + } + + @Override + public void eliminate(Session session, BackendEntry entry) { + assert entry.columns().size() == 1; + super.delete(session, entry); + } + + @Override + public void delete(Session session, BackendEntry entry) { + /* + * Only delete index by label will come here + * Regular index delete will call eliminate() + */ + byte[] ownerKey = super.ownerDelegate.apply(entry); + for (BackendEntry.BackendColumn column : entry.columns()) { + // Don't assert entry.belongToMe(column), length-prefix is 1* + session.deletePrefix(this.table(), ownerKey, column.name); + } + } + + /** + * 主要用于 range类型的index处理 + * + * @param session + * @param query + * @return + */ + @Override + protected BackendColumnIterator queryByCond(Session session, + ConditionQuery query) { + assert !query.conditions().isEmpty(); + + List conds = query.syspropConditions(HugeKeys.ID); + E.checkArgument(!conds.isEmpty(), + "Please specify the index conditions"); + + Id prefix = null; + Id min = null; + boolean minEq = false; + Id max = null; + boolean maxEq = false; + + for (Condition c : conds) { + Relation r = (Relation) c; + switch (r.relation()) { + case PREFIX: + prefix = (Id) r.value(); + break; + case GTE: + minEq = true; + case GT: + min = (Id) r.value(); + break; + case LTE: + maxEq = true; + case LT: + max = (Id) r.value(); + break; + default: + E.checkArgument(false, "Unsupported relation '%s'", + 
r.relation()); + } + } + + E.checkArgumentNotNull(min, "Range index begin key is missing"); + byte[] begin = min.asBytes(); + if (!minEq) { + BinarySerializer.increaseOne(begin); + } + byte[] ownerStart = this.ownerScanDelegate.get(); + byte[] ownerEnd = this.ownerScanDelegate.get(); + if (max == null) { + E.checkArgumentNotNull(prefix, "Range index prefix is missing"); + return session.scan(this.table(), ownerStart, ownerEnd, begin, + prefix.asBytes(), Session.SCAN_PREFIX_END); + } else { + byte[] end = max.asBytes(); + int type = maxEq ? Session.SCAN_LTE_END : Session.SCAN_LT_END; + return session.scan(this.table(), ownerStart, + ownerEnd, begin, end, type); + } + } + } + + public static class OlapTable extends HstoreTable { + + public static final String TABLE = HugeTableType.OLAP_TABLE.string(); + + public OlapTable(String database) { + // 由原先多个ap_{pk_id} 合并成一个ap表 + super(database, TABLE); + } + + @Override + protected BackendColumnIterator queryById(Session session, Id id) { + return this.getById(session, id); + } + + @Override + public boolean isOlap() { + return true; + } + } +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/IdClient.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/IdClient.java new file mode 100644 index 0000000000..3e42bce2d7 --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/IdClient.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.backend.store.hstore.fake; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +import org.apache.hugegraph.backend.store.hstore.HstoreSessions; +import org.apache.hugegraph.pd.grpc.Pdpb; + +public abstract class IdClient { + + protected HstoreSessions.Session session; + protected String table; + + public IdClient(HstoreSessions.Session session, String table) { + this.session = session; + this.table = table; + } + + protected static byte[] b(long value) { + return ByteBuffer.allocate(Long.BYTES).order( + ByteOrder.nativeOrder()).putLong(value).array(); + } + + protected static long l(byte[] bytes) { + assert bytes.length == Long.BYTES; + return ByteBuffer.wrap(bytes).order( + ByteOrder.nativeOrder()).getLong(); + } + + public abstract Pdpb.GetIdResponse getIdByKey(String key, int delta) + throws Exception; + + public abstract Pdpb.ResetIdResponse resetIdByKey(String key) throws Exception; + + public abstract void increaseId(String key, long increment) + throws Exception; +} diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/PDIdClient.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/PDIdClient.java new file mode 100644 index 0000000000..0dbfc56eec --- /dev/null +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/PDIdClient.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.backend.store.hstore.fake; + +import org.apache.hugegraph.backend.store.hstore.HstoreSessions; +import org.apache.hugegraph.backend.store.hstore.HstoreSessionsImpl; +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.grpc.Pdpb; + +public class PDIdClient extends IdClient { + + PDClient pdClient; + + public PDIdClient(HstoreSessions.Session session, String table) { + super(session, table); + pdClient = HstoreSessionsImpl.getDefaultPdClient(); + } + + @Override + public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws Exception { + return pdClient.getIdByKey(key, delta); + } + + @Override + public Pdpb.ResetIdResponse resetIdByKey(String key) throws Exception { + return pdClient.resetIdByKey(key); + } + + @Override + public void increaseId(String key, long increment) throws Exception { + pdClient.getIdByKey(key, (int) increment); + } +} From 55c1d6edb219df4a1fe524c3ffd65bc6eb704299 Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Fri, 26 Apr 2024 13:49:08 +0800 Subject: [PATCH 2/8] adjust pom.xml --- hugegraph-server/hugegraph-hstore/pom.xml | 1 - hugegraph-server/pom.xml | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/hugegraph-server/hugegraph-hstore/pom.xml 
b/hugegraph-server/hugegraph-hstore/pom.xml index f777eb05ef..64f6e10973 100644 --- a/hugegraph-server/hugegraph-hstore/pom.xml +++ b/hugegraph-server/hugegraph-hstore/pom.xml @@ -45,6 +45,5 @@ hg-pd-client ${revision} - diff --git a/hugegraph-server/pom.xml b/hugegraph-server/pom.xml index 60946101f0..4c55da8b3f 100644 --- a/hugegraph-server/pom.xml +++ b/hugegraph-server/pom.xml @@ -78,6 +78,7 @@ hugegraph-palo hugegraph-hbase hugegraph-postgresql + hugegraph-hstore From 0cf1cc157cab0ed6b0ff9ccb742377be759e6f53 Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Fri, 26 Apr 2024 13:49:50 +0800 Subject: [PATCH 3/8] format hugegraph-hstore --- .../backend/store/hstore/HstoreFeatures.java | 4 +- .../store/hstore/HstoreSessionsImpl.java | 73 +++++++++---------- .../backend/store/hstore/HstoreStore.java | 55 +++++++------- .../backend/store/hstore/HstoreTable.java | 20 ++--- .../backend/store/hstore/HstoreTables.java | 2 + 5 files changed, 78 insertions(+), 76 deletions(-) diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreFeatures.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreFeatures.java index 3af6f803bc..e5aa4b943c 100644 --- a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreFeatures.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreFeatures.java @@ -129,5 +129,7 @@ public boolean supportsOlapProperties() { } @Override - public boolean supportsTaskAndServerVertex() { return true; } + public boolean supportsTaskAndServerVertex() { + return true; + } } diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java index e2ddfd97cb..bf45b6800f 100755 --- 
a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java @@ -61,7 +61,7 @@ public class HstoreSessionsImpl extends HstoreSessions { private static final Set infoInitializedGraph = - Collections.synchronizedSet(new HashSet<>()); + Collections.synchronizedSet(new HashSet<>()); private static int tableCode = 0; private static volatile Boolean initializedNode = Boolean.FALSE; private static volatile PDClient defaultPdClient; @@ -104,11 +104,11 @@ private void initStoreNode(HugeConfig config) { synchronized (this) { if (!initializedNode) { PDConfig pdConfig = - PDConfig.of(config.get(CoreOptions.PD_PEERS)) - .setEnableCache(true); + PDConfig.of(config.get(CoreOptions.PD_PEERS)) + .setEnableCache(true); defaultPdClient = PDClient.create(pdConfig); hgStoreClient = - HgStoreClient.create(defaultPdClient); + HgStoreClient.create(defaultPdClient); initializedNode = Boolean.TRUE; } } @@ -121,15 +121,15 @@ public void open() throws Exception { synchronized (infoInitializedGraph) { if (!infoInitializedGraph.contains(this.graphName)) { Integer partitionCount = - this.config.get(HstoreOptions.PARTITION_COUNT); + this.config.get(HstoreOptions.PARTITION_COUNT); Assert.assertTrue("The value of hstore.partition_count" + " cannot be less than 0.", partitionCount > -1); defaultPdClient.setGraph(Metapb.Graph.newBuilder() .setGraphName( - this.graphName) + this.graphName) .setPartitionCount( - partitionCount) + partitionCount) .build()); infoInitializedGraph.add(this.graphName); } @@ -312,7 +312,6 @@ private boolean match(int expected) { return Session.matchScanType(expected, this.scanType); } - @Override public boolean hasNext() { if (gotNext) { @@ -360,7 +359,7 @@ private boolean filter(byte[] key) { } else { assert this.match(Session.SCAN_ANY) || this.match(Session.SCAN_GT_BEGIN) || this.match( - 
Session.SCAN_GTE_BEGIN) : "Unknown scan type"; + Session.SCAN_GTE_BEGIN) : "Unknown scan type"; return true; } } @@ -372,8 +371,8 @@ public BackendColumn next() { throw new NoSuchElementException(); } BackendColumn col = - BackendColumn.of(this.iter.key(), - this.iter.value()); + BackendColumn.of(this.iter.key(), + this.iter.value()); if (this.iter.hasNext()) { gotNext = true; this.iter.next(); @@ -562,7 +561,7 @@ public void deleteRange(String table, byte[] ownerKeyFrom, @Override public byte[] get(String table, byte[] key) { return this.graph.get(table, HgOwnerKey.of( - HgStoreClientConst.ALL_PARTITION_OWNER, key)); + HgStoreClientConst.ALL_PARTITION_OWNER, key)); } @Override @@ -587,7 +586,7 @@ public BackendColumnIterator scan(String table, byte[] conditionQueryToByte) { assert !this.hasChanges(); HgKvIterator results = - this.graph.scanIterator(table, conditionQueryToByte); + this.graph.scanIterator(table, conditionQueryToByte); return new ColumnIterator<>(table, results); } @@ -597,8 +596,8 @@ public BackendColumnIterator scan(String table, byte[] ownerKey, assert !this.hasChanges(); HgKvIterator result = this.graph.scanIterator(table, HgOwnerKey.of( - ownerKey, - prefix)); + ownerKey, + prefix)); return new ColumnIterator<>(table, result); } @@ -612,21 +611,21 @@ public List scan(String table, .setQuery(query) .setPerKeyLimit(limit).build(); List> scanIterators = - this.graph.scanBatch(scanQuery); + this.graph.scanBatch(scanQuery); LinkedList columnIterators = - new LinkedList<>(); + new LinkedList<>(); scanIterators.forEach(item -> { columnIterators.add( - new ColumnIterator<>(table, item)); + new ColumnIterator<>(table, item)); }); return columnIterators; } @Override public BackendEntry.BackendIterator scan( - String table, - Iterator keys, - int scanType, Query queryParam, byte[] query) { + String table, + Iterator keys, + int scanType, Query queryParam, byte[] query) { ScanOrderType orderType; switch (queryParam.orderType()) { case ORDER_NONE: @@ -685,10 
+684,10 @@ public BackendColumnIterator scan(String table, byte[] ownerKeyFrom, int scanType) { assert !this.hasChanges(); HgKvIterator result = this.graph.scanIterator(table, HgOwnerKey.of( - ownerKeyFrom, keyFrom), + ownerKeyFrom, keyFrom), HgOwnerKey.of( - ownerKeyTo, - keyTo), 0, + ownerKeyTo, + keyTo), 0, scanType, null); return new ColumnIterator<>(table, result, keyFrom, @@ -703,11 +702,11 @@ public BackendColumnIterator scan(String table, byte[] ownerKeyFrom, assert !this.hasChanges(); HgKvIterator result = this.graph.scanIterator(table, HgOwnerKey.of( - ownerKeyFrom, - keyFrom), + ownerKeyFrom, + keyFrom), HgOwnerKey.of( - ownerKeyTo, - keyTo), + ownerKeyTo, + keyTo), 0, scanType, query); @@ -724,11 +723,11 @@ public BackendColumnIterator scan(String table, byte[] ownerKeyFrom, assert !this.hasChanges(); HgKvIterator result = this.graph.scanIterator(table, HgOwnerKey.of( - ownerKeyFrom, - keyFrom), + ownerKeyFrom, + keyFrom), HgOwnerKey.of( - ownerKeyTo, - keyTo), + ownerKeyTo, + keyTo), 0, scanType, query); @@ -743,8 +742,8 @@ public BackendColumnIterator scan(String table, int codeFrom, byte[] query) { assert !this.hasChanges(); HgKvIterator iterator = - this.graph.scanIterator(table, codeFrom, codeTo, 256, - new byte[0]); + this.graph.scanIterator(table, codeFrom, codeTo, 256, + new byte[0]); return new ColumnIterator<>(table, iterator, new byte[0], new byte[0], scanType); } @@ -755,8 +754,8 @@ public BackendColumnIterator scan(String table, int codeFrom, byte[] query, byte[] position) { assert !this.hasChanges(); HgKvIterator iterator = - this.graph.scanIterator(table, codeFrom, codeTo, 256, - new byte[0]); + this.graph.scanIterator(table, codeFrom, codeTo, 256, + new byte[0]); iterator.seek(position); return new ColumnIterator<>(table, iterator, new byte[0], new byte[0], scanType); @@ -767,7 +766,7 @@ public BackendColumnIterator getWithBatch(String table, List keys) { assert !this.hasChanges(); HgKvIterator kvIterator = - 
this.graph.batchPrefix(table, keys); + this.graph.batchPrefix(table, keys); return new ColumnIterator<>(table, kvIterator); } diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreStore.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreStore.java index 1127d122e5..603ef3a936 100644 --- a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreStore.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreStore.java @@ -67,11 +67,11 @@ public abstract class HstoreStore extends AbstractBackendStore { private static final Logger LOG = Log.logger(HstoreStore.class); private static final Set INDEX_TYPES = ImmutableSet.of( - HugeType.SECONDARY_INDEX, HugeType.VERTEX_LABEL_INDEX, - HugeType.EDGE_LABEL_INDEX, HugeType.RANGE_INT_INDEX, - HugeType.RANGE_FLOAT_INDEX, HugeType.RANGE_LONG_INDEX, - HugeType.RANGE_DOUBLE_INDEX, HugeType.SEARCH_INDEX, - HugeType.SHARD_INDEX, HugeType.UNIQUE_INDEX + HugeType.SECONDARY_INDEX, HugeType.VERTEX_LABEL_INDEX, + HugeType.EDGE_LABEL_INDEX, HugeType.RANGE_INT_INDEX, + HugeType.RANGE_FLOAT_INDEX, HugeType.RANGE_LONG_INDEX, + HugeType.RANGE_DOUBLE_INDEX, HugeType.SEARCH_INDEX, + HugeType.SHARD_INDEX, HugeType.UNIQUE_INDEX ); private static final BackendFeatures FEATURES = new HstoreFeatures(); @@ -154,7 +154,7 @@ protected final HstoreTable table(HugeType type) { break; default: throw new AssertionError(String.format( - "Invalid type: %s", type)); + "Invalid type: %s", type)); } return this.tables.get((int) table.code()); } @@ -299,8 +299,8 @@ private void mutate(Session session, BackendAction item, break; default: throw new AssertionError(String.format( - "Unsupported mutate action: %s", - item.action())); + "Unsupported mutate action: %s", + item.action())); } } } @@ -512,8 +512,8 @@ public List> query(List typeList, builder.append((table = 
this.table(type)).table()).append(","); } List> iteratorList = - table.query(session, queries, - builder.substring(0, builder.length() - 1)); + table.query(session, queries, + builder.substring(0, builder.length() - 1)); for (int i = 0; i < iteratorList.size(); i++) { Iterator entries = iteratorList.get(i); // Merge olap results as needed @@ -546,8 +546,8 @@ public Iterator> query(List typeList, } Iterator> iterators = - table.query(session, queries, - builder.substring(0, builder.length() - 1)); + table.query(session, queries, + builder.substring(0, builder.length() - 1)); return iterators; } finally { @@ -556,8 +556,8 @@ public Iterator> query(List typeList, } private Iterator getBackendEntryIterator( - Iterator entries, - Query query) { + Iterator entries, + Query query) { HstoreTable table; Set olapPks = query.olapPks(); if (this.isGraphStore && !olapPks.isEmpty()) { @@ -574,7 +574,6 @@ private Iterator getBackendEntryIterator( return entries; } - /** * 重新构造 查询olap表 query * 由于 olap合并成一张表, 在写入olap数据, key在后面增加了pk @@ -598,10 +597,10 @@ private Query constructOlapQueryCondition(Id pk, Query query) { // create binary id BytesBuffer buffer = - BytesBuffer.allocate(1 + pk.length() + 1 + id.length()); + BytesBuffer.allocate(1 + pk.length() + 1 + id.length()); buffer.writeId(pk); id = new BinaryBackendEntry.BinaryId( - buffer.writeId(id).bytes(), id); + buffer.writeId(id).bytes(), id); linkedHashSet.add(id); } q.resetIds(); @@ -611,7 +610,7 @@ private Query constructOlapQueryCondition(Id pk, Query query) { // create binary id BytesBuffer buffer = BytesBuffer.allocate(1 + pk.length()); pk = new BinaryBackendEntry.BinaryId( - buffer.writeId(pk).bytes(), pk); + buffer.writeId(pk).bytes(), pk); IdPrefixQuery idPrefixQuery = new IdPrefixQuery(HugeType.OLAP, pk); return idPrefixQuery; @@ -708,6 +707,11 @@ public void setCounterLowest(HugeType type, long lowest) { this.increaseCounter(type, lowest); } + @Override + public String storedVersion() { + return "1.13"; + } + 
/***************************** Store defines *****************************/ public static class HstoreSchemaStore extends HstoreStore { @@ -724,13 +728,13 @@ public boolean isSchemaStore() { @Override public void increaseCounter(HugeType type, long num) { throw new UnsupportedOperationException( - "HstoreSchemaStore.increaseCounter()"); + "HstoreSchemaStore.increaseCounter()"); } @Override public long getCounter(HugeType type) { throw new UnsupportedOperationException( - "HstoreSchemaStore.getCounter()"); + "HstoreSchemaStore.getCounter()"); } } @@ -764,19 +768,19 @@ public boolean isSchemaStore() { @Override public Id nextId(HugeType type) { throw new UnsupportedOperationException( - "HstoreGraphStore.nextId()"); + "HstoreGraphStore.nextId()"); } @Override public void increaseCounter(HugeType type, long num) { throw new UnsupportedOperationException( - "HstoreGraphStore.increaseCounter()"); + "HstoreGraphStore.increaseCounter()"); } @Override public long getCounter(HugeType type) { throw new UnsupportedOperationException( - "HstoreGraphStore.getCounter()"); + "HstoreGraphStore.getCounter()"); } @Override @@ -817,9 +821,4 @@ public boolean existOlapTable(Id pkId) { return super.sessions.existsTable(tableName); } } - - @Override - public String storedVersion() { - return "1.13"; - } } diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java index 39e24a1d9d..de9f745863 100755 --- a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java @@ -72,9 +72,9 @@ public class HstoreTable extends BackendTable { Function ownerDelegate = (entry) -> getOwner(entry); Function ownerByIdDelegate = (id) -> getOwnerId(id); BiFunction ownerByQueryDelegate 
= - (type, id) -> getOwnerId(type, id); + (type, id) -> getOwnerId(type, id); Supplier ownerScanDelegate = - () -> HgStoreClientConst.ALL_PARTITION_OWNER; + () -> HgStoreClientConst.ALL_PARTITION_OWNER; public HstoreTable(String database, String table) { super(String.format("%s+%s", database, table)); @@ -110,7 +110,7 @@ private static boolean direction(Condition condition) { } protected static BackendEntryIterator newEntryIterator( - BackendColumnIterator cols, Query query) { + BackendColumnIterator cols, Query query) { return new BinaryEntryIterator<>(cols, query, (entry, col) -> { if (entry == null || !entry.belongToMe(col)) { HugeType type = query.resultType(); @@ -123,7 +123,7 @@ protected static BackendEntryIterator newEntryIterator( } protected static BackendEntryIterator newEntryIteratorOlap( - BackendColumnIterator cols, Query query, boolean isOlap) { + BackendColumnIterator cols, Query query, boolean isOlap) { return new BinaryEntryIterator<>(cols, query, (entry, col) -> { if (entry == null || !entry.belongToMe(col)) { HugeType type = query.resultType(); @@ -329,14 +329,14 @@ public List> query(Session session, List queries, String tableName) { List queryByPrefixList = - this.queryByPrefixList(session, queries, tableName); + this.queryByPrefixList(session, queries, tableName); LinkedList> iterators = new LinkedList<>(); for (int i = 0; i < queryByPrefixList.size(); i++) { IdPrefixQuery q = queries.get(i).copy(); q.capacity(Query.NO_CAPACITY); q.limit(Query.NO_LIMIT); BackendEntryIterator iterator = - newEntryIterator(queryByPrefixList.get(i), q); + newEntryIterator(queryByPrefixList.get(i), q); iterators.add(iterator); } return iterators; @@ -359,7 +359,7 @@ public BackendEntry.BackendIterator> query(Session sessio byte[] queryBytes = originQuery == null ? 
null : originQuery.bytes(); BackendEntry.BackendIterator it - = session.scan(tableName, new Iterator() { + = session.scan(tableName, new Iterator() { @Override public boolean hasNext() { if (first[0] != null) { @@ -521,9 +521,9 @@ protected BackendColumnIterator queryByPrefix(Session session, } protected List queryByPrefixList( - Session session, - List queries, - String tableName) { + Session session, + List queries, + String tableName) { E.checkArgument(queries.size() > 0, "The size of queries must be greater than zero"); IdPrefixQuery query = queries.get(0); diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTables.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTables.java index f4ebf7ebef..7042046707 100644 --- a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTables.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTables.java @@ -52,6 +52,7 @@ protected BackendColumnIterator queryById(Session session, Id id) { * task信息存储表 */ public static class TaskInfo extends HstoreTable { + public static final String TABLE = HugeTableType.TASK_INFO_TABLE.string(); public TaskInfo(String database) { @@ -65,6 +66,7 @@ protected BackendColumnIterator queryById(Session session, Id id) { } public static class ServerInfo extends HstoreTable { + public static final String TABLE = HugeTableType.SERVER_INFO_TABLE.string(); public ServerInfo(String database) { From 6d8fbb34d6a15e8bcfd31b810a599da60f379a19 Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Fri, 26 Apr 2024 13:50:45 +0800 Subject: [PATCH 4/8] mark useless --- .../apache/hugegraph/backend/store/hstore/fake/IdClient.java | 2 ++ .../apache/hugegraph/backend/store/hstore/fake/PDIdClient.java | 2 ++ 2 files changed, 4 insertions(+) diff --git 
a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/IdClient.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/IdClient.java index 3e42bce2d7..44564d881c 100644 --- a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/IdClient.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/IdClient.java @@ -21,8 +21,10 @@ import java.nio.ByteOrder; import org.apache.hugegraph.backend.store.hstore.HstoreSessions; +import org.apache.hugegraph.pd.common.Useless; import org.apache.hugegraph.pd.grpc.Pdpb; +@Useless public abstract class IdClient { protected HstoreSessions.Session session; diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/PDIdClient.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/PDIdClient.java index 0dbfc56eec..dc3c76c4ec 100644 --- a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/PDIdClient.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/fake/PDIdClient.java @@ -20,8 +20,10 @@ import org.apache.hugegraph.backend.store.hstore.HstoreSessions; import org.apache.hugegraph.backend.store.hstore.HstoreSessionsImpl; import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.common.Useless; import org.apache.hugegraph.pd.grpc.Pdpb; +@Useless public class PDIdClient extends IdClient { PDClient pdClient; From 79f6be95bff4cc6433e1c2f57f65e0dbc7191eac Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Fri, 26 Apr 2024 14:29:41 +0800 Subject: [PATCH 5/8] adapt --- hugegraph-server/hugegraph-core/pom.xml | 26 +++ .../hugegraph/backend/id/IdGenerator.java | 2 +- .../hugegraph/backend/query/Condition.java | 84 +++++++++- .../backend/query/ConditionQuery.java | 66 
++++++++ .../apache/hugegraph/backend/query/Query.java | 47 +++++- .../serializer/AbstractSerializerAdapter.java | 66 ++++++++ .../query/serializer/QueryAdapter.java | 148 ++++++++++++++++++ .../query/serializer/QueryIdAdapter.java | 46 ++++++ .../serializer/BinaryBackendEntry.java | 12 +- .../backend/serializer/BytesBuffer.java | 40 ++++- .../backend/store/AbstractBackendStore.java | 7 + .../backend/store/BackendFeatures.java | 2 + .../backend/store/BackendMutation.java | 4 + .../hugegraph/backend/store/BackendStore.java | 4 + .../hugegraph/backend/store/BackendTable.java | 5 + .../apache/hugegraph/config/CoreOptions.java | 31 ++++ .../apache/hugegraph/type/HugeTableType.java | 64 ++++++++ .../org/apache/hugegraph/type/HugeType.java | 9 +- 18 files changed, 650 insertions(+), 13 deletions(-) create mode 100644 hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/AbstractSerializerAdapter.java create mode 100644 hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryAdapter.java create mode 100644 hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryIdAdapter.java create mode 100644 hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/type/HugeTableType.java diff --git a/hugegraph-server/hugegraph-core/pom.xml b/hugegraph-server/hugegraph-core/pom.xml index dad67853e9..6b6a37e7ad 100644 --- a/hugegraph-server/hugegraph-core/pom.xml +++ b/hugegraph-server/hugegraph-core/pom.xml @@ -253,6 +253,32 @@ ${jjwt.version} runtime + + com.google.code.gson + gson + 2.8.9 + + + org.apache.hugegraph + hg-pd-client + ${revision} + + + org.apache.hugegraph + hg-store-common + ${revision} + + + io.etcd + jetcd-core + 0.5.9 + + + io.grpc + grpc-core + + + diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/id/IdGenerator.java 
b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/id/IdGenerator.java index 99bb1ad589..9261d31fe8 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/id/IdGenerator.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/id/IdGenerator.java @@ -379,7 +379,7 @@ public String toString() { /** * This class is just used by backend store for wrapper object as Id */ - private static final class ObjectId implements Id { + public static final class ObjectId implements Id { private final Object object; diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/Condition.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/Condition.java index 94f64f3d76..3ef9cd5fa0 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/Condition.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/Condition.java @@ -47,7 +47,8 @@ public enum ConditionType { NONE, RELATION, AND, - OR + OR, + NOT } public enum RelationType implements BiPredicate { @@ -300,7 +301,8 @@ public boolean isRelation() { public boolean isLogic() { return this.type() == ConditionType.AND || - this.type() == ConditionType.OR; + this.type() == ConditionType.OR || + this.type() == ConditionType.NOT; } public boolean isFlattened() { @@ -315,6 +317,10 @@ public static Condition or(Condition left, Condition right) { return new Or(left, right); } + public static Condition not(Condition condition) { + return new Not(condition); + } + public static Relation eq(HugeKeys key, Object value) { return new SyspropRelation(key, RelationType.EQ, value); } @@ -536,6 +542,80 @@ public Condition copy() { } } + public static class Not extends Condition { + + Condition condition; + + public Not(Condition condition) { + super(); + this.condition = condition; + } + + public Condition condition() { + 
return condition; + } + + @Override + public ConditionType type() { + return ConditionType.NOT; + } + + @Override + public boolean test(Object value) { + return !this.condition.test(value); + } + + @Override + public boolean test(HugeElement element) { + return !this.condition.test(element); + } + + @Override + public Condition copy() { + return new Not(this.condition.copy()); + } + + @Override + public boolean isSysprop() { + return this.condition.isSysprop(); + } + + @Override + public List relations() { + return new ArrayList(this.condition.relations()); + } + + @Override + public Condition replace(Relation from, Relation to) { + this.condition = this.condition.replace(from, to); + return this; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(64); + sb.append(this.type().name()).append(' '); + sb.append(this.condition); + return sb.toString(); + } + + @Override + public boolean equals(Object object) { + if (!(object instanceof Not)) { + return false; + } + Not other = (Not) object; + return this.type().equals(other.type()) && + this.condition.equals(other.condition()); + } + + @Override + public int hashCode() { + return this.type().hashCode() ^ + this.condition.hashCode(); + } + } + public abstract static class Relation extends Condition { // Relational operator (like: =, >, <, in, ...) 
diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/ConditionQuery.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/ConditionQuery.java index a4386831b5..8a5706a774 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/ConditionQuery.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/ConditionQuery.java @@ -18,6 +18,7 @@ package org.apache.hugegraph.backend.query; import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -34,6 +35,8 @@ import org.apache.hugegraph.backend.id.SplicingIdGenerator; import org.apache.hugegraph.backend.query.Condition.Relation; import org.apache.hugegraph.backend.query.Condition.RelationType; +import org.apache.hugegraph.backend.query.serializer.QueryAdapter; +import org.apache.hugegraph.backend.query.serializer.QueryIdAdapter; import org.apache.hugegraph.perf.PerfUtil.Watched; import org.apache.hugegraph.structure.HugeElement; import org.apache.hugegraph.structure.HugeProperty; @@ -48,6 +51,8 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; public class ConditionQuery extends IdQuery { @@ -73,6 +78,12 @@ public class ConditionQuery extends IdQuery { private static final List EMPTY_CONDITIONS = ImmutableList.of(); + private static final Gson gson = new GsonBuilder() + .registerTypeAdapter(Condition.class, new QueryAdapter()) + .registerTypeAdapter(Id.class, new QueryIdAdapter()) + .setDateFormat("yyyy-MM-dd HH:mm:ss.SSS") + .create(); + // Conditions will be contacted with `and` by default private List conditions = EMPTY_CONDITIONS; @@ -220,6 +231,31 @@ public Relation relation(Id key) { return null; } + public boolean 
containsLabelOrUserpropRelation() { + for (Condition c : this.conditions) { + while (c instanceof Condition.Not) { + c = ((Condition.Not) c).condition(); + } + if (c.isLogic()) { + Condition.BinCondition binCondition = + (Condition.BinCondition) c; + ConditionQuery query = new ConditionQuery(HugeType.EDGE); + query.query(binCondition.left()); + query.query(binCondition.right()); + if (query.containsLabelOrUserpropRelation()) { + return true; + } + } else { + Condition.Relation r = (Condition.Relation) c; + if (r.key().equals(HugeKeys.LABEL) || + c instanceof Condition.UserpropRelation) { + return true; + } + } + } + return false; + } + @Watched public T condition(Object key) { List valuesEQ = InsertionOrderUtil.newList(); @@ -300,6 +336,19 @@ public boolean containsCondition(HugeKeys key) { return false; } + public boolean containsCondition(Condition.RelationType type) { + for (Relation r : this.relations()) { + if (r.relation().equals(type)) { + return true; + } + } + return false; + } + + public boolean containsScanCondition() { + return this.containsCondition(Condition.RelationType.SCAN); + } + public boolean containsRelation(HugeKeys key, Condition.RelationType type) { for (Relation r : this.relations()) { if (r.key().equals(key) && r.relation().equals(type)) { @@ -702,6 +751,18 @@ public static String concatValues(Object value) { } } + public static ConditionQuery fromBytes(byte[] bytes) { + Gson gson = new GsonBuilder() + .registerTypeAdapter(Condition.class, new QueryAdapter()) + .registerTypeAdapter(Id.class, new QueryIdAdapter()) + .setDateFormat("yyyy-MM-dd HH:mm:ss.SSS") + .create(); + String cqs = new String(bytes, StandardCharsets.UTF_8); + ConditionQuery conditionQuery = gson.fromJson(cqs, ConditionQuery.class); + + return conditionQuery; + } + private static boolean needConvertNumber(Object value) { // Numeric or date values should be converted to number from string return NumericUtil.isNumber(value) || value instanceof Date; @@ -891,4 +952,9 @@ 
public interface ResultsFilter { boolean test(HugeElement element); } + + public byte[] bytes() { + String cqs = gson.toJson(this); + return cqs.getBytes(StandardCharsets.UTF_8); + } } diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/Query.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/Query.java index 56352e4c11..593887d92f 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/Query.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/Query.java @@ -62,12 +62,15 @@ public class Query implements Cloneable { private long actualOffset; private long actualStoreOffset; private long limit; + private long skipDegree; private String page; private long capacity; private boolean showHidden; private boolean showDeleting; private boolean showExpired; private boolean olap; + private boolean withProperties; + private OrderType orderType; private Set olapPks; private Aggregate aggregate; @@ -88,6 +91,7 @@ public Query(HugeType resultType, Query originQuery) { this.actualOffset = 0L; this.actualStoreOffset = 0L; this.limit = NO_LIMIT; + this.skipDegree = NO_LIMIT; this.page = null; this.capacity = defaultCapacity(); @@ -95,6 +99,9 @@ public Query(HugeType resultType, Query originQuery) { this.showHidden = false; this.showDeleting = false; + this.withProperties = true; + this.orderType = OrderType.ORDER_STRICT; + this.aggregate = null; this.showExpired = false; this.olap = false; @@ -105,10 +112,13 @@ public void copyBasic(Query query) { E.checkNotNull(query, "query"); this.offset = query.offset(); this.limit = query.limit(); + this.skipDegree = query.skipDegree(); this.page = query.page(); this.capacity = query.capacity(); this.showHidden = query.showHidden(); this.showDeleting = query.showDeleting(); + this.withProperties = query.withProperties(); + this.orderType = query.orderType(); this.aggregate = 
query.aggregate(); this.showExpired = query.showExpired(); this.olap = query.olap(); @@ -441,6 +451,30 @@ public void showDeleting(boolean showDeleting) { this.showDeleting = showDeleting; } + public long skipDegree() { + return this.skipDegree; + } + + public void skipDegree(long skipDegree) { + this.skipDegree = skipDegree; + } + + public boolean withProperties() { + return this.withProperties; + } + + public void withProperties(boolean withProperties) { + this.withProperties = withProperties; + } + + public OrderType orderType() { + return this.orderType; + } + + public void orderType(OrderType orderType) { + this.orderType = orderType; + } + public boolean showExpired() { return this.showExpired; } @@ -493,7 +527,8 @@ public boolean equals(Object object) { this.limit == other.limit && Objects.equals(this.page, other.page) && this.ids().equals(other.ids()) && - this.conditions().equals(other.conditions()); + this.conditions().equals(other.conditions()) && + this.withProperties == other.withProperties; } @Override @@ -504,7 +539,8 @@ public int hashCode() { Long.hashCode(this.limit) ^ Objects.hashCode(this.page) ^ this.ids().hashCode() ^ - this.conditions().hashCode(); + this.conditions().hashCode() ^ + Boolean.hashCode(this.withProperties); } @Override @@ -580,6 +616,13 @@ public static void checkForceCapacity(long count) throws LimitExceedException { } } + public enum OrderType { + // 批量接口下,返回顺序的要求 + ORDER_NONE, // 允许无序 + ORDER_WITHIN_VERTEX, // 一个点内的边不会被打断,单不同点之间为无序 + ORDER_STRICT // 保证原始的输入点顺序 + } + public enum Order { ASC, DESC diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/AbstractSerializerAdapter.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/AbstractSerializerAdapter.java new file mode 100644 index 0000000000..1a66ddf074 --- /dev/null +++ 
b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/AbstractSerializerAdapter.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.backend.query.serializer; + +import java.lang.reflect.Type; +import java.util.Map; + +import org.apache.hugegraph.backend.BackendException; + +import com.google.gson.JsonDeserializationContext; +import com.google.gson.JsonDeserializer; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonPrimitive; +import com.google.gson.JsonSerializationContext; +import com.google.gson.JsonSerializer; + +// TODO: optimize by binary protocol +public abstract class AbstractSerializerAdapter implements JsonSerializer, + JsonDeserializer { + + //Note: By overriding the method to get the mapping + public abstract Map validType(); + + @Override + public T deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) throws + JsonParseException { + JsonObject object = json.getAsJsonObject(); + String type = object.get("cls").getAsString(); + JsonElement element = object.get("el"); + try { + return 
context.deserialize(element, validType().get(type)); + } catch (Exception e) { + throw new BackendException("Unknown element type: " + type, e); + } + } + + /* + * Note: Currently, only the first character of the class name is taken as the key + * to reduce serialization results + * */ + @Override + public JsonElement serialize(T src, Type typeOfSrc, JsonSerializationContext context) { + JsonObject result = new JsonObject(); + Class clazz = src.getClass(); + result.add("cls", new JsonPrimitive(clazz.getSimpleName().substring(0, 1).toUpperCase())); + result.add("el", context.serialize(src, clazz)); + return result; + } +} diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryAdapter.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryAdapter.java new file mode 100644 index 0000000000..47b7c76a00 --- /dev/null +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryAdapter.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.backend.query.serializer; + +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.backend.query.Condition; +import org.apache.hugegraph.type.define.Directions; + +import com.google.common.collect.ImmutableMap; +import com.google.gson.JsonDeserializationContext; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonPrimitive; +import com.google.gson.JsonSerializationContext; +import com.google.gson.reflect.TypeToken; + +public class QueryAdapter extends AbstractSerializerAdapter { + + static ImmutableMap cls = + ImmutableMap.builder() + // TODO: uncomment later + .put("N", Condition.Not.class) + .put("A", Condition.And.class) + .put("O", Condition.Or.class) + .put("S", Condition.SyspropRelation.class) + .put("U", Condition.UserpropRelation.class) + .build(); + + static boolean isPrimitive(Class clz) { + try { + return (clz == Date.class) || ((Class) clz.getField("TYPE").get(null)).isPrimitive(); + } catch (Exception e) { + return false; + } + } + + @Override + public Map validType() { + return cls; + } + + @Override + public Condition deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) + throws JsonParseException { + Condition condition = super.deserialize(json, typeOfT, context); + if (condition instanceof Condition.Relation) { + JsonObject object = json.getAsJsonObject(); + if (object.has("el")) { + JsonElement elElement = object.get("el"); + JsonElement valueElement = elElement.getAsJsonObject().get("value"); + if (valueElement.isJsonObject()) { + String cls = valueElement.getAsJsonObject().get("cls").getAsString(); + try { + Class actualClass = Class.forName(cls); + Object obj = context.deserialize(valueElement, actualClass); + ((Condition.Relation) condition).value(obj); + } catch 
(ClassNotFoundException e) { + throw new JsonParseException(e.getMessage()); + } + } else if (elElement.getAsJsonObject().has("valuecls")) { + if (valueElement.isJsonArray()) { + String cls = elElement.getAsJsonObject().get("valuecls").getAsString(); + try { + Class actualClass = Class.forName(cls); + Type type = TypeToken.getParameterized(ArrayList.class, actualClass) + .getType(); + Object value = context.deserialize(valueElement, type); + ((Condition.Relation) condition).value(value); + } catch (ClassNotFoundException e) { + throw new JsonParseException(e.getMessage()); + } + } else { + String cls = elElement.getAsJsonObject().get("valuecls").getAsString(); + try { + Class actualClass = Class.forName(cls); + Object obj = context.deserialize(valueElement, actualClass); + ((Condition.Relation) condition).value(obj); + } catch (ClassNotFoundException e) { + throw new JsonParseException(e.getMessage()); + } + } + + } else if (valueElement.isJsonPrimitive() && + valueElement.getAsJsonPrimitive().isString()) { + switch ((String) ((Condition.Relation) condition).value()) { + case "OUT": + ((Condition.Relation) condition).value(Directions.OUT); + break; + case "IN": + ((Condition.Relation) condition).value(Directions.IN); + break; + default: + break; + } + } + } + } + return condition; + } + + @Override + public JsonElement serialize(Condition src, Type typeOfSrc, JsonSerializationContext context) { + JsonElement result = super.serialize(src, typeOfSrc, context); + if (src instanceof Condition.Relation) { + JsonObject object = result.getAsJsonObject(); + JsonElement valueElement = object.get("el").getAsJsonObject().get("value"); + if (valueElement.isJsonObject()) { + valueElement.getAsJsonObject() + .add("cls", + new JsonPrimitive( + ((Condition.Relation) src).value().getClass().getName())); + } else if (isPrimitive(((Condition.Relation) src).value().getClass())) { + object.get("el").getAsJsonObject() + .add("valuecls", + new JsonPrimitive( + ((Condition.Relation) 
src).value().getClass().getName())); + } else if (valueElement.isJsonArray()) { + if (((Condition.Relation) src).value() instanceof List) { + String valueCls = + ((List) ((Condition.Relation) src).value()).get(0).getClass().getName(); + object.get("el").getAsJsonObject().add("valuecls", new JsonPrimitive(valueCls)); + } + } + } + return result; + } +} diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryIdAdapter.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryIdAdapter.java new file mode 100644 index 0000000000..e7ebaea766 --- /dev/null +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryIdAdapter.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.backend.query.serializer; + +import java.lang.reflect.Type; +import java.util.Map; + +import org.apache.hugegraph.backend.id.EdgeId; +import org.apache.hugegraph.backend.id.Id; +import org.apache.hugegraph.backend.id.IdGenerator; +import org.apache.hugegraph.backend.serializer.BinaryBackendEntry; + +import com.google.common.collect.ImmutableMap; + +public class QueryIdAdapter extends AbstractSerializerAdapter { + + static ImmutableMap cls = + ImmutableMap.builder() + .put("E", EdgeId.class) + .put("S", IdGenerator.StringId.class) + .put("L", IdGenerator.LongId.class) + .put("U", IdGenerator.UuidId.class) + .put("O", IdGenerator.ObjectId.class) + .put("B", BinaryBackendEntry.BinaryId.class) + .build(); + + @Override + public Map validType() { + return cls; + } +} diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BinaryBackendEntry.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BinaryBackendEntry.java index a3b3138e2f..6f8fc21c93 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BinaryBackendEntry.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BinaryBackendEntry.java @@ -48,6 +48,11 @@ public BinaryBackendEntry(HugeType type, byte[] bytes, boolean enablePartition) this(type, BytesBuffer.wrap(bytes).parseId(type, enablePartition)); } + // FIXME: `enablePartition` is unused here + public BinaryBackendEntry(HugeType type, byte[] bytes, boolean enablePartition, boolean isOlap) { + this(type, BytesBuffer.wrap(bytes).parseOlapId(type, isOlap)); + } + public BinaryBackendEntry(HugeType type, BinaryId id) { this.type = type; this.id = id; @@ -169,7 +174,10 @@ public boolean mergeable(BackendEntry other) { return false; } if (!this.id().equals(other.id())) { - return false; + // 兼容从ap查回的数据, vertex id + if 
(!this.id().origin().equals(other.originId())) { + return false; + } } this.columns(other.columns()); return true; @@ -199,7 +207,7 @@ public int hashCode() { return this.id().hashCode() ^ this.columns.size(); } - protected static final class BinaryId implements Id { + public static final class BinaryId implements Id { private final byte[] bytes; private final Id id; diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BytesBuffer.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BytesBuffer.java index 7cc15188d7..f293dd2873 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BytesBuffer.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BytesBuffer.java @@ -597,6 +597,10 @@ public void writeProperty(DataType dataType, Object value) { } } + public static byte getType(int value) { + return (byte) (value & 0x3f); + } + public Object readProperty(DataType dataType) { switch (dataType) { case BOOLEAN: @@ -752,11 +756,11 @@ public BytesBuffer writeIndexId(Id id, HugeType type, boolean withEnding) { public BinaryId readIndexId(HugeType type) { byte[] id; if (type.isRange4Index()) { - // IndexLabel 4 bytes + fieldValue 4 bytes - id = this.read(8); + // HugeTypeCode 1 byte + IndexLabel 4 bytes + fieldValue 4 bytes + id = this.read(9); } else if (type.isRange8Index()) { - // IndexLabel 4 bytes + fieldValue 8 bytes - id = this.read(12); + // HugeTypeCode 1 byte + IndexLabel 4 bytes + fieldValue 8 bytes + id = this.read(13); } else { assert type.isStringIndex(); id = this.readBytesWithEnding(); @@ -790,6 +794,34 @@ public BinaryId parseId(HugeType type, boolean enablePartition) { return new BinaryId(bytes, id); } + /** + * 解析 olap id + * @param type + * @param isOlap + * @return + */ + public BinaryId parseOlapId(HugeType type, boolean isOlap) { + if (type.isIndex()) { + return 
this.readIndexId(type); + } + // Parse id from bytes + int start = this.buffer.position(); + /** + * OLAP + * {PropertyKey}{VertexId} + */ + if (isOlap) { + // 先 read olap property id + Id pkId = this.readId(); + } + Id id = this.readId(); + int end = this.buffer.position(); + int len = end - start; + byte[] bytes = new byte[len]; + System.arraycopy(this.array(), start, bytes, 0, len); + return new BinaryId(bytes, id); + } + private void writeNumber(long val) { /* * 8 kinds of number, 2 ~ 9 bytes number: diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/AbstractBackendStore.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/AbstractBackendStore.java index 53ea9b4e4f..2ef9a6db73 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/AbstractBackendStore.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/AbstractBackendStore.java @@ -80,6 +80,13 @@ public String toString() { protected abstract BackendTable table(HugeType type); + protected static HugeType convertTaskOrServerToVertex(HugeType type) { + if (HugeType.TASK.equals(type) || HugeType.SERVER.equals(type)) { + return HugeType.VERTEX; + } + return type; + } + // NOTE: Need to support passing null protected abstract Session session(HugeType type); } diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendFeatures.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendFeatures.java index 22f1cbff64..90001cc1e7 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendFeatures.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendFeatures.java @@ -31,6 +31,8 @@ default boolean supportsSnapshot() { return false; } + default boolean supportsTaskAndServerVertex() { return false; } + 
boolean supportsScanToken(); boolean supportsScanKeyPrefix(); diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendMutation.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendMutation.java index 64c4dfb6c7..02b3429006 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendMutation.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendMutation.java @@ -350,4 +350,8 @@ public void clear() { this.mutations.clear(); } } + + public Map>> mutations() { + return this.updates.mutations; + } } diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendStore.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendStore.java index a136aab1a3..5c05e37c5a 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendStore.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendStore.java @@ -166,6 +166,10 @@ default void removeOlapTable(Id pkId) { throw new UnsupportedOperationException("BackendStore.removeOlapTable()"); } + default boolean existOlapTable(Id pkId) { + throw new UnsupportedOperationException("BackendStore.existOlapTable()"); + } + default Map createSnapshot(String snapshotDir) { throw new UnsupportedOperationException("createSnapshot"); } diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendTable.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendTable.java index 8715dc0e12..a9ee4b5c72 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendTable.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendTable.java @@ -23,6 +23,7 @@ import 
java.util.Iterator; import java.util.List; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hugegraph.backend.query.ConditionQuery; import org.apache.hugegraph.backend.query.Query; import org.apache.hugegraph.backend.serializer.BytesBuffer; @@ -136,6 +137,10 @@ public static final String joinTableName(String prefix, String table) { public abstract Iterator query(Session session, Query query); + public Iterator queryOlap(Session session, Query query) { + throw new NotImplementedException(); + } + public abstract Number queryNumber(Session session, Query query); public abstract boolean queryExist(Session session, Entry entry); diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/config/CoreOptions.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/config/CoreOptions.java index 9b9bc5ca32..f17853d4a5 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/config/CoreOptions.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/config/CoreOptions.java @@ -297,6 +297,14 @@ public static synchronized CoreOptions instance() { 0 ); + public static final ConfigOption TASK_SCHEDULE_PERIOD = + new ConfigOption<>( + "task.schedule_period", + "Period time when scheduler to schedule task", + rangeInt(0L, Long.MAX_VALUE), + 10L + ); + public static final ConfigOption TASK_WAIT_TIMEOUT = new ConfigOption<>( "task.wait_timeout", @@ -331,6 +339,14 @@ public static synchronized CoreOptions instance() { 1 ); + public static final ConfigOption SCHEDULER_TYPE = + new ConfigOption<>( + "task.scheduler_type", + "The type of scheduler used in distribution system.", + allowValues("local", "distributed"), + "local" + ); + public static final ConfigOption TASK_SYNC_DELETION = new ConfigOption<>( "task.sync_deletion", @@ -339,6 +355,14 @@ public static synchronized CoreOptions instance() { false ); + public static final ConfigOption TASK_RETRY = + new ConfigOption<>( + 
"task.retry", + "Task retry times.", + rangeInt(0, 3), + 0 + ); + public static final ConfigOption STORE_CONN_DETECT_INTERVAL = new ConfigOption<>( "store.connection_detect_interval", @@ -650,4 +674,11 @@ public static synchronized CoreOptions instance() { CollectionType::valueOf, "EC" ); + + public static final ConfigOption PD_PEERS = new ConfigOption<>( + "pd.peers", + "The addresses of pd nodes, separated with commas.", + disallowEmpty(), + "127.0.0.1:8686" + ); } diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/type/HugeTableType.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/type/HugeTableType.java new file mode 100644 index 0000000000..a3edf746ff --- /dev/null +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/type/HugeTableType.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.type; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hugegraph.type.define.SerialEnum; + +public enum HugeTableType implements SerialEnum { + + UNKNOWN(0, "UNKNOWN"), + + /* Schema types */ + VERTEX(1, "V"), // 顶点表 + OUT_EDGE(2, "OE"), // 出边表 + IN_EDGE(3, "IE"), // 入边表 + ALL_INDEX_TABLE(4, "INDEX"), // 索引表 + TASK_INFO_TABLE(5, "TASK"), // 任务信息表 + OLAP_TABLE(6, "OLAP"), // OLAP 表 + SERVER_INFO_TABLE(7, "SERVER"); // SERVER 信息表 + + private static final Map ALL_NAME = new HashMap<>(); + + static { + SerialEnum.register(HugeTableType.class); + for (HugeTableType type : values()) { + ALL_NAME.put(type.name, type); + } + } + + private byte type = 0; + private String name; + + HugeTableType(int type, String name) { + assert type < 256; + this.type = (byte) type; + this.name = name; + } + + @Override + public byte code() { + return this.type; + } + + public String string() { + return this.name; + } +} diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/type/HugeType.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/type/HugeType.java index aae2a07aec..c7d9fcea51 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/type/HugeType.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/type/HugeType.java @@ -65,7 +65,8 @@ public enum HugeType implements SerialEnum { SHARD_INDEX(175, "HI"), UNIQUE_INDEX(178, "UI"), - TASK(180, "T"), + TASK(180, "TASK"), + SERVER(181, "SERVER"), // System schema SYS_SCHEMA(250, "SS"), @@ -115,7 +116,7 @@ public boolean isGraph() { } public boolean isVertex() { - return this == HugeType.VERTEX; + return this == HugeType.VERTEX || this == HugeType.TASK || this == HugeType.SERVER; } public boolean isEdge() { @@ -192,4 +193,8 @@ public static HugeType fromString(String type) { public static HugeType fromCode(byte code) { return SerialEnum.fromCode(HugeType.class, code); } + + public 
boolean isLabelIndex() { + return this == VERTEX_LABEL_INDEX || this == EDGE_LABEL_INDEX; + } } From 28268897fa3b94452f0539ba074325c06e9fa311 Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Fri, 26 Apr 2024 14:45:23 +0800 Subject: [PATCH 6/8] regenerate known dependencies --- .../scripts/dependency/known-dependencies.txt | 37 ++++++++++++++----- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/hugegraph-server/hugegraph-dist/scripts/dependency/known-dependencies.txt b/hugegraph-server/hugegraph-dist/scripts/dependency/known-dependencies.txt index c1319ffea8..3d2fe0bb3a 100644 --- a/hugegraph-server/hugegraph-dist/scripts/dependency/known-dependencies.txt +++ b/hugegraph-server/hugegraph-dist/scripts/dependency/known-dependencies.txt @@ -1,5 +1,3 @@ -HdrHistogram-2.1.9.jar -ST4-4.0.8.jar accessors-smart-1.2.jar airline-0.8.jar animal-sniffer-annotations-1.19.jar @@ -18,6 +16,7 @@ asm-tree-5.0.3.jar asm-util-5.0.3.jar ast-9.0-9.0.20190305.jar audience-annotations-0.5.0.jar +auto-service-annotations-1.0.jar bolt-1.6.4.jar byte-buddy-1.10.5.jar byte-buddy-agent-1.10.5.jar @@ -42,6 +41,7 @@ commons-codec-1.13.jar commons-codec-1.15.jar commons-codec-1.9.jar commons-collections-3.2.2.jar +commons-collections4-4.4.jar commons-compress-1.21.jar commons-configuration-1.10.jar commons-configuration2-2.8.0.jar @@ -62,6 +62,7 @@ error_prone_annotations-2.10.0.jar error_prone_annotations-2.3.4.jar exp4j-0.4.8.jar expressions-9.0-9.0.20190305.jar +failsafe-2.4.1.jar failureaccess-1.0.1.jar fastparse_2.12-2.0.4.jar fastutil-8.5.9.jar @@ -88,13 +89,18 @@ groovy-jsr223-2.5.14-indy.jar groovy-swing-2.5.14.jar groovy-templates-2.5.14.jar groovy-xml-2.5.14.jar +grpc-api-1.39.0.jar grpc-api-1.47.0.jar +grpc-context-1.39.0.jar grpc-context-1.47.0.jar +grpc-core-1.39.0.jar grpc-core-1.47.0.jar +grpc-grpclb-1.39.0.jar +grpc-netty-shaded-1.39.0.jar grpc-netty-shaded-1.47.0.jar -grpc-protobuf-1.28.0.jar -grpc-protobuf-lite-1.28.0.jar -gson-2.9.0.jar +grpc-protobuf-1.39.0.jar 
+grpc-protobuf-lite-1.39.0.jar +gson-2.8.9.jar guava-27.0-jre.jar guava-30.0-jre.jar guava-31.0.1-android.jar @@ -102,7 +108,14 @@ hamcrest-2.2.jar hamcrest-core-1.3.jar hanlp-portable-1.8.3.jar hbase-shaded-endpoint-2.0.6.jar +HdrHistogram-2.1.9.jar hessian-3.3.6.jar +hg-pd-client-1.3.0.jar +hg-pd-common-1.3.0.jar +hg-pd-grpc-1.3.0.jar +hg-store-client-1.3.0.jar +hg-store-common-1.3.0.jar +hg-store-grpc-1.3.0.jar high-scale-lib-1.0.6.jar hk2-api-3.0.1.jar hk2-locator-3.0.1.jar @@ -145,6 +158,7 @@ javapoet-1.8.0.jar javassist-3.21.0-GA.jar javatuples-1.2.jar javax.activation-api-1.2.0.jar +javax.annotation-api-1.3.2.jar javax.inject-1.jar javax.json-1.0.jar jaxb-api-2.3.1.jar @@ -173,8 +187,10 @@ jersey-media-json-jackson-3.0.3.jar jersey-server-3.0.3.jar jersey-test-framework-core-3.0.3.jar jersey-test-framework-provider-grizzly2-3.0.3.jar -jffi-1.2.16-native.jar +jetcd-common-0.5.9.jar +jetcd-core-0.5.9.jar jffi-1.2.16.jar +jffi-1.2.16-native.jar jflex-1.8.2.jar jieba-analysis-1.0.2.jar jjwt-api-0.11.5.jar @@ -214,6 +230,7 @@ log4j-api-2.17.1.jar log4j-core-2.17.1.jar log4j-slf4j-impl-2.17.1.jar logging-interceptor-4.10.0.jar +lombok-1.18.20.jar lookout-api-1.4.1.jar lucene-analyzers-common-8.11.2.jar lucene-analyzers-smartcn-8.11.2.jar @@ -254,11 +271,12 @@ parser-9.0-9.0.20190305.jar perfmark-api-0.25.0.jar picocli-4.3.2.jar postgresql-42.4.3.jar -proto-google-common-protos-1.17.0.jar protobuf-java-3.21.7.jar +protobuf-java-util-3.17.2.jar +proto-google-common-protos-2.0.1.jar psjava-0.1.19.jar -reporter-config-base-3.0.3.jar reporter-config3-3.0.3.jar +reporter-config-base-3.0.3.jar rewriting-9.0-9.0.20190305.jar rocksdbjni-7.2.2.jar scala-java8-compat_2.12-0.8.0.jar @@ -271,9 +289,9 @@ sjk-cli-0.22.jar sjk-core-0.14.jar sjk-core-0.22.jar sjk-hflame-0.22.jar -sjk-jfr-standalone-0.7.jar sjk-jfr5-0.5.jar sjk-jfr6-0.7.jar +sjk-jfr-standalone-0.7.jar sjk-json-0.14.jar sjk-json-0.22.jar sjk-nps-0.9.jar @@ -288,6 +306,7 @@ snowball-stemmer-1.3.0.581.1.jar 
sofa-common-tools-1.0.12.jar sofa-rpc-all-5.7.6.jar sourcecode_2.12-0.1.4.jar +ST4-4.0.8.jar stream-2.5.2.jar swagger-annotations-1.5.18.jar swagger-annotations-jakarta-2.2.18.jar From 41ea80a11745d4ac436017a9940b9a0821b2e0d3 Mon Sep 17 00:00:00 2001 From: VGalaxies Date: Fri, 26 Apr 2024 15:46:42 +0800 Subject: [PATCH 7/8] reset --- .../hugegraph/backend/serializer/BytesBuffer.java | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BytesBuffer.java b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BytesBuffer.java index f293dd2873..32b5c04eef 100644 --- a/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BytesBuffer.java +++ b/hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/serializer/BytesBuffer.java @@ -597,10 +597,6 @@ public void writeProperty(DataType dataType, Object value) { } } - public static byte getType(int value) { - return (byte) (value & 0x3f); - } - public Object readProperty(DataType dataType) { switch (dataType) { case BOOLEAN: @@ -756,11 +752,11 @@ public BytesBuffer writeIndexId(Id id, HugeType type, boolean withEnding) { public BinaryId readIndexId(HugeType type) { byte[] id; if (type.isRange4Index()) { - // HugeTypeCode 1 byte + IndexLabel 4 bytes + fieldValue 4 bytes - id = this.read(9); + // IndexLabel 4 bytes + fieldValue 4 bytes + id = this.read(8); } else if (type.isRange8Index()) { - // HugeTypeCode 1 byte + IndexLabel 4 bytes + fieldValue 8 bytes - id = this.read(13); + // IndexLabel 4 bytes + fieldValue 8 bytes + id = this.read(12); } else { assert type.isStringIndex(); id = this.readBytesWithEnding(); From a441f4e910be360e1f3047c34e30e6799ab4d64c Mon Sep 17 00:00:00 2001 From: imbajin Date: Mon, 6 May 2024 15:52:36 +0800 Subject: [PATCH 8/8] basic clean & desc --- .../store/hstore/HstoreSessionsImpl.java | 66 
++++++++----------- .../backend/store/hstore/HstoreTable.java | 25 +++---- 2 files changed, 42 insertions(+), 49 deletions(-) diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java index bf45b6800f..f8a91f0e6e 100755 --- a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreSessionsImpl.java @@ -58,6 +58,14 @@ import org.apache.hugegraph.util.E; import org.apache.hugegraph.util.StringEncoding; +/** + * This class is an implementation of HstoreSessions, which is used to manage sessions for HStore + * backend (Raft + RocksDB). + * It provides methods to create, drop, and check the existence of tables, as well as to perform + * CRUD operations on the tables. + * It also provides methods to manage transactions, such as commit, rollback, and check if there + * are any changes in the session. 
+ */ public class HstoreSessionsImpl extends HstoreSessions { private static final Set infoInitializedGraph = @@ -72,8 +80,7 @@ public class HstoreSessionsImpl extends HstoreSessions { private final AtomicInteger refCount; private final String graphName; - public HstoreSessionsImpl(HugeConfig config, String database, - String store) { + public HstoreSessionsImpl(HugeConfig config, String database, String store) { super(config, database, store); this.config = config; this.graphName = database + "/" + store; @@ -103,12 +110,10 @@ private void initStoreNode(HugeConfig config) { if (!initializedNode) { synchronized (this) { if (!initializedNode) { - PDConfig pdConfig = - PDConfig.of(config.get(CoreOptions.PD_PEERS)) - .setEnableCache(true); + PDConfig pdConfig = PDConfig.of(config.get(CoreOptions.PD_PEERS)) + .setEnableCache(true); defaultPdClient = PDClient.create(pdConfig); - hgStoreClient = - HgStoreClient.create(defaultPdClient); + hgStoreClient = HgStoreClient.create(defaultPdClient); initializedNode = Boolean.TRUE; } } @@ -120,16 +125,12 @@ public void open() throws Exception { if (!infoInitializedGraph.contains(this.graphName)) { synchronized (infoInitializedGraph) { if (!infoInitializedGraph.contains(this.graphName)) { - Integer partitionCount = - this.config.get(HstoreOptions.PARTITION_COUNT); + Integer partitionCount = this.config.get(HstoreOptions.PARTITION_COUNT); Assert.assertTrue("The value of hstore.partition_count" + - " cannot be less than 0.", - partitionCount > -1); + " cannot be less than 0.", partitionCount > -1); defaultPdClient.setGraph(Metapb.Graph.newBuilder() - .setGraphName( - this.graphName) - .setPartitionCount( - partitionCount) + .setGraphName(this.graphName) + .setPartitionCount(partitionCount) .build()); infoInitializedGraph.add(this.graphName); } @@ -179,7 +180,7 @@ public void clear() { this.session.deleteGraph(); try { hgStoreClient.getPdClient().delGraph(this.graphName); - } catch (PDException e) { + } catch (PDException ignored) { 
} } @@ -431,11 +432,6 @@ public void close() { this.opened = false; } - @Override - public boolean closed() { - return !this.opened; - } - @Override public void reset() { if (this.changedSize != 0) { @@ -585,8 +581,7 @@ public BackendColumnIterator scan(String table) { public BackendColumnIterator scan(String table, byte[] conditionQueryToByte) { assert !this.hasChanges(); - HgKvIterator results = - this.graph.scanIterator(table, conditionQueryToByte); + HgKvIterator results = this.graph.scanIterator(table, conditionQueryToByte); return new ColumnIterator<>(table, results); } @@ -594,10 +589,8 @@ public BackendColumnIterator scan(String table, public BackendColumnIterator scan(String table, byte[] ownerKey, byte[] prefix) { assert !this.hasChanges(); - HgKvIterator result = this.graph.scanIterator(table, - HgOwnerKey.of( - ownerKey, - prefix)); + HgKvIterator result = this.graph.scanIterator(table, HgOwnerKey.of(ownerKey, + prefix)); return new ColumnIterator<>(table, result); } @@ -671,8 +664,7 @@ public boolean hasNext() { @Override public BackendColumnIterator next() { - return new ColumnIterator(table, - scanIterators.next()); + return new ColumnIterator(table, scanIterators.next()); } }; } @@ -683,15 +675,13 @@ public BackendColumnIterator scan(String table, byte[] ownerKeyFrom, byte[] keyFrom, byte[] keyTo, int scanType) { assert !this.hasChanges(); - HgKvIterator result = this.graph.scanIterator(table, HgOwnerKey.of( - ownerKeyFrom, keyFrom), - HgOwnerKey.of( - ownerKeyTo, - keyTo), 0, - scanType, - null); - return new ColumnIterator<>(table, result, keyFrom, - keyTo, scanType); + HgKvIterator result = this.graph.scanIterator(table, + HgOwnerKey.of(ownerKeyFrom, + keyFrom), + HgOwnerKey.of(ownerKeyTo, + keyTo), + 0, scanType, null); + return new ColumnIterator<>(table, result, keyFrom, keyTo, scanType); } @Override diff --git a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java 
b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java index de9f745863..894964aa46 100755 --- a/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java +++ b/hugegraph-server/hugegraph-hstore/src/main/java/org/apache/hugegraph/backend/store/hstore/HstoreTable.java @@ -64,17 +64,20 @@ import org.apache.tinkerpop.gremlin.util.iterator.IteratorUtils; import org.slf4j.Logger; +/** + * This class provides the implementation for the HStore table in the backend store. + * It provides methods for querying, inserting, deleting, and updating entries in the table. + * It also provides methods for handling metadata and managing shards. + */ public class HstoreTable extends BackendTable { private static final Logger LOG = Log.logger(HstoreStore.class); private final HstoreShardSplitter shardSpliter; - Function ownerDelegate = (entry) -> getOwner(entry); - Function ownerByIdDelegate = (id) -> getOwnerId(id); - BiFunction ownerByQueryDelegate = - (type, id) -> getOwnerId(type, id); - Supplier ownerScanDelegate = - () -> HgStoreClientConst.ALL_PARTITION_OWNER; + Function ownerDelegate = this::getOwner; + Function ownerByIdDelegate = this::getOwnerId; + BiFunction ownerByQueryDelegate = this::getOwnerId; + Supplier ownerScanDelegate = () -> HgStoreClientConst.ALL_PARTITION_OWNER; public HstoreTable(String database, String table) { super(String.format("%s+%s", database, table)); @@ -187,7 +190,7 @@ public byte[] getInsertEdgeOwner(BackendEntry entry) { } public byte[] getInsertOwner(BackendEntry entry) { - // 为适应label索引散列,不聚焦在一个分区 + // 为适应 label 索引散列,不聚焦在一个分区 if (entry.type().isLabelIndex() && (entry.columns().size() == 1)) { Iterator iterator = entry.columns().iterator(); while (iterator.hasNext()) { @@ -201,7 +204,7 @@ public byte[] getInsertOwner(BackendEntry entry) { } /** - * 返回Id所属的点ID + * 返回 Id 所属的点 ID * * @param id * @return @@ -218,7 +221,7 @@ protected byte[] 
getOwnerId(Id id) { } /** - * 返回Id所属的点ID + * 返回 Id 所属的点 ID * * @param id * @return @@ -422,7 +425,7 @@ protected BackendColumnIterator queryBy(Session session, Query query) { // Query by id if (query.conditions().isEmpty()) { assert !query.ids().isEmpty(); - // 单个id查询 走get接口查询 + // 单个 id 查询 走 get 接口查询 if (query.ids().size() == 1) { return this.getById(session, query.ids().iterator().next()); } @@ -500,7 +503,7 @@ protected BackendColumnIterator queryByPrefix(Session session, byte[] ownerKeyTo = this.ownerByQueryDelegate.apply(query.resultType(), query.prefix()); byte[] keyFrom = query.start().asBytes(); - // 前缀分页查询中, start为最初的位置。因为在不同的分区 都是从start位置开始查询 + // 前缀分页查询中,start 为最初的位置。因为在不同的分区 都是从 start 位置开始查询 if (query.paging()) { keyFrom = query.prefix().asBytes(); }