- * This class exists to configure JUB programmatically and avoid the annoying
- * behavior of the system-property-configured XMLConsumer.
- */
-public class JUnitBenchmarkProvider {
-
- public static final String ENV_EFFORT_GENERATE = "JUB_EFFORT_GENERATE";
- public static final String ENV_EFFORT_FILE = "JUB_EFFORT_FILE";
- public static final String ENV_DEFAULT_ROUNDS = "JUB_DEFAULT_ROUNDS";
- public static final String ENV_WARMUP_ROUNDS = "JUB_WARMUP_ROUNDS";
- public static final String ENV_TARGET_RUNTIME_MS = "JUB_TARGET_RUNTIME_MS";
-
- public static final String DEFAULT_EFFORT_FILE = "../janusgraph-test/data/jub-effort.txt";
- public static final long TARGET_RUNTIME_MS;
- public static final int DEFAULT_ROUNDS;
- public static final int WARMUP_ROUNDS;
-
- private static final Map
- * The returned rule will write results to an XML file named
- * jub.(abs(current nanotime)).xml and to the console.
- *
- * This method concentrates our JUB configuration in a single code block and
- * gives us programmatic flexibility that exceeds the limited flexibility of
- * configuring JUB through its hardcoded global system properties. It also
- * converts the IOException that XMLConsumer's constructor can throw into a
- * RuntimeException. In test classes, this conversion is the difference
- * between:
- *
- *
- * {@literal @}Rule
- * public TestRule benchmark; // Can't initialize here b/c of IOException
- * ...
- * public TestClassConstructor() throws IOException {
- * benchmark = new BenchmarkRule(new XMLConsumer(...));
- * }
- *
- * // or, if there are extant subclass constructors we want to leave alone...
- *
- * public TestClassConstructor() {
- * try {
- * benchmark = new BenchmarkRule(new XMLConsumer(...));
- * } catch (IOException e) {
- * throw new RuntimeException(e);
- * }
- * }
- *
- *
- * versus, with this method,
- *
- *
- * {@literal @}Rule
- * public TestRule benchmark = JUnitBenchmarkProvider.get(); // done
- *
- *
- * @return a BenchmarkRule ready for use with the JUnit @Rule annotation
- */
- public static TestRule get() {
- return new AdjustableRoundsBenchmarkRule(efforts, getConsumers());
- }
-
- /**
- * Like {@link #get()}, except extra JUB Results consumers can be attached
- * to the returned rule.
- *
- * @param additionalConsumers
- * extra JUB results consumers to apply in the returned rule
- * object
- * @return a BenchmarkRule ready for use with the JUnit @Rule annotation
- */
- public static TestRule get(IResultsConsumer... additionalConsumers) {
- return new AdjustableRoundsBenchmarkRule(efforts, getConsumers(additionalConsumers));
- }
-
- /**
- * Get a filename from {@link #ENV_EFFORT_FILE}, then open the file and read
- * method execution multipliers from it. Such a file can be produced using
- * {@link TimeScaleConsumer}.
- *
- * @return map of classname + '.' + methodname to the number of iterations
- * needed to run for at least {@link #TARGET_RUNTIME_MS}
- */
- private static Map
- * The metrics visible through the object returned by this method may - * also change their values between reads. In other words, this is not - * necessarily an immutable snapshot. - *
- * If the index job has failed and the implementation is capable of
- * quickly detecting that, then the implementation should throw an
- * {@code ExecutionException}. Returning metrics in case of failure is
- * acceptable, but throwing an exception is preferred.
- *
- * @return metrics for a potentially still-running job
- * @throws ExecutionException if the index job threw an exception
- */
- ScanMetrics getIntermediateResult() throws ExecutionException;
- }
-
/*
##################### CONSISTENCY SETTING ##########################
*/
@@ -310,6 +282,21 @@ interface IndexJobFuture extends Future
- *
+ * The metrics visible through the object returned by this method may
+ * also change their values between reads. In other words, this is not
+ * necessarily an immutable snapshot.
+ *
+ * If the job has failed and the implementation is capable of
+ * quickly detecting that, then the implementation should throw an
+ * {@code ExecutionException}. Returning metrics in case of failure is
+ * acceptable, but throwing an exception is preferred.
+ *
+ * @return metrics for a potentially still-running job
+ * @throws ExecutionException if the job threw an exception
+ */
+ ScanMetrics getIntermediateResult() throws ExecutionException;
+}
+
diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScanner.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScanner.java
index 1c576549f0..9499294cfd 100644
--- a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScanner.java
+++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScanner.java
@@ -16,7 +16,6 @@
import com.google.common.base.Preconditions;
import org.apache.commons.lang3.StringUtils;
-import org.janusgraph.core.schema.JanusGraphManagement;
import org.janusgraph.diskstorage.BackendException;
import org.janusgraph.diskstorage.configuration.Configuration;
import org.janusgraph.diskstorage.configuration.MergedConfiguration;
@@ -85,7 +84,7 @@ private void addJob(Object jobId, StandardScannerExecutor executor) {
Preconditions.checkArgument(runningJobs.putIfAbsent(jobId, executor) == null,"Another job with the same id is already running: %s",jobId);
}
- public JanusGraphManagement.IndexJobFuture getRunningJob(Object jobId) {
+ public ScanJobFuture getRunningJob(Object jobId) {
return runningJobs.get(jobId);
}
@@ -172,7 +171,7 @@ public Builder setFinishJob(Consumer
- * Given a set of vertices, one may be interested in all edges that are contained in the subgraph spanned
- * by those vertices.
- *
- * @author Matthias Bröcheler (me@matthiasb.com);
- */
-@Deprecated
-public class AllEdgesIterable {
-
- private AllEdgesIterable() {
- }
-
- /**
- * Returns an iterable over all edges incident on the vertices returned by the given Iterable over vertices.
- *
- * Note that this method assumes that the given Iterable will return all vertices in the connected component,
- * otherwise the behavior of this method is undefined.
- *
- * @param vertices Iterable over a set of vertices defining a connected component.
- * @return Iterable over all edges contained in this component.
- */
- public static Iterable
- * This method will return all edges whose end points are contained in the given set of vertices.
- *
- * @param vertices Set of vertices
- * @return All edges contained in the subgraph spanned by the set of vertices.
- */
- public static Iterable
- * Given a set of vertices, one may be interested in all edges that are contained in the subgraph spanned
- * by those vertices. This iterator will return these edges.
- *
- * @author Matthias Bröcheler (me@matthiasb.com);
- */
-@Deprecated
-public class AllEdgesIterator implements Iterator
- * Note that this method assumes that the given Iterable will return all vertices in the connected component,
- * otherwise the behavior of this method is undefined.
- *
- * @param vertexIterator Iterator over a set of vertices defining a connected component.
- */
- public AllEdgesIterator(Iterator extends Vertex> vertexIterator) {
- this.vertexIterator = vertexIterator;
- this.vertices = null;
- next = findNext();
- }
-
- /**
- * Returns an iterator over all edges contained in the subgraph spanned by the given vertices.
- *
- * This method will return all edges whose end points are contained in the given set of vertices.
- *
- * @param vertices Set of vertices
- */
- public AllEdgesIterator(Set extends Vertex> vertices) {
- this.vertexIterator = vertices.iterator();
- this.vertices = vertices;
- next = findNext();
- }
-
- private Edge findNext() {
- JanusGraphEdge rel = null;
- while (rel == null) {
- if (currentEdges.hasNext()) {
- rel = (JanusGraphEdge)currentEdges.next();
- if (vertices != null && !vertices.contains(rel.vertex(Direction.IN)))
- rel = null;
- } else {
- if (vertexIterator.hasNext()) {
- Vertex nextVertex = vertexIterator.next();
- currentEdges = nextVertex.edges(Direction.OUT);
- } else break;
- }
- }
- return rel;
- }
-
- @Override
- public boolean hasNext() {
- return next != null;
- }
-
- @Override
- public Edge next() {
- if (next == null) throw new NoSuchElementException();
- Edge current = next;
- next = findNext();
- return current;
- }
-
- /**
- * Removing edges is not supported!
- *
- * @throws UnsupportedOperationException if invoked
- */
- @Override
- public void remove() {
- throw new UnsupportedOperationException("Removals are not supported");
- }
-
-}
diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/ConcurrentLRUCache.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/ConcurrentLRUCache.java
deleted file mode 100644
index 04c76bcda1..0000000000
--- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/ConcurrentLRUCache.java
+++ /dev/null
@@ -1,662 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.janusgraph.graphdb.util;
-
-import org.cliffc.high_scale_lib.NonBlockingHashMapLong;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.ref.WeakReference;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * A LRU cache implementation based upon ConcurrentHashMap and other techniques to reduce
- * contention and synchronization overhead to utilize multiple CPU cores more effectively.
- *
- * Note that the implementation does not follow a true LRU (least-recently-used) eviction
- * strategy. Instead it strives to remove least recently used items but when the initial
- * cleanup does not remove enough items to reach the 'acceptableWaterMark' limit, it can
- * remove more items forcefully regardless of access order.
- *
- * ADDED COMMENT:
- * This class has been copied from the Apache Solr project (see license above).
- * New method has been added "putIfAbsent" which has the same behaviour as normal CHM.putIfAbsent
- * but cache maintenance operations are only done in context of a winner thread, in other words,
- * whoever puts absent item to the map would run cache maintenance ops, everybody else would be just
- * newly added item returned.
- *
- * @since solr 1.4
- */
-@Deprecated
-public class ConcurrentLRUCache
- * It is done in two stages. In the first stage, least recently used items are evicted.
- * If, after the first stage, the cache size is still greater than 'acceptableSize'
- * config parameter, the second stage takes over.
- *
- * The second stage is more intensive and tries to bring down the cache size
- * to the 'lowerWaterMark' config parameter.
- */
- private void markAndSweep() {
- // if we want to keep at least 1000 entries, then timestamps of
- // current through current-1000 are guaranteed not to be the oldest (but that does
- // not mean there are 1000 entries in that group... it's actually anywhere between
- // 1 and 1000).
- // Also, if we want to remove 500 entries, then
- // oldestEntry through oldestEntry+500 are guaranteed to be
- // removed (however many there are there).
-
- if (!markAndSweepLock.tryLock()) return;
- try {
- long oldestEntry = this.oldestEntry;
- isCleaning = true;
- this.oldestEntry = oldestEntry; // volatile write to make isCleaning visible
-
- long timeCurrent = stats.accessCounter.get();
- int sz = stats.size.get();
-
- int numRemoved = 0;
- int numLongept = 0;
- long newestEntry = timeCurrent;
- long newNewestEntry = -1;
- long newOldestEntry = Long.MAX_VALUE;
-
- int wantToLongeep = lowerWaterMark;
- int wantToRemove = sz - lowerWaterMark;
-
- @SuppressWarnings("unchecked") // generic array's are annoying
- CacheEntry
- * This uses a TreeSet to collect the 'n' oldest items ordered by ascending last access time
- * and returns a LinkedHashMap containing 'n' or less than 'n' entries.
- *
- * @param n the number of oldest items needed
- * @return a LinkedHashMap containing 'n' or less than 'n' entries
- */
- public Map NOTE: This class will pre-allocate a full array of
- * length {
-
- private static final Logger log = LoggerFactory.getLogger(JanusGraphPSerializer.class);
- private final SerializerShim pSerializerShim;
-
- public DeprecatedJanusGraphPSerializer(SerializerShim pSerializerShim) {
- this.pSerializerShim = pSerializerShim;
- }
-
- @Override
- public aClass) {
- final String predicate = input.readString();
- final boolean isCollection = input.readByte() == (byte) 0;
- final Object value;
- if (isCollection) {
- value = new ArrayList();
- final int size = input.readInt();
- for (int ix = 0; ix < size; ix++) {
- ((List) value).add(kryo.readClassAndObject(input));
- }
- } else {
- value = kryo.readClassAndObject(input);
- }
-
- try {
- return createPredicateWithValue(predicate, value);
- } catch (final Exception e) {
- log.info("Couldn't deserialize class: " + aClass + ", predicate: " + predicate + ", isCollection: "
- + isCollection + ",value: " + value, e);
- throw new IllegalStateException(e.getMessage(), e);
- }
- }
-
- public static P createPredicateWithValue(String predicate, Object value) throws IllegalAccessException, InvocationTargetException, NoSuchMethodException {
- if (JanusGraphPSerializer.checkForJanusGraphPredicate(predicate)){
- return JanusGraphPSerializer.createPredicateWithValue(predicate, value);
- }
- if (!predicate.equals("and") && !predicate.equals("or")) {
- if (value instanceof Collection) {
- switch (predicate) {
- case "between":
- return P.between(((List) value).get(0), ((List) value).get(1));
- case "inside":
- return P.inside(((List) value).get(0), ((List) value).get(1));
- case "outside":
- return P.outside(((List) value).get(0), ((List) value).get(1));
- case "within":
- return P.within((Collection) value);
- default:
- return predicate.equals("without") ? P.without((Collection) value) : (P) P.class.getMethod(predicate, Collection.class).invoke(null, value);
- }
- } else {
- return (P) P.class.getMethod(predicate, Object.class).invoke(null, value);
- }
- } else {
- return (P) (predicate.equals("and") ? new AndP((List) value) : new OrP((List) value));
- }
- }
-}
diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistry.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistry.java
index ce44a0d9f9..b4c841edae 100644
--- a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistry.java
+++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistry.java
@@ -14,12 +14,10 @@
package org.janusgraph.graphdb.tinkerpop;
-import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.structure.io.AbstractIoRegistry;
import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryIo;
import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONIo;
import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoIo;
-import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoSerializersV3d0;
import org.janusgraph.core.attribute.Geoshape;
import org.janusgraph.graphdb.relations.RelationIdentifier;
import org.janusgraph.graphdb.tinkerpop.io.JanusGraphP;
@@ -44,16 +42,9 @@ private JanusGraphIoRegistry() {
register(GryoIo.class, RelationIdentifier.class, null);
register(GryoIo.class, Geoshape.class, new Geoshape.GeoShapeGryoSerializer());
register(GryoIo.class, JanusGraphP.class, new JanusGraphPSerializer());
- //fallback for older janusgraph drivers
- register(GryoIo.class, P.class, new DeprecatedJanusGraphPSerializer(new GryoSerializersV3d0.PSerializer()));
}
public static JanusGraphIoRegistry instance() {
return INSTANCE;
}
-
- @Deprecated()
- public static JanusGraphIoRegistry getInstance() {
- return instance();
- }
}
diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryV1d0.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryV1d0.java
index e473cc242e..95465505f6 100644
--- a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryV1d0.java
+++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryV1d0.java
@@ -14,11 +14,9 @@
package org.janusgraph.graphdb.tinkerpop;
-import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.structure.io.AbstractIoRegistry;
import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONIo;
import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoIo;
-import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoSerializersV1d0;
import org.janusgraph.core.attribute.Geoshape;
import org.janusgraph.graphdb.relations.RelationIdentifier;
import org.janusgraph.graphdb.tinkerpop.io.JanusGraphP;
@@ -37,16 +35,9 @@ private JanusGraphIoRegistryV1d0() {
register(GryoIo.class, RelationIdentifier.class, null);
register(GryoIo.class, Geoshape.class, new Geoshape.GeoShapeGryoSerializer());
register(GryoIo.class, JanusGraphP.class, new JanusGraphPSerializer());
- //fallback for older JanusGraph drivers
- register(GryoIo.class, P.class, new DeprecatedJanusGraphPSerializer(new GryoSerializersV1d0.PSerializer()));
}
public static JanusGraphIoRegistryV1d0 instance() {
return INSTANCE;
}
-
- @Deprecated()
- public static JanusGraphIoRegistryV1d0 getInstance() {
- return instance();
- }
}
diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinaryConstants.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinaryConstants.java
new file mode 100644
index 0000000000..7c7e23518a
--- /dev/null
+++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinaryConstants.java
@@ -0,0 +1,32 @@
+// Copyright 2021 JanusGraph Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.janusgraph.graphdb.tinkerpop.io.binary;
+
+public class GeoshapeGraphBinaryConstants {
+
+ // Geoshape format versions 0 and 1 were used by the legacy GeoshapeGraphBinarySerializer.
+ public static final byte GEOSHAPE_FORMAT_VERSION = 2;
+
+ // Geoshape type codes
+ public static final int GEOSHAPE_POINT_TYPE_CODE = 0;
+ public static final int GEOSHAPE_CIRCLE_TYPE_CODE = 1;
+ public static final int GEOSHAPE_BOX_TYPE_CODE = 2;
+ public static final int GEOSHAPE_LINE_TYPE_CODE = 3;
+ public static final int GEOSHAPE_POLYGON_TYPE_CODE = 4;
+ public static final int GEOSHAPE_MULTI_POINT_TYPE_CODE = 5;
+ public static final int GEOSHAPE_MULTI_LINE_TYPE_CODE = 6;
+ public static final int GEOSHAPE_MULTI_POLYGON_TYPE_CODE = 7;
+ public static final int GEOSHAPE_GEOMETRY_COLLECTION_TYPE_CODE = 8;
+}
diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializer.java
index 3bba02bdd9..3b5d02a2a8 100644
--- a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializer.java
+++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializer.java
@@ -1,4 +1,4 @@
-// Copyright 2020 JanusGraph Authors
+// Copyright 2021 JanusGraph Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,57 +14,82 @@
package org.janusgraph.graphdb.tinkerpop.io.binary;
+import org.apache.tinkerpop.gremlin.driver.ser.SerializationException;
import org.apache.tinkerpop.gremlin.structure.io.Buffer;
import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
import org.janusgraph.core.attribute.Geoshape;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.BoxSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.CircleSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.GeometryCollectionSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.LineSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.MultiLineSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.MultiPointSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.MultiPolygonSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.PointSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.PolygonSerializer;
+import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.GeoshapeTypeSerializer;
import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.Map;
public class GeoshapeGraphBinarySerializer extends JanusGraphTypeSerializer {
-
- public DeprecatedJanusGraphPDeserializerV2d0() {
- super(P.class);
- }
-
- @Override
- public P deserialize(final JsonParser jsonParser, final DeserializationContext deserializationContext) throws IOException {
- String predicate = null;
- Object value = null;
-
- while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
- if (jsonParser.getCurrentName().equals(GraphSONTokens.PREDICATE)) {
- jsonParser.nextToken();
- predicate = jsonParser.getText();
- } else if (jsonParser.getCurrentName().equals(GraphSONTokens.VALUE)) {
- jsonParser.nextToken();
- value = deserializationContext.readValue(jsonParser, Object.class);
- }
- }
-
- try {
- return DeprecatedJanusGraphPSerializer.createPredicateWithValue(predicate, value);
- } catch (final Exception e) {
- throw new IllegalStateException(e.getMessage(), e);
- }
- }
-
- @Override
- public boolean isCachable() {
- return true;
- }
- }
}
diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModuleV2d0.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModuleV2d0.java
index e3741464a4..6104bcd7e7 100644
--- a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModuleV2d0.java
+++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModuleV2d0.java
@@ -14,7 +14,6 @@
package org.janusgraph.graphdb.tinkerpop.io.graphson;
-import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.janusgraph.core.attribute.Geoshape;
import org.janusgraph.graphdb.relations.RelationIdentifier;
import org.janusgraph.graphdb.tinkerpop.io.JanusGraphP;
@@ -33,8 +32,6 @@ private JanusGraphSONModuleV2d0() {
addDeserializer(RelationIdentifier.class, new RelationIdentifierDeserializerV2d0());
addDeserializer(Geoshape.class, new Geoshape.GeoshapeGsonDeserializerV2d0());
addDeserializer(JanusGraphP.class, new JanusGraphPDeserializerV2d0());
- //fallback for older janusgraph drivers
- addDeserializer(P.class, new DeprecatedJanusGraphPDeserializerV2d0());
}
private static final JanusGraphSONModuleV2d0 INSTANCE = new JanusGraphSONModuleV2d0();
diff --git a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryTest.java b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryTest.java
index d8c4aea7ff..6f68034c87 100644
--- a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryTest.java
+++ b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryTest.java
@@ -28,11 +28,6 @@
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Graph;
-import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONMapper;
-import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONReader;
-import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONVersion;
-import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONWriter;
-import org.apache.tinkerpop.gremlin.structure.io.graphson.TypeInfo;
import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoMapper;
import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoMapper.Builder;
import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph;
@@ -100,7 +95,6 @@ public void testTinkerPopPredicatesAsGryo() throws SerializationException {
@Test
public void testJanusGraphPredicatesAsGryo() throws SerializationException {
-
Graph graph = EmptyGraph.instance();
GraphTraversalSource g = graph.traversal();
@@ -185,54 +179,4 @@ public void testTokenIoRegistyInConfig() throws SerializationException {
Bytecode result = (Bytecode)requestMessage1.getArgs().get(Tokens.ARGS_GREMLIN);
assertEquals(expectedBytecode, result);
}
-
- @Test
- public void testOldFormatJanusGraphPredicatesAsGryo() throws SerializationException {
- Graph graph = EmptyGraph.instance();
- GraphTraversalSource g = graph.traversal();
-
- Bytecode oldBytecode = serializeByteCodeAfterDeserializeAsGryo(g.V().has("name", new P<>(Text.CONTAINS, "test")));
- Bytecode newBytecode = serializeByteCodeAfterDeserializeAsGryo(g.V().has("name", Text.textContains("test")));
-
- assertEquals(newBytecode, oldBytecode);
- }
-
- private Bytecode serializeByteCodeAfterDeserializeAsGryo(GraphTraversal traversal) throws SerializationException {
- Builder mapper = GryoMapper.build().addRegistry(JanusGraphIoRegistry.instance());
- MessageSerializer binarySerializer = new GryoMessageSerializerV1d0(mapper);
- Bytecode expectedBytecode = traversal.asAdmin().getBytecode();
- RequestMessage requestMessage = RequestMessage.build(Tokens.OPS_BYTECODE).processor("traversal")
- .addArg(Tokens.ARGS_GREMLIN, expectedBytecode).create();
-
- ByteBuf bb = binarySerializer.serializeRequestAsBinary(requestMessage, allocator);
- final int mimeLen = bb.readByte();
- bb.readBytes(new byte[mimeLen]);
- RequestMessage deser = binarySerializer.deserializeRequest(bb);
- return (Bytecode) deser.getArgs().get(Tokens.ARGS_GREMLIN);
- }
-
- @Test
- public void testOldFormatJanusGraphPredicatesAsGraphSON() throws Exception {
- Graph graph = EmptyGraph.instance();
- GraphTraversalSource g = graph.traversal();
-
- Bytecode oldBytecode = serializeByteCodeAfterDeserializeAsGraphSON(g.V().has("name", new P<>(Text.CONTAINS, "test")));
- Bytecode newBytecode = serializeByteCodeAfterDeserializeAsGraphSON(g.V().has("name", Text.textContains("test")));
-
- assertEquals(newBytecode, oldBytecode);
- }
-
- private Bytecode serializeByteCodeAfterDeserializeAsGraphSON(GraphTraversal traversal) throws Exception {
- final GraphSONMapper mapper = GraphSONMapper.build().version(GraphSONVersion.V3_0)
- .typeInfo(TypeInfo.PARTIAL_TYPES).addRegistry(JanusGraphIoRegistry.instance()).create();
- final GraphSONWriter writer = GraphSONWriter.build().mapper(mapper).create();
- final GraphSONReader reader = GraphSONReader.build().mapper(mapper).create();
- Bytecode expectedBytecode = traversal.asAdmin().getBytecode();
- ByteArrayOutputStream serializationStream = new ByteArrayOutputStream();
- writer.writeObject(serializationStream, expectedBytecode);
-
- ByteArrayInputStream inputStream = new ByteArrayInputStream(serializationStream.toByteArray());
-
- return reader.readObject(inputStream, Bytecode.class);
- }
}
diff --git a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphSerializerBaseIT.java b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphSerializerBaseIT.java
index a7b774e585..cc0f2eb481 100644
--- a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphSerializerBaseIT.java
+++ b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphSerializerBaseIT.java
@@ -21,7 +21,6 @@
import org.janusgraph.core.attribute.Geoshape;
import org.janusgraph.core.attribute.Text;
import org.janusgraph.graphdb.relations.RelationIdentifier;
-import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.testcontainers.junit.jupiter.Container;
@@ -92,7 +91,6 @@ public void testRelationIdentifier(TestInfo testInfo) {
}
@Test
- @Disabled("JanusGraphPredicate serialization won't work any older version than 0.6.0.")
public void testJanusGraphTextPredicates() {
GraphTraversalSource g = traversal();
g.addV("predicateTestLabel").property("name", "neptune").iterate();
diff --git a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializerTest.java b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializerTest.java
index 609923efd0..1b2d1d3853 100644
--- a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializerTest.java
+++ b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializerTest.java
@@ -29,6 +29,7 @@
import org.junit.jupiter.params.provider.MethodSource;
import java.io.IOException;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -46,19 +47,30 @@ private static Stream Basic usage:
- * Stopwatch methods are not idempotent; it is an error to start or stop a
- * stopwatch that is already in the desired state.
- *
- * When testing code that uses this class, use
- * {@link #createUnstarted(Ticker)} or {@link #createStarted(Ticker)} to
- * supply a fake or mock ticker.
- * This allows you to
- * simulate any valid behavior of the stopwatch.
- *
- * Note: This class is not thread-safe.
- *
- * @author Kevin Bourrillion
- * @since 10.0
- */
-@Beta
-@GwtCompatible(emulated = true)
-public final class Stopwatch {
- private final Ticker ticker;
- private boolean isRunning;
- private long elapsedNanos;
- private long startTick;
-
- /**
- * Creates (but does not start) a new stopwatch using {@link System#nanoTime}
- * as its time source.
- *
- * @return
- * @since 15.0
- */
- public static Stopwatch createUnstarted() {
- return new Stopwatch();
- }
-
- /**
- * Creates (but does not start) a new stopwatch, using the specified time
- * source.
- *
- * @return
- * @since 15.0
- */
- public static Stopwatch createUnstarted(Ticker ticker) {
- return new Stopwatch(ticker);
- }
-
- /**
- * Creates (and starts) a new stopwatch using {@link System#nanoTime}
- * as its time source.
- *
- * @return
- * @since 15.0
- */
- public static Stopwatch createStarted() {
- return new Stopwatch().start();
- }
-
- /**
- * Creates (and starts) a new stopwatch, using the specified time
- * source.
- *
- * @return
- * @since 15.0
- */
- public static Stopwatch createStarted(Ticker ticker) {
- return new Stopwatch(ticker).start();
- }
-
- /**
- * Creates (but does not start) a new stopwatch using {@link System#nanoTime}
- * as its time source.
- *
- * @deprecated Use {@link Stopwatch#createUnstarted()} instead.
- */
- @Deprecated
- public Stopwatch() {
- this(Ticker.systemTicker());
- }
-
- /**
- * Creates (but does not start) a new stopwatch, using the specified time
- * source.
- *
- * @param ticker
- * @deprecated Use {@link Stopwatch#createUnstarted(Ticker)} instead.
- */
- @Deprecated
- public Stopwatch(Ticker ticker) {
- this.ticker = checkNotNull(ticker, "ticker");
- }
-
- /**
- * Returns {@code true} if {@link #start()} has been called on this stopwatch,
- * and {@link #stop()} has not been called since the last call to {@code
- * start()}.
- * @return
- */
- public boolean isRunning() {
- return isRunning;
- }
-
- /**
- * Starts the stopwatch.
- *
- * @return this {@code Stopwatch} instance
- * @throws IllegalStateException if the stopwatch is already running.
- */
- public Stopwatch start() {
- checkState(!isRunning, "This stopwatch is already running.");
- isRunning = true;
- startTick = ticker.read();
- return this;
- }
-
- /**
- * Stops the stopwatch. Future reads will return the fixed duration that had
- * elapsed up to this point.
- *
- * @return this {@code Stopwatch} instance
- * @throws IllegalStateException if the stopwatch is already stopped.
- */
- public Stopwatch stop() {
- long tick = ticker.read();
- checkState(isRunning, "This stopwatch is already stopped.");
- isRunning = false;
- elapsedNanos += tick - startTick;
- return this;
- }
-
- /**
- * Sets the elapsed time for this stopwatch to zero,
- * and places it in a stopped state.
- *
- * @return this {@code Stopwatch} instance
- */
- public Stopwatch reset() {
- elapsedNanos = 0;
- isRunning = false;
- return this;
- }
-
- private long elapsedNanos() {
- return isRunning ? ticker.read() - startTick + elapsedNanos : elapsedNanos;
- }
-
- /**
- * Returns the current elapsed time shown on this stopwatch, expressed
- * in the desired time unit, with any fraction rounded down.
- *
- * Note that the overhead of measurement can be more than a microsecond, so
- * it is generally not useful to specify {@link TimeUnit#NANOSECONDS}
- * precision here.
- *
- * @return
- * @since 14.0 (since 10.0 as {@code elapsedTime()})
- */
- public long elapsed(TimeUnit desiredUnit) {
- return desiredUnit.convert(elapsedNanos(), NANOSECONDS);
- }
-
- // Guava is an outstanding library, but Stopwatch has caused an absurd compat headache relative to the problem it
- // solves. Remember the createStarted() change before this? This particular class isn't even close to being worth
- // the shading/debugging/compat-problem-solving time it has consumed due to these little stylistic ABI changes.
- @Deprecated
- public long elapsedMillis()
- {
- return TimeUnit.MILLISECONDS.convert(elapsedNanos(), NANOSECONDS);
- }
-
- /**
- * Returns a string representation of the current elapsed time.
- * @return
- */
- @GwtIncompatible("String.format()")
- @Override public String toString() {
- long nanos = elapsedNanos();
-
- TimeUnit unit = chooseUnit(nanos);
- double value = (double) nanos / NANOSECONDS.convert(1, unit);
-
- // Too bad this functionality is not exposed as a regular method call
- return String.format("%.4g %s", value, abbreviate(unit));
- }
-
- private static TimeUnit chooseUnit(long nanos) {
- if (DAYS.convert(nanos, NANOSECONDS) > 0) {
- return DAYS;
- }
- if (HOURS.convert(nanos, NANOSECONDS) > 0) {
- return HOURS;
- }
- if (MINUTES.convert(nanos, NANOSECONDS) > 0) {
- return MINUTES;
- }
- if (SECONDS.convert(nanos, NANOSECONDS) > 0) {
- return SECONDS;
- }
- if (MILLISECONDS.convert(nanos, NANOSECONDS) > 0) {
- return MILLISECONDS;
- }
- if (MICROSECONDS.convert(nanos, NANOSECONDS) > 0) {
- return MICROSECONDS;
- }
- return NANOSECONDS;
- }
-
- private static String abbreviate(TimeUnit unit) {
- switch (unit) {
- case NANOSECONDS:
- return "ns";
- case MICROSECONDS:
- return "\u03bcs"; // μs
- case MILLISECONDS:
- return "ms";
- case SECONDS:
- return "s";
- case MINUTES:
- return "min";
- case HOURS:
- return "h";
- case DAYS:
- return "d";
- default:
- throw new AssertionError();
- }
- }
-}
diff --git a/janusgraph-hbase/src/main/java/com/google/common/io/Closeables.java b/janusgraph-hbase/src/main/java/com/google/common/io/Closeables.java
deleted file mode 100644
index 99ed43c3f3..0000000000
--- a/janusgraph-hbase/src/main/java/com/google/common/io/Closeables.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (C) 2007 The Guava Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.google.common.io;
-
-import com.google.common.annotations.Beta;
-import com.google.common.annotations.VisibleForTesting;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.Reader;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import javax.annotation.Nullable;
-
-/**
- * Utility methods for working with {@link Closeable} objects.
- *
- * @author Michael Lancaster
- * @since 1.0
- */
-@Beta
-public final class Closeables {
- @VisibleForTesting static final Logger logger
- = Logger.getLogger(Closeables.class.getName());
-
- private Closeables() {}
-
- /**
- * Closes a {@link Closeable}, with control over whether an {@code IOException} may be thrown.
- * This is primarily useful in a finally block, where a thrown exception needs to be logged but
- * not propagated (otherwise the original exception will be lost).
- *
- * If {@code swallowIOException} is true then we never throw {@code IOException} but merely log
- * it.
- *
- * Example: While it's not safe in the general case to ignore exceptions that are thrown when closing
- * an I/O resource, it should generally be safe in the case of a resource that's being used only
- * for reading, such as an {@code InputStream}. Unlike with writable resources, there's no
- * chance that a failure that occurs when closing the stream indicates a meaningful problem such
- * as a failure to flush all bytes to the underlying resource.
- *
- * @param inputStream the input stream to be closed, or {@code null} in which case this method
- * does nothing
- * @since 17.0
- */
- public static void closeQuietly(@Nullable InputStream inputStream) {
- try {
- close(inputStream, true);
- } catch (IOException impossible) {
- throw new AssertionError(impossible);
- }
- }
-
- /**
- * Closes the given {@link Reader}, logging any {@code IOException} that's thrown rather than
- * propagating it.
- *
- * While it's not safe in the general case to ignore exceptions that are thrown when closing
- * an I/O resource, it should generally be safe in the case of a resource that's being used only
- * for reading, such as a {@code Reader}. Unlike with writable resources, there's no chance that
- * a failure that occurs when closing the reader indicates a meaningful problem such as a failure
- * to flush all bytes to the underlying resource.
- *
- * @param reader the reader to be closed, or {@code null} in which case this method does nothing
- * @since 17.0
- */
- public static void closeQuietly(@Nullable Reader reader) {
- try {
- close(reader, true);
- } catch (IOException impossible) {
- throw new AssertionError(impossible);
- }
- }
-}
diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/AdminMask.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/AdminMask.java
deleted file mode 100644
index 68b7136b75..0000000000
--- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/AdminMask.java
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2017 JanusGraph Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * Copyright DataStax, Inc.
- *
- * Please see the included license file for details.
- */
-package org.janusgraph.diskstorage.hbase;
-
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-
-import java.io.Closeable;
-import java.io.IOException;
-
-/**
- * This interface hides ABI/API breaking changes that HBase has made to its Admin/HBaseAdmin over the course
- * of development from 0.94 to 1.0 and beyond.
- */
-public interface AdminMask extends Closeable
-{
-
- void clearTable(String tableName, long timestamp) throws IOException;
-
- /**
- * Drop given table. Table can be either enabled or disabled.
- * @param tableName Name of the table to delete
- * @throws IOException
- */
- void dropTable(String tableName) throws IOException;
-
- HTableDescriptor getTableDescriptor(String tableName) throws IOException;
-
- boolean tableExists(String tableName) throws IOException;
-
- void createTable(HTableDescriptor desc) throws IOException;
-
- void createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException;
-
- /**
- * Estimate the number of regionservers in the HBase cluster.
- *
- * This is usually implemented by calling
- * {@link HBaseAdmin#getClusterStatus()} and then
- * {@link ClusterStatus#getServers()} and finally {@code size()} on the
- * returned server list.
- *
- * @return the number of servers in the cluster or -1 if it could not be determined
- */
- int getEstimatedRegionServerCount();
-
- void disableTable(String tableName) throws IOException;
-
- void enableTable(String tableName) throws IOException;
-
- boolean isTableDisabled(String tableName) throws IOException;
-
- void addColumn(String tableName, HColumnDescriptor columnDescriptor) throws IOException;
-
- void snapshot(String snapshotName, String table) throws IllegalArgumentException, IOException;
-
- void deleteSnapshot(String snapshotName) throws IOException;
-}
diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/ConnectionMask.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/ConnectionMask.java
deleted file mode 100644
index f24e2eadcd..0000000000
--- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/ConnectionMask.java
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 JanusGraph Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/**
- * Copyright DataStax, Inc.
- *
- * Please see the included license file for details.
- */
-package org.janusgraph.diskstorage.hbase;
-
-import org.apache.hadoop.hbase.HRegionLocation;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * This interface hides ABI/API breaking changes that HBase has made to its (H)Connection class over the course
- * of development from 0.94 to 1.0 and beyond.
- */
-public interface ConnectionMask extends Closeable
-{
-
- /**
- * Retrieve the TableMask compatibility layer object for the supplied table name.
- * @param name
- * @return The TableMask for the specified table.
- * @throws IOException in the case of backend exceptions.
- */
- TableMask getTable(String name) throws IOException;
-
- /**
- * Retrieve the AdminMask compatibility layer object for this Connection.
- * @return The AdminMask for this Connection
- * @throws IOException in the case of backend exceptions.
- */
- AdminMask getAdmin() throws IOException;
-
- /**
- * Retrieve the RegionLocations for the supplied table name.
- * @param tableName
- * @return A map of HRegionInfo to ServerName that describes the storage regions for the named table.
- * @throws IOException in the case of backend exceptions.
- */
- List
- * When this is not set, JanusGraph attempts to automatically detect the
- * HBase runtime version by calling {@link VersionInfo#getVersion()}. JanusGraph
- * then checks the returned version string against a hard-coded list of
- * supported version prefixes and instantiates the associated compat layer
- * if a match is found.
- *
- * When this is set, JanusGraph will not call
- * {@code VersionInfo.getVersion()} or read its hard-coded list of supported
- * version prefixes. JanusGraph will instead attempt to instantiate the class
- * specified (via the no-arg constructor which must exist) and then attempt
- * to cast it to HBaseCompat and use it as such. JanusGraph will assume the
- * supplied implementation is compatible with the runtime HBase version and
- * make no attempt to verify that assumption.
- *
- * Setting this key incorrectly could cause runtime exceptions at best or
- * silent data corruption at worst. This setting is intended for users
- * running exotic HBase implementations that don't support VersionInfo or
- * implementations which return values from {@code VersionInfo.getVersion()}
- * that are inconsistent with Apache's versioning convention. It may also be
- * useful to users who want to run against a new release of HBase that JanusGraph
- * doesn't yet officially support.
- *
- */
- public static final ConfigOption
- * Please see the included license file for details.
- */
-package org.janusgraph.diskstorage.hbase;
-
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Row;
-import org.apache.hadoop.hbase.client.Scan;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * This interface hides ABI/API breaking changes that HBase has made to its Table/HTableInterface over the course
- * of development from 0.94 to 1.0 and beyond.
- */
-public interface TableMask extends Closeable
-{
-
- ResultScanner getScanner(Scan filter) throws IOException;
-
- Result[] get(List
- *
- */
-@Deprecated
-public class JanusGraphId {
-
- /**
- * Converts a user provided long id into a JanusGraph vertex id. The id must be positive and can be at most 61 bits long.
- * This method is useful when providing ids during vertex creation via {@link org.apache.tinkerpop.gremlin.structure.Graph#addVertex(Object...)}.
- *
- * @param id long id
- * @return a corresponding JanusGraph vertex id
- * @deprecated Use {@link org.janusgraph.graphdb.idmanagement.IDManager#toVertexId(long)}.
- */
- public static long toVertexId(long id) {
- Preconditions.checkArgument(id > 0, "Vertex id must be positive: %s", id);
- Preconditions.checkArgument(IDManager.VertexIDType.NormalVertex.removePadding(Long.MAX_VALUE) >= id, "Vertex id is too large: %s", id);
- return IDManager.VertexIDType.NormalVertex.addPadding(id);
- }
-
- /**
- * Converts a JanusGraph vertex id to the user provided id as the inverse mapping of {@link #toVertexId(long)}.
- *
- * @param id JanusGraph vertex id (must be positive)
- * @return original user provided id
- * @deprecated Use {@link org.janusgraph.graphdb.idmanagement.IDManager#fromVertexId(long)}
- */
- public static long fromVertexId(long id) {
- Preconditions.checkArgument(id > 0, "Invalid vertex id provided: %s", id);
- return IDManager.VertexIDType.NormalVertex.removePadding(id);
- }
-
- /**
- * Converts a JanusGraph vertex id of a given vertex to the user provided id as the inverse mapping of {@link #toVertexId(long)}.
- *
- * @param v Vertex
- * @return original user provided id
- * @deprecated Use {@link org.janusgraph.graphdb.idmanagement.IDManager#fromVertexId(long)}
- */
- public static long fromVertexID(JanusGraphVertex v) {
- Preconditions.checkArgument(v.hasId(), "Invalid vertex provided: %s", v);
- return fromVertexId(v.longId());
- }
-}
diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/Backend.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/Backend.java
index c84d284d21..d2e639a370 100644
--- a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/Backend.java
+++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/Backend.java
@@ -20,7 +20,6 @@
import org.apache.commons.lang3.StringUtils;
import org.janusgraph.core.JanusGraphConfigurationException;
import org.janusgraph.core.JanusGraphException;
-import org.janusgraph.core.schema.JanusGraphManagement;
import org.janusgraph.diskstorage.configuration.BasicConfiguration;
import org.janusgraph.diskstorage.configuration.ConfigOption;
import org.janusgraph.diskstorage.configuration.Configuration;
@@ -48,6 +47,7 @@
import org.janusgraph.diskstorage.keycolumnvalue.cache.NoKCVSCache;
import org.janusgraph.diskstorage.keycolumnvalue.keyvalue.OrderedKeyValueStoreManager;
import org.janusgraph.diskstorage.keycolumnvalue.keyvalue.OrderedKeyValueStoreManagerAdapter;
+import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanJobFuture;
import org.janusgraph.diskstorage.keycolumnvalue.scan.StandardScanner;
import org.janusgraph.diskstorage.locking.Locker;
import org.janusgraph.diskstorage.locking.LockerProvider;
@@ -436,7 +436,7 @@ private StandardScanner.Builder buildStoreIndexScanJob(String storeName) {
.setWorkBlockSize(this.configuration.get(PAGE_SIZE));
}
- public JanusGraphManagement.IndexJobFuture getScanJobStatus(Object jobId) {
+ public ScanJobFuture getScanJobStatus(Object jobId) {
return scanner.getRunningJob(jobId);
}
diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/indexing/IndexTransaction.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/indexing/IndexTransaction.java
index 7adce66b28..9e46824dab 100644
--- a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/indexing/IndexTransaction.java
+++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/indexing/IndexTransaction.java
@@ -22,14 +22,12 @@
import org.janusgraph.diskstorage.util.BackendOperation;
import org.janusgraph.graphdb.database.idhandling.VariableLong;
import org.janusgraph.graphdb.database.serialize.DataOutput;
-import org.janusgraph.graphdb.util.StreamIterable;
import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
-import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
@@ -98,14 +96,6 @@ public void register(String store, String key, KeyInformation information) throw
index.register(store,key,information,indexTx);
}
- /**
- * @deprecated use {@link #queryStream(IndexQuery query)} instead.
- */
- @Deprecated
- public ListIDManager idManager = ((StandardJanusGraph) graph).getIDManager();
- * maxSize+1 if instantiated via the
- * {@link #PriorityQueue(int,boolean)} constructor with
- * prepopulate set to true.
- *
-*/
-@Deprecated
-public abstract class PriorityQueuetrue iff parameter a is less than parameter b.
- */
- protected abstract boolean lessThan(T a, T b);
-
- /**
- * This method can be overridden by extending classes to return a sentinel
- * object which will be used by the {@link PriorityQueue#PriorityQueue(int,boolean)}
- * constructor to fill the queue, so that the code which uses that queue can always
- * assume it's full and only change the top without attempting to insert any new
- * object.
- *
- * Those sentinel values should always compare worse than any non-sentinel
- * value (i.e., {@link #lessThan} should always favor the
- * non-sentinel values).
- *
- * By default, this method returns false, which means the queue will not be
- * filled with sentinel values. Otherwise, the value returned will be used to
- * pre-populate the queue. Adds sentinel values to the queue.
- *
- * If this method is extended to return a non-null value, then the following
- * usage pattern is recommended:
- *
- *
- * // extends getSentinelObject() to return a non-null value.
- * PriorityQueue<MyObject> pq = new MyQueue<MyObject>(numHits);
- * // save the 'top' element, which is guaranteed to not be null.
- * MyObject pqTop = pq.top();
- * <...>
- * // now in order to add a new element, which is 'better' than top (after
- * // you've verified it is better), it is as simple as:
- * pqTop.change().
- * pqTop = pq.updateTop();
- *
- *
- * NOTE: if this method returns a non-null value, it will be called by
- * the {@link PriorityQueue#PriorityQueue(int,boolean)} constructor
- * {@link #size()} times, relying on a new object to be returned and will not
- * check if it's null again. Therefore you should ensure any call to this
- * method creates a new instance and behaves consistently, e.g., it cannot
- * return null if it previously returned non-null.
- *
- * @return the sentinel object to use to pre-populate the queue, or null if
- * sentinel objects are not supported.
- */
- protected T getSentinelObject() {
- return null;
- }
-
- /**
- * Adds an Object to a PriorityQueue in log(size) time. If one tries to add
- * more objects than maxSize from initialize an
- * {@link ArrayIndexOutOfBoundsException} is thrown.
- *
- * @return the new 'top' element in the queue.
- */
- public final T add(T element) {
- size++;
- heap[size] = element;
- upHeap();
- return heap[1];
- }
-
- /**
- * Adds an Object to a PriorityQueue in log(size) time.
- * It returns the object (if any) that was
- * dropped off the heap because it was full. This can be
- * the given parameter (in case it is smaller than the
- * full heap's minimum, and couldn't be added), or another
- * object that was previously the smallest value in the
- * heap and now has been replaced by a larger one, or null
- * if the queue wasn't yet full with maxSize elements.
- */
- public T insertWithOverflow(T element) {
- if (size < maxSize) {
- add(element);
- return null;
- } else if (size > 0 && !lessThan(element, heap[1])) {
- T ret = heap[1];
- heap[1] = element;
- updateTop();
- return ret;
- } else {
- return element;
- }
- }
-
- /** Returns the least element of the PriorityQueue in constant time. */
- public final T top() {
- // We don't need to check size here: if maxSize is 0,
- // then heap is length 2 array with both entries null.
- // If size is 0 then heap[1] is already null.
- return heap[1];
- }
-
- /** Removes and returns the least element of the PriorityQueue in log(size)
- time. */
- public final T pop() {
- if (size > 0) {
- T result = heap[1]; // save first value
- heap[1] = heap[size]; // move last to first
- heap[size] = null; // permit GC of objects
- size--;
- downHeap(); // adjust heap
- return result;
- } else
- return null;
- }
-
- /**
- * Should be called when the Object at top changes values. Still log(n) worst
- * case, but it's at least twice as fast to
- *
- *
- * pq.top().change();
- * pq.updateTop();
- *
- *
- * instead of
- *
- *
- * o = pq.pop();
- * o.change();
- * pq.push(o);
- *
- *
- * @return the new 'top' element.
- */
- public final T updateTop() {
- downHeap();
- return heap[1];
- }
-
- /** Returns the number of elements currently stored in the PriorityQueue. */
- public final int size() {
- return size;
- }
-
- /** Removes all entries from the PriorityQueue. */
- public final void clear() {
- for (int i = 0; i <= size; i++) {
- heap[i] = null;
- }
- size = 0;
- }
-
- private void upHeap() {
- int i = size;
- T node = heap[i]; // save bottom node
- int j = i >>> 1;
- while (j > 0 && lessThan(node, heap[j])) {
- heap[i] = heap[j]; // shift parents down
- i = j;
- j = j >>> 1;
- }
- heap[i] = node; // install saved node
- }
-
- private void downHeap() {
- int i = 1;
- T node = heap[i]; // save top node
- int j = i << 1; // find smaller child
- int k = j + 1;
- if (k <= size && lessThan(heap[k], heap[j])) {
- j = k;
- }
- while (j <= size && lessThan(heap[j], node)) {
- heap[i] = heap[j]; // shift up child
- i = j;
- j = i << 1;
- k = j + 1;
- if (k <= size && lessThan(heap[k], heap[j])) {
- j = k;
- }
- }
- heap[i] = node; // install saved node
- }
-
- /** This method returns the internal heap array as Object[].
- */
- protected final Object[] getHeapArray() {
- return heap;
- }
-}
diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterable.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterable.java
deleted file mode 100644
index 77ba8b77d9..0000000000
--- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterable.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 JanusGraph Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package org.janusgraph.graphdb.vertices;
-
-import org.janusgraph.core.JanusGraphRelation;
-import org.janusgraph.graphdb.internal.InternalRelation;
-
-import java.util.Iterator;
-
-@Deprecated
-public class RemovableRelationIterable
- *
- *
- *
- * Stopwatch stopwatch = Stopwatch.{@link #createStarted createStarted}();
- * doSomething();
- * stopwatch.{@link #stop stop}(); // optional
- *
- * long millis = stopwatch.elapsed(MILLISECONDS);
- *
- * log.info("time: " + stopwatch); // formatted string like "12.3 ms"
- *
- * {@code
- *
- * public void useStreamNicely() throws IOException {
- * SomeStream stream = new SomeStream("foo");
- * boolean threw = true;
- * try {
- * // ... code which does something with the stream ...
- * threw = false;
- * } finally {
- * // If an exception occurs, rethrow it only if threw==false:
- * Closeables.close(stream, threw);
- * }
- * }}
- *
- * @param closeable the {@code Closeable} object to be closed, or null, in which case this method
- * does nothing
- * @param swallowIOException if true, don't propagate IO exceptions thrown by the {@code close}
- * methods
- * @throws IOException if {@code swallowIOException} is false and {@code close} throws an
- * {@code IOException}.
- */
- public static void close(@Nullable Closeable closeable,
- boolean swallowIOException) throws IOException {
- if (closeable == null) {
- return;
- }
- try {
- closeable.close();
- } catch (IOException e) {
- if (swallowIOException) {
- logger.log(Level.WARNING,
- "IOException thrown while closing Closeable.", e);
- } else {
- throw e;
- }
- }
- }
-
- public static void closeQuietly(@Nullable Closeable closeable) {
- try {
- close(closeable, true);
- } catch (IOException ignored) {}
- }
-
- /**
- * Closes the given {@link InputStream}, logging any {@code IOException} that's thrown rather
- * than propagating it.
- *
- *