diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..4583a3a711 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,8 @@ +version: 2 +updates: +- package-ecosystem: maven + directory: "/" + schedule: + interval: daily + time: "11:00" + open-pull-requests-limit: 10 \ No newline at end of file diff --git a/.github/workflows/ci-backend-cql.yml b/.github/workflows/ci-backend-cql.yml index 80a6b458a7..c0b93f75a4 100644 --- a/.github/workflows/ci-backend-cql.yml +++ b/.github/workflows/ci-backend-cql.yml @@ -97,37 +97,88 @@ jobs: name: murmur-client-auth java: 8 - module: cql + args: "-Pscylladb -Dtest=\"**/diskstorage/cql/*\"" + name: scylladb-diskstorage + java: 8 + - module: cql + args: "-Pscylladb -Dtest=\"**/graphdb/cql/*\"" + name: scylladb-graphdb + java: 8 + # FIXME: this takes forever to run + # - module: cql + # args: "-Pscylladb -Dtest=\"**/hadoop/*\"" + # name: scylladb-hadoop + # java: 8 + - module: cql + args: "-Pscylladb -Dtest=\"**/core/cql/*\"" + name: scylladb-core + java: 8 + - module: cql + args: "-Pcassandra3-byteordered -Dtest=\"**/diskstorage/cql/*\"" + name: byteordered-diskstorage + install-args: "-Pjava-11" + java: 11 + - module: cql + args: "-Pcassandra3-murmur -Dtest=\"**/diskstorage/cql/*\"" + name: murmur-diskstorage install-args: "-Pjava-11" - args: "-Pcassandra3-murmur -Pjava-11 -Dtest=\"**/diskstorage/cql/*\"" - name: murmur-diskstorage-java-11 java: 11 - module: cql + args: "-Pcassandra3-byteordered -Dtest=\"**/graphdb/cql/*\"" + name: byteordered-graphdb install-args: "-Pjava-11" - args: "-Pcassandra3-murmur -Pjava-11 -Dtest=\"**/graphdb/cql/*\"" - name: murmur-graphdb-java-11 java: 11 - module: cql + args: "-Pcassandra3-murmur -Dtest=\"**/graphdb/cql/*\"" + name: murmur-graphdb + install-args: "-Pjava-11" + java: 11 + - module: cql + args: "-Pcassandra3-murmur -Dtest=\"**/hadoop/*\"" + name: murmur-hadoop + install-args: "-Pjava-11" + java: 11 + - module: cql + args: 
"-Pcassandra3-byteordered -Dtest=\"**/core/cql/*\"" + name: byteordered-core + install-args: "-Pjava-11" + java: 11 + - module: cql + args: "-Pcassandra3-murmur -Dtest=\"**/core/cql/*\"" + name: murmur-core + install-args: "-Pjava-11" + java: 11 + - module: cql + args: "-Pcassandra3-murmur-ssl -Dtest=\"**/diskstorage/cql/CQLStoreTest.java\"" + name: murmur-ssl + install-args: "-Pjava-11" + java: 11 + - module: cql + args: "-Pcassandra3-murmur-client-auth -Dtest=\"**/diskstorage/cql/CQLStoreTest.java\"" + name: murmur-client-auth install-args: "-Pjava-11" - args: "-Pcassandra3-murmur -Pjava-11 -Dtest=\"**/hadoop/*\"" - name: murmur-hadoop-java-11 java: 11 - module: cql args: "-Pscylladb -Dtest=\"**/diskstorage/cql/*\"" name: scylladb-diskstorage - java: 8 + install-args: "-Pjava-11" + java: 11 - module: cql args: "-Pscylladb -Dtest=\"**/graphdb/cql/*\"" name: scylladb-graphdb - java: 8 + install-args: "-Pjava-11" + java: 11 # FIXME: this takes forever to run # - module: cql # args: "-Pscylladb -Dtest=\"**/hadoop/*\"" # name: scylladb-hadoop - # java: 8 + # install-args: "-Pjava-11" + # java: 11 - module: cql args: "-Pscylladb -Dtest=\"**/core/cql/*\"" name: scylladb-core - java: 8 + install-args: "-Pjava-11" + java: 11 steps: - uses: actions/checkout@v2 with: @@ -142,14 +193,14 @@ jobs: with: java-version: ${{ matrix.java }} - run: mvn clean install --projects janusgraph-${{ matrix.module }} ${{ env.BUILD_MAVEN_OPTS }} ${{ matrix.install-args }} - - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.args }} + - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.install-args }} ${{ matrix.args }} - uses: actions/upload-artifact@v2 with: name: jacoco-reports path: target/jacoco-combined.exec - uses: codecov/codecov-action@v1 with: - name: codecov-cql-${{ matrix.name }} + name: codecov-cql-${{ matrix.name }}-java-${{ matrix.java }} full-tests: runs-on: ubuntu-20.04 diff 
--git a/.github/workflows/ci-backend-hbase.yml b/.github/workflows/ci-backend-hbase.yml index 8b29d3ff23..c6570be663 100644 --- a/.github/workflows/ci-backend-hbase.yml +++ b/.github/workflows/ci-backend-hbase.yml @@ -62,25 +62,31 @@ jobs: include: - module: hbase args: "-Dtest=\"**/diskstorage/hbase/*\"" - name: hbase1-diskstorage + name: hbase2-diskstorage + java: 8 - module: hbase args: "-Dtest=\"**/graphdb/hbase/*\"" - name: hbase1-graphdb + name: hbase2-graphdb + java: 8 - module: hbase args: "-Dtest=\"**/hadoop/*\"" - name: hbase1-hadoop + name: hbase2-hadoop + java: 8 - module: hbase - install-args: "-Dhbase.profile -Phbase2" + install-args: "-Pjava-11" args: "-Dtest=\"**/diskstorage/hbase/*\"" name: hbase2-diskstorage + java: 11 - module: hbase - install-args: "-Dhbase.profile -Phbase2" + install-args: "-Pjava-11" args: "-Dtest=\"**/graphdb/hbase/*\"" name: hbase2-graphdb + java: 11 - module: hbase - install-args: "-Dhbase.profile -Phbase2" + install-args: "-Pjava-11" args: "-Dtest=\"**/hadoop/*\"" name: hbase2-hadoop + java: 11 steps: - uses: actions/checkout@v2 with: @@ -93,13 +99,13 @@ jobs: ${{ runner.os }}-maven- - uses: actions/setup-java@v1 with: - java-version: 1.8 + java-version: ${{ matrix.java }} - run: mvn clean install --projects janusgraph-${{ matrix.module }} ${{ env.BUILD_MAVEN_OPTS }} ${{ matrix.install-args }} - - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.args }} -Dhbase.docker.uid=$(id -u) -Dhbase.docker.gid=$(id -g) + - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.install-args }} ${{ matrix.args }} -Dhbase.docker.uid=$(id -u) -Dhbase.docker.gid=$(id -g) - uses: actions/upload-artifact@v2 with: name: jacoco-reports path: target/jacoco-combined.exec - uses: codecov/codecov-action@v1 with: - name: codecov-hbase-${{ matrix.name }} + name: codecov-hbase-${{ matrix.name }}-java-${{ matrix.java }} diff --git 
a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml index 0625c52c0e..d5c918a9e5 100644 --- a/.github/workflows/ci-core.yml +++ b/.github/workflows/ci-core.yml @@ -52,6 +52,23 @@ jobs: - run: mvn clean install --projects janusgraph-all -Pjanusgraph-cache -Dmaven.javadoc.skip=true ${{ env.BUILD_MAVEN_OPTS }} - run: mvn verify --projects janusgraph-all -Pjanusgraph-cache ${{ env.VERIFY_MAVEN_OPTS }} + build-java11: + needs: build-all + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + - uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + - uses: actions/setup-java@v1 + with: + java-version: 11 + - run: mvn clean install --projects janusgraph-all -Pjava-11 -Pjanusgraph-cache -Dmaven.javadoc.skip=true ${{ env.BUILD_MAVEN_OPTS }} + - run: mvn verify --projects janusgraph-all -Pjava-11 -Pjanusgraph-cache ${{ env.VERIFY_MAVEN_OPTS }} + tests: runs-on: ubuntu-20.04 needs: build-all @@ -60,12 +77,37 @@ jobs: matrix: include: - module: driver + java: 8 + - module: server + java: 8 + - module: test + java: 8 + - module: inmemory + args: "-Dtest.skip.tp=false" + java: 8 + - module: berkeleyje + java: 8 + - module: lucene + java: 8 + - module: driver + install-args: "-Pjava-11" + java: 11 - module: server + install-args: "-Pjava-11" + java: 11 - module: test + install-args: "-Pjava-11" + java: 11 - module: inmemory + install-args: "-Pjava-11" args: "-Dtest.skip.tp=false" + java: 11 - module: berkeleyje + install-args: "-Pjava-11" + java: 11 - module: lucene + install-args: "-Pjava-11" + java: 11 steps: - uses: actions/checkout@v2 with: @@ -78,13 +120,13 @@ jobs: ${{ runner.os }}-maven- - uses: actions/setup-java@v1 with: - java-version: 1.8 + java-version: ${{ matrix.java }} - run: mvn clean install --projects janusgraph-${{ matrix.module }} ${{ env.BUILD_MAVEN_OPTS }} ${{ matrix.install-args }} - - run: mvn verify --projects janusgraph-${{ 
matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.args }} + - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.install-args }} ${{ matrix.args }} - uses: actions/upload-artifact@v2 with: name: jacoco-reports path: target/jacoco-combined.exec - uses: codecov/codecov-action@v1 with: - name: codecov-core-${{ matrix.module }} + name: codecov-core-${{ matrix.module }}-java-${{ matrix.java }} diff --git a/.github/workflows/ci-docs.yml b/.github/workflows/ci-docs.yml index 42768abbae..bb712a8b71 100644 --- a/.github/workflows/ci-docs.yml +++ b/.github/workflows/ci-docs.yml @@ -53,8 +53,8 @@ jobs: ${{ runner.os }}-maven- - uses: actions/setup-java@v1 with: - java-version: 1.8 - - run: mvn clean install -pl janusgraph-doc ${{ env.BUILD_MAVEN_OPTS }} + java-version: 11 + - run: mvn clean install -Pjava-11 -pl janusgraph-doc ${{ env.BUILD_MAVEN_OPTS }} - run: git diff --exit-code docs/configs/janusgraph-cfg.md - run: docker build -t doc-site:mkdocs -f docs.Dockerfile . 
- run: docker run --rm -v $PWD:/mkdocs doc-site:mkdocs mkdocs build diff --git a/.github/workflows/ci-index-es.yml b/.github/workflows/ci-index-es.yml index 846a8fea33..6c4527328f 100644 --- a/.github/workflows/ci-index-es.yml +++ b/.github/workflows/ci-index-es.yml @@ -64,12 +64,30 @@ jobs: - module: es args: "-Pelasticsearch7" name: es7 + java: 8 - module: es args: "-Pelasticsearch6" name: es6 + java: 8 - module: es args: "-Pelasticsearch60" name: es60 + java: 8 + - module: es + install-args: "-Pjava-11" + args: "-Pelasticsearch7" + name: es7 + java: 11 + - module: es + install-args: "-Pjava-11" + args: "-Pelasticsearch6" + name: es6 + java: 11 + - module: es + install-args: "-Pjava-11" + args: "-Pelasticsearch60" + name: es60 + java: 11 steps: - uses: actions/checkout@v2 with: @@ -82,13 +100,13 @@ jobs: ${{ runner.os }}-maven- - uses: actions/setup-java@v1 with: - java-version: 1.8 + java-version: ${{ matrix.java }} - run: mvn clean install --projects janusgraph-${{ matrix.module }} ${{ env.BUILD_MAVEN_OPTS }} ${{ matrix.install-args }} - - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.args }} + - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.install-args }} ${{ matrix.args }} - uses: actions/upload-artifact@v2 with: name: jacoco-reports path: target/jacoco-combined.exec - uses: codecov/codecov-action@v1 with: - name: codecov-index-${{ matrix.name }} + name: codecov-index-${{ matrix.name }}-java-${{ matrix.java }} diff --git a/.github/workflows/ci-index-solr.yml b/.github/workflows/ci-index-solr.yml index 48edf9cbdc..bf4b0a4348 100644 --- a/.github/workflows/ci-index-solr.yml +++ b/.github/workflows/ci-index-solr.yml @@ -62,11 +62,14 @@ jobs: matrix: include: - module: solr - args: "-Psolr7" - name: solr7 - - module: solr args: "-Psolr8" name: solr8 + java: 8 + - module: solr + install-args: "-Pjava-11" + args: "-Psolr8" + name: solr8 + java: 11 steps: - uses: 
actions/checkout@v2 with: @@ -79,13 +82,13 @@ jobs: ${{ runner.os }}-maven- - uses: actions/setup-java@v1 with: - java-version: 1.8 + java-version: ${{ matrix.java }} - run: mvn clean install --projects janusgraph-${{ matrix.module }} ${{ env.BUILD_MAVEN_OPTS }} ${{ matrix.install-args }} - - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.args }} + - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.install-args }} ${{ matrix.args }} - uses: actions/upload-artifact@v2 with: name: jacoco-reports path: target/jacoco-combined.exec - uses: codecov/codecov-action@v1 with: - name: codecov-index-${{ matrix.name }} + name: codecov-index-${{ matrix.name }}-java-${{ matrix.java }} diff --git a/.github/workflows/ci-java-11.yml b/.github/workflows/ci-java-11.yml deleted file mode 100644 index d76fcb8496..0000000000 --- a/.github/workflows/ci-java-11.yml +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2021 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: CI Java 11 - -on: - pull_request: - paths-ignore: - - 'docs/**' - - '.github/workflows/ci-docs.yml' - - '.github/ISSUE_TEMPLATE/**' - - 'requirements.txt' - - 'docs.Dockerfile' - - '*.md' - push: - paths-ignore: - - 'docs/**' - - '.github/workflows/ci-docs.yml' - - 'requirements.txt' - - 'docs.Dockerfile' - - '*.md' - -env: - BUILD_MAVEN_OPTS: "-DskipTests=true --batch-mode --also-make" - VERIFY_MAVEN_OPTS: "-Pcoverage" - -jobs: - build-all: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-maven- - - uses: actions/setup-java@v1 - with: - java-version: 1.8 - - run: mvn clean install --projects janusgraph-all -Pjanusgraph-cache -Dmaven.javadoc.skip=true ${{ env.BUILD_MAVEN_OPTS }} - - run: mvn verify --projects janusgraph-all -Pjanusgraph-cache ${{ env.VERIFY_MAVEN_OPTS }} - - build-java11: - needs: build-all - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-maven- - - uses: actions/setup-java@v1 - with: - java-version: 11 - - run: mvn clean install --projects janusgraph-all -Pjava-11 -Pjanusgraph-cache -Dmaven.javadoc.skip=true ${{ env.BUILD_MAVEN_OPTS }} - - run: mvn verify --projects janusgraph-all -Pjava-11 -Pjanusgraph-cache ${{ env.VERIFY_MAVEN_OPTS }} - - tests: - runs-on: ubuntu-20.04 - needs: build-all - strategy: - fail-fast: false - matrix: - include: - - module: driver - - module: server - - module: test - - module: inmemory - args: "-Dtest.skip.tp=false" - - module: berkeleyje - - module: lucene - steps: - - uses: actions/checkout@v2 - - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-maven- - - uses: 
actions/setup-java@v1 - with: - java-version: 11 - - run: mvn clean install --projects janusgraph-${{ matrix.module }} -Pjava-11 ${{ env.BUILD_MAVEN_OPTS }} ${{ matrix.install-args }} - - run: mvn verify --projects janusgraph-${{ matrix.module }} -Pjava-11 ${{ env.VERIFY_MAVEN_OPTS }} ${{ matrix.args }} - - uses: codecov/codecov-action@v1 diff --git a/.github/workflows/ci-mapped.yml b/.github/workflows/ci-mapped.yml new file mode 100644 index 0000000000..891ad8fe61 --- /dev/null +++ b/.github/workflows/ci-mapped.yml @@ -0,0 +1,43 @@ +# Copyright 2021 JanusGraph Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: Publish package to GitHub Packages +on: + release: + types: [created] +jobs: + publish: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-java@v2 + with: + java-version: '8' + distribution: 'adopt' + + - name: Setup unique version + run: mvn versions:set -DnewVersion=$(date +%s) -DgenerateBackupPoms=false + + - name: Compile and prepare release + run: mvn clean install -Pjanusgraph-release -Dgpg.skip=true -DskipTests=true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Deploy release + run: mvn deploy -Pjanusgraph-release -Dgpg.skip=true -DskipTests=true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci-release.yml b/.github/workflows/ci-release.yml index d914c09696..1e284a1d76 100644 --- a/.github/workflows/ci-release.yml +++ b/.github/workflows/ci-release.yml @@ -86,6 +86,23 @@ jobs: name: distribution-builds path: janusgraph-dist/target/janusgraph-*.zip + benchmark: + runs-on: ubuntu-20.04 + needs: build-all + steps: + - uses: actions/checkout@v2 + - uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + - uses: actions/setup-java@v1 + with: + java-version: 1.8 + - run: mvn clean install -Pjanusgraph-benchmark ${{ env.BUILD_MAVEN_OPTS }} -Dgpg.skip=true + - run: mvn verify --projects janusgraph-benchmark + tp-tests: runs-on: ubuntu-20.04 if: "github.event_name == 'push' && contains(github.event.head_commit.message, '[tp-tests]') || github.event_name == 'pull_request' && contains(github.event.pull_request.title, '[tp-tests]') || github.event_name == 'schedule'" @@ -96,17 +113,39 @@ jobs: include: - module: inmemory args: "-Dtest.skip.tp=false -DskipTests=true" + java: 8 - module: berkeleyje args: "-Dtest.skip.tp=false -DskipTests=true" + java: 8 - module: cql args: "-Pcassandra3-byteordered -Dtest.skip.tp=false 
-DskipTests=true" + java: 8 - module: cql args: "-Pcassandra3-murmur -Dtest.skip.tp=false -DskipTests=true" + java: 8 - module: hbase args: "-Dtest.skip.tp=false -DskipTests=true" + java: 8 + - module: inmemory + install-args: "-Pjava-11" + args: "-Dtest.skip.tp=false -DskipTests=true" + java: 11 + - module: berkeleyje + install-args: "-Pjava-11" + args: "-Dtest.skip.tp=false -DskipTests=true" + java: 11 + - module: cql + install-args: "-Pjava-11" + args: "-Pcassandra3-byteordered -Dtest.skip.tp=false -DskipTests=true" + java: 11 + - module: cql + install-args: "-Pjava-11" + args: "-Pcassandra3-murmur -Dtest.skip.tp=false -DskipTests=true" + java: 11 - module: hbase - install-args: "-Dhbase.profile -Phbase2" + install-args: "-Pjava-11" args: "-Dtest.skip.tp=false -DskipTests=true" + java: 11 steps: - uses: actions/checkout@v2 - uses: actions/cache@v2 @@ -117,6 +156,6 @@ jobs: ${{ runner.os }}-maven- - uses: actions/setup-java@v1 with: - java-version: 1.8 + java-version: ${{ matrix.java }} - run: mvn clean install --projects janusgraph-${{ matrix.module }} ${{ env.BUILD_MAVEN_OPTS }} ${{ matrix.install-args }} - - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ matrix.args }} + - run: mvn verify --projects janusgraph-${{ matrix.module }} ${{ matrix.install-args }} ${{ matrix.args }} diff --git a/.github/workflows/lint-proto.yml b/.github/workflows/lint-proto.yml index e01009eb3e..74439c26a1 100644 --- a/.github/workflows/lint-proto.yml +++ b/.github/workflows/lint-proto.yml @@ -26,11 +26,11 @@ on: jobs: lint-proto: - name: buf check lint + name: buf lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: ory/build-buf-action@v0 with: - bufVersion: v0.31.1 - bufArgs: check lint --config janusgraph-grpc/buf.yaml ./janusgraph-grpc/src/main/proto/ + bufVersion: v0.56.0 + bufArgs: lint --config janusgraph-grpc/buf.yaml ./janusgraph-grpc/src/main/proto/ diff --git a/.gitignore b/.gitignore index feca7d6c35..836c90439a 100644 --- a/.gitignore 
+++ b/.gitignore @@ -6,7 +6,6 @@ target/ .vscode/ /_bsp/ /output/ -/docker/*.zip # When executing tests in alphabetical order, Maven generates temporary # files with names like this: # @@ -36,12 +35,6 @@ target/ # Backup files created by `mvn versions:update-child-modules` # http://mojo.codehaus.org/versions-maven-plugin/update-child-modules-mojo.html janusgraph-*/pom.xml.versionsBackup -# JUnitBenchmarks output -/janusgraph-*/jub.*.xml -# .deb/.rpm packaging folders -/debian/ -/redhat/ -/pkgcommon/ # Emacs file backups *~ @@ -50,12 +43,7 @@ janusgraph-*/pom.xml.versionsBackup # Vim file backups *.swp -# Coverity tools and analysis outputs -coverity_tool.tar.gz -cov-analysis-linux64/ -cov-int.tar.gz -cov-int/ - /site/ *.hprof +.factorypath \ No newline at end of file diff --git a/BUILDING.md b/BUILDING.md index 6c27bdf231..4b78a82581 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -39,47 +39,7 @@ For more details information, please see [here](janusgraph-dist/README.md#buildi ## Building Docker Image for JanusGraph Server -In order to build Docker image for JanusGraph Server, a -distribution archive is needed. If you wish to build an image from source -refer to `To build the distribution archive` section to build the distribution -archive first. You can also use an [official release](https://github.com/JanusGraph/janusgraph/releases) to avoid building. -To do so check out the release tag you wish to build, example: `git checkout v0.2.0`. Then create target -directory that houses the distribution zip with `mkdir janusgraph-dist/target`. -The [downloaded release](https://github.com/JanusGraph/janusgraph/releases) -is then placed in the recently created target directory. Note that if the -tag is not found you can run `git fetch --all --tags --prune` and then rerun the checkout command. 
- -Once the distribution is in place use the following command -to build and run Docker images with JanusGraph Server, configured -to run the BerkeleyJE backend and Elasticsearch (requires [Docker Compose](https://docs.docker.com/compose/)): - -```bash -mvn docker:build -Pjanusgraph-docker -pl janusgraph-dist -docker-compose -f janusgraph-dist/docker-compose.yml up -``` - -If you are building the Docker image behind a proxy please set an environment variable for either http_proxy or https_proxy accordingly. - -Note the above `docker-compose` call launches containers in the foreground and is convenient for monitoring logs but add "-d" to instead run in the background. - -To connect to the server in the same container on the console: - -```bash -docker exec -i -t janusgraph /var/janusgraph/bin/gremlin.sh -``` - -Then you can interact with the graph on the console through the `:remote` interface: - -```groovy -gremlin> :remote connect tinkerpop.server conf/remote.yaml -==>Configured localhost/127.0.0.1:8182 -gremlin> :remote console -==>All scripts will now be sent to Gremlin Server - [localhost/127.0.0.1:8182] - type ':remote console' to return to local mode -gremlin> GraphOfTheGodsFactory.load(graph) -==>null -gremlin> g = graph.traversal() -==>graphtraversalsource[standardjanusgraph[berkeleyje:db/berkeley], standard] -``` +We moved the docker build into an external repo: https://github.com/JanusGraph/janusgraph-docker. ## Building on Eclipse IDE Note that this has only been tested on Eclipse Neon.2 Release (4.6.2) with m2e (1.7.0.20160603-1933) and m2e-wtp (1.3.1.20160831-1005) plugin. 
diff --git a/NOTICE.txt b/NOTICE.txt index 85ef772f74..99b1ffcf7e 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -36,7 +36,6 @@ It also includes software from other open source projects including, but not lim * HPPC [https://labs.carrotsearch.com/hppc.html] * JUnit [https://www.junit.org/] * Jackson [https://github.com/FasterXML/jackson] - * Junit Benchmark [https://labs.carrotsearch.com/junit-benchmarks.html] * Kryo [https://github.com/EsotericSoftware/kryo] * Metrics [https://metrics.dropwizard.io] * Mockito [https://site.mockito.org/] diff --git a/README.md b/README.md index b108b9ba86..e7b885b05d 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ can support thousands of concurrent users, complex traversals, and analytic grap [![Javadoc][javadoc-shield]][javadoc-link] [![GitHub Workflow Status][actions-shield]][actions-link] [![Codecov][codecov-shield]][codecov-link] -[![Coverity Scan][coverity-shield]][coverity-link] [![Mentioned in Awesome Bigtable][awesome-shield]][awesome-link] [![CII Best Practices][bestpractices-shield]][bestpractices-link] [![Codacy Badge][codacy-shield]][codacy-link] @@ -31,8 +30,6 @@ can support thousands of concurrent users, complex traversals, and analytic grap [downloads-link]: https://github.com/JanusGraph/janusgraph/releases [codecov-shield]:https://codecov.io/gh/JanusGraph/janusgraph/branch/master/graph/badge.svg [codecov-link]:https://codecov.io/gh/JanusGraph/janusgraph -[coverity-shield]: https://img.shields.io/coverity/scan/janusgraph-janusgraph.svg -[coverity-link]: https://scan.coverity.com/projects/janusgraph-janusgraph ## Learn More @@ -51,6 +48,7 @@ tools: * [Graphexp](https://github.com/bricaud/graphexp) * [Graph Explorer](https://github.com/invanalabs/graph-explorer) * [Gremlin-Visualizer](https://github.com/prabushitha/gremlin-visualizer) +* [G.V() - Gremlin IDE](https://gdotv.com) * [KeyLines by Cambridge Intelligence](https://cambridge-intelligence.com/keylines/janusgraph/) * 
[Linkurious](https://doc.linkurio.us/ogma/latest/tutorials/janusgraph/) + * [ReGraph by Cambridge Intelligence](https://cambridge-intelligence.com/regraph/) diff --git a/TESTING.md b/TESTING.md index e468433285..25abc1baaf 100644 --- a/TESTING.md +++ b/TESTING.md @@ -24,7 +24,7 @@ All of JanusGraph's tests are written for JUnit. JanusGraph's JUnit tests are a | Category Name | Maven Property | Default | Comment | | ------------- | ------------------- |:------------:| ------- | | MEMORY_TESTS | test.skip.mem | true (disabled) | Tests intended to exert memory pressure | -| PERFORMANCE_TESTS | test.skip.perf | true (disabled) | Tests written as simple speed tests using JUnitBenchmarks| +| PERFORMANCE_TESTS | test.skip.perf | true (disabled) | Tests written as simple speed tests| | (No tag) | test.skip.default | false (enabled) | Tests without any Tag annotations | **Tag Name** above is a Java interface defined in the package [org.janusgraph.testcategory](janusgraph-backend-testutils/src/main/java/org/janusgraph/TestCategory.java). These interfaces appear as arguments to the JUnit `@Tag(...)` annotation, e.g. `@Tag(TestCategory.MEMORY_TESTS)`. @@ -87,17 +87,16 @@ mvn clean install -pl janusgraph-solr ``` Additional Maven profiles are defined for testing against default versions of other supported major Solr releases. -(Currently, only Solr 7 and Solr 8 are supported.) +(Currently, only Solr 8 is supported.) ```bash -mvn clean install -pl janusgraph-solr -Psolr7 mvn clean install -pl janusgraph-solr -Psolr8 ``` Finally the `solr.docker.version` property can be used to test against arbitrary Solr versions. 
```bash -mvn clean install -pl janusgraph-solr -Dsolr.docker.version=7.0.0 +mvn clean install -pl janusgraph-solr -Dsolr.docker.version=8.0.0 ``` ## Running Elasticsearch Tests diff --git a/analysis/Dockerfile b/analysis/Dockerfile deleted file mode 100644 index 4e431eb390..0000000000 --- a/analysis/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2017 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM centos:7 - -RUN yum -y update && \ - yum install -y java-1.8.0-openjdk-devel maven git && \ - yum clean all - -WORKDIR /opt/janusgraph -CMD ["analysis/coverity-scan.sh"] diff --git a/analysis/coverity-scan.sh b/analysis/coverity-scan.sh deleted file mode 100755 index 34857d6b61..0000000000 --- a/analysis/coverity-scan.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/bin/bash -eu -# -# Copyright 2017 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -############################################################################## -# -# Usage (defaults to https://github.com/JanusGraph/janusgraph project): -# -# $ cd janusgraph -# $ env COVERITY_SCAN_TOKEN="..." COVERITY_EMAIL="..." \ -# analysis/coverity-scan.sh -# -# This script assumes that: -# -# * you are scanning a GitHub project -# * your project is a Java project using Maven -# * you have a `pom.xml` file at the root of your repo -# * you are running the build on a Linux 64-bit machine -# -# To use for another GitHub repository, run as follows: -# -# $ cd my-repo -# $ env COVERITY_SCAN_TOKEN="..." COVERITY_EMAIL="..." \ -# GITHUB_ORG="..." GITHUB_PROJECT="..." \ -# analysis/coverity-scan.sh -# -# Note: your Coverity project is assumed to be of the form: -# -# "${GITHUB_ORG}/${GITHUB_PROJECT}" -# -# If that is not the case, you can override it as well on the command line via -# the COVERITY_PROJECT env var. -# -# All other settings are either constants in this script or derived from the -# repository, the `pom.xml` file, or other sources. -# -############################################################################## - -declare -r COVERITY_TOOL_TAR_GZ="coverity_tool.tar.gz" -declare -r COVERITY_ANALYSIS_DIR="cov-analysis-linux64" - -# Required to be `cov-int` by Coverity docs: -# https://scan.coverity.com/download -declare -r COVERITY_BUILD_OUTPUT_DIR="cov-int" -declare -r COVERITY_TAR_GZ="cov-int.tar.gz" - -if [ -z "${COVERITY_SCAN_TOKEN:-}" ]; then - echo "Error: env var COVERITY_SCAN_TOKEN not specified; exiting." >&2 - exit 1 -elif [ -z "${COVERITY_EMAIL:-}" ]; then - echo "Error: env var COVERITY_EMAIL not specified; exiting." >&2 - exit 2 -fi - -# Get the version number from the `pom.xml` file rather than hardcoding it here; see -# https://stackoverflow.com/questions/41114695/get-pom-xml-version-with-xmllint -# for more info. 
Note that simpler, traditional XPath-based solutions such as -# https://stackoverflow.com/questions/15461737/how-to-execute-xpath-one-liners-from-shell -# don't work, because Maven's `pom.xml` files use namespaces. -declare -r PROJECT_VERSION="$(xmllint --xpath '/*[local-name()="project"]/*[local-name()="version"]/text()' pom.xml)" -declare -r COVERITY_VERSION="${PROJECT_VERSION}/$(date +'%Y-%m-%dT%H:%M:%S')" - -declare -r GIT_HASH="$(git rev-parse HEAD)" -declare -r COVERITY_DESCRIPTION="Automatic build upload via script; git hash: ${GIT_HASH}" - -declare -r GITHUB_ORG="${GITHUB_ORG:-JanusGraph}" -declare -r GITHUB_PROJECT="${GITHUB_PROJECT:-janusgraph}" -declare -r COVERITY_PROJECT="${GITHUB_ORG}%2F${GITHUB_PROJECT}" - -if ! [ -f "${COVERITY_TOOL_TAR_GZ}" ]; then - echo "Downloading Coverity analysis tool ..." - curl \ - -X POST --data "token=${COVERITY_SCAN_TOKEN}&project=${COVERITY_PROJECT}" \ - -o "${COVERITY_TOOL_TAR_GZ}" -s \ - https://scan.coverity.com/download/linux64 -else - echo "Coverity tool tarball already exists; skipping download." -fi - -if ! [ -d "${COVERITY_ANALYSIS_DIR}" ]; then - mkdir "${COVERITY_ANALYSIS_DIR}" - echo "Uncompressing archive ..." - # Sample file contents are of the form: "./cov-analysis-linux64-2017.07/bin/..." - tar zx --strip-components=2 --directory="${COVERITY_ANALYSIS_DIR}" -f "${COVERITY_TOOL_TAR_GZ}" -else - echo "Coverity tool dir already exists; skipping uncompress step." -fi - -if ! [ -d "${COVERITY_BUILD_OUTPUT_DIR}" ]; then - echo "Running Maven build with Coverity analysis ..." 
- # reconfigure Coverity to skip files that cause build errors - "${COVERITY_ANALYSIS_DIR}"/bin/cov-configure --delete-compiler-config template-javac-config-0 - "${COVERITY_ANALYSIS_DIR}"/bin/cov-configure --delete-compiler-config template-java-config-0 - "${COVERITY_ANALYSIS_DIR}"/bin/cov-configure --java \ - --xml-option=skip_file:AbstractVertex.java \ - --xml-option=skip_file:CacheVertex.java \ - --xml-option=skip_file:CacheVertexProperty.java \ - --xml-option=skip_file:EdgeLabelVertex.java \ - --xml-option=skip_file:GraphDatabaseConfiguration.java \ - --xml-option=skip_file:ImplicitKey.java \ - --xml-option=skip_file:JanusGraph.java \ - --xml-option=skip_file:JanusGraphSchemaVertex.java \ - --xml-option=skip_file:JanusGraphVertex.java \ - --xml-option=skip_file:PreloadedVertex.java \ - --xml-option=skip_file:PropertyKeyVertex.java \ - --xml-option=skip_file:RelationTypeVertex.java \ - --xml-option=skip_file:StandardJanusGraphTx.java \ - --xml-option=skip_file:StandardVertex.java \ - --xml-option=skip_file:StaticArrayBuffer.java \ - --xml-option=skip_file:VertexLabelVertex.java - # run Coverity build - "${COVERITY_ANALYSIS_DIR}"/bin/cov-build \ - --dir "${COVERITY_BUILD_OUTPUT_DIR}" \ - --java-cmd-line-buf-size 102400 \ - mvn -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -B -V clean install -else - echo "Maven build already done; remove the '${COVERITY_BUILD_OUTPUT_DIR}' directory to re-run." -fi - -echo "Creating archive for uploading to Coverity ..." -tar czf "${COVERITY_TAR_GZ}" "${COVERITY_BUILD_OUTPUT_DIR}" - -echo "Uploading results to Coverity ..." -curl \ - --form token="${COVERITY_SCAN_TOKEN}" \ - --form email="${COVERITY_EMAIL}" \ - --form file="@${COVERITY_TAR_GZ}" \ - --form version="${COVERITY_VERSION}" \ - --form description="${COVERITY_DESCRIPTION}" \ - -s \ - "https://scan.coverity.com/builds?project=${COVERITY_PROJECT}" - -echo "Done." 
diff --git a/docs/changelog.md b/docs/changelog.md index 2349a87421..ec0609a6a4 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -28,6 +28,7 @@ All currently supported verions of JanusGraph are listed below. | ----- | ---- | ---- | ---- | ---- | ---- | ---- | --- | ---- | ---- | | 0.5.z | 2 | 2.1.z, 2.2.z, 3.0.z, 3.11.z | 1.2.z, 1.3.z, 1.4.z, 2.1.z | 1.3.0, 1.4.0, 1.5.z, 1.6.z, 1.7.z, 1.8.z, 1.9.z, 1.10.z, 1.11.z, 1.14.z | 6.y, 7.y | 7.y | 3.4.z | 2.2.z | 2.11.z | | 0.6.z | 2 | 3.0.z, 3.11.z | 1.6.z, 2.2.z | 1.3.0, 1.4.0, 1.5.z, 1.6.z, 1.7.z, 1.8.z, 1.9.z, 1.10.z, 1.11.z, 1.14.z | 6.y, 7.y | 7.y, 8.y | 3.5.z | 3.0.z | 2.12.z | +| 1.0.z | 2 | 3.0.z, 3.11.z | 1.6.z, 2.2.z | 1.3.0, 1.4.0, 1.5.z, 1.6.z, 1.7.z, 1.8.z, 1.9.z, 1.10.z, 1.11.z, 1.14.z | 6.y, 7.y | 8.y | 3.5.z | 3.0.z | 2.12.z | #### End-of-Life The versions of JanusGraph listed below are outdated and will no longer receive bugfixes. @@ -41,6 +42,120 @@ The versions of JanusGraph listed below are outdated and will no longer receive ## Release Notes +### Version 1.0.0 (Release Date: ???) 
+ +```xml tab='Maven' + + org.janusgraph + janusgraph-core + 1.0.0 + +``` + +```groovy tab='Gradle' +compile "org.janusgraph:janusgraph-core:1.0.0" +``` + +**Tested Compatibility:** + +* Apache Cassandra 3.0.14, 3.11.10 +* Apache HBase 1.6.0, 2.2.7 +* Google Bigtable 1.3.0, 1.4.0, 1.5.0, 1.6.0, 1.7.0, 1.8.0, 1.9.0, 1.10.0, 1.11.0, 1.14.0 +* Oracle BerkeleyJE 7.5.11 +* Elasticsearch 6.0.1, 6.6.0, 7.14.0 +* Apache Lucene 8.11.1 +* Apache Solr 8.11.1 +* Apache TinkerPop 3.5.1 +* Java 8, 11 + +#### Changes + +For more information on features and bug fixes in 1.0.0, see the GitHub milestone: + +- + +#### Assets + +* [JavaDoc](https://javadoc.io/doc/org.janusgraph/janusgraph-core/1.0.0) +* [GitHub Release](https://github.com/JanusGraph/janusgraph/releases/tag/v1.0.0) +* [JanusGraph zip](https://github.com/JanusGraph/janusgraph/releases/download/v1.0.0/janusgraph-1.0.0.zip) +* [JanusGraph zip with embedded Cassandra and ElasticSearch](https://github.com/JanusGraph/janusgraph/releases/download/v1.0.0/janusgraph-full-1.0.0.zip) + +#### Upgrade Instructions + +##### Drop support for HBase 1 + +We are dropping support for HBase 1. + +##### Drop support for Solr 7 + +We are dropping support for Solr 7. + +##### Breaking change for Geoshape GraphBinary serialization + +Support for the [GraphBinary](http://tinkerpop.apache.org/docs/3.5.1/dev/io/#graphbinary) serialization format was +added in JanusGraph 0.6.0. This also included support to serialize Geoshapes via GraphBinary. The implementation of the +Geoshape serializer was unfortunately closely tied to the Java library `Spatial4j` that we are using to implement +Geoshapes in Java. This made it very complicated to add support for GraphBinary in other languages than Java. To make +it easier to support GraphBinary in non-Java environments like .NET, we have completely reimplemented the GraphBinary +serialization of Geoshapes in this version. 
+ +This is a breaking change for users who have already adopted GraphBinary and who are using Geoshapes. It is necessary +to update JanusGraph Server and all (Java) clients that use GraphBinary at the same time since JanusGraph Server with +an older version will not be able to read a Geoshape created by a client that is already on version 1.0.0 and vice +versa. +Users who do not use GraphBinary yet or who are not using Geoshapes are not affected by this change. + +##### Removal of deprecated classes/methods/functionalities + +###### Methods + +* JanusGraphIndexQuery.vertices replaced by JanusGraphIndexQuery.vertexStream +* JanusGraphIndexQuery.edges replaced by JanusGraphIndexQuery.edgeStream +* JanusGraphIndexQuery.properties replaced by JanusGraphIndexQuery.propertyStream +* IndexQueryBuilder.vertices replaced by IndexQueryBuilder.vertexStream +* IndexQueryBuilder.edges replaced by IndexQueryBuilder.edgeStream +* IndexQueryBuilder.properties replaced by IndexQueryBuilder.propertyStream +* IndexTransaction.query replaced by IndexTransaction.queryStream + +###### Classes/Interfaces + +* EdgeLabelDefinition class +* PropertyKeyDefinition class +* RelationTypeDefinition class +* SchemaContainer class +* SchemaElementDefinition class +* SchemaProvider interface +* VertexLabelDefinition class +* JanusGraphId class +* AllEdgesIterable class +* AllEdgesIterator class +* ConcurrentLRUCache class +* PriorityQueue class +* RemovableRelationIterable class +* RemovableRelationIterator class +* ImmutableConfiguration class + +##### Remove support for old serialization format of JanusGraph predicates + +We are dropping support for old serialization format of JanusGraph predicates. The old predicates serialization format is only used by client older than 0.6. +The change only affects Gryo and GraphSON. + +##### Upgrade of log4j to version 2 + +This change requires a new log4j configuration. You can find an example configuration in `conf/log4j2-server.xml`. 
As a result of the changed configuration format, +we clean up all configurations. This could lead to unexpected new log lines. Please open an issue, if you see any unwanted log line. + +!!! note + Log4j is only used for standalone server deployments and JanusGraph testing. + +##### Add support for Java 11 + +JanusGraph now officially supports Java 11 in addition to Java 8. We encourage everyone to update to Java 11. + +!!! note + The distribution zip archives are however still built with Java 8 since the full distribution includes Cassandra which will only support Java 11 in version 4. + ### Version 0.6.1 (Release Date: January 18, 2022) ```xml tab='Maven' @@ -124,7 +239,7 @@ compile "org.janusgraph:janusgraph-core:0.6.0" **Tested Compatibility:** * Apache Cassandra 3.0.14, 3.11.10 -* Apache HBase 1.6.0, 2.2.7 +* Apache HBase 2.2.7 * Google Bigtable 1.3.0, 1.4.0, 1.5.0, 1.6.0, 1.7.0, 1.8.0, 1.9.0, 1.10.0, 1.11.0, 1.14.0 * Oracle BerkeleyJE 7.5.11 * Elasticsearch 6.0.1, 6.6.0, 7.14.0 @@ -157,7 +272,7 @@ for more details. ##### Breaking change for Configuration objects Prior to JanusGraph 0.6.0, `Configuration` objects were from the Apache `commons-configuration` library. -To comply with the [TinkerPop change](http://tinkerpop.apache.org/docs/3.5.0-SNAPSHOT/upgrade/#_versions_and_dependencies), +To comply with the [TinkerPop change](http://tinkerpop.apache.org/docs/3.5.0/upgrade/#_versions_and_dependencies), JanusGraph now uses the `commons-configuration2` library. A typical usage of configuration object is to create configuration using `ConfigurationGraphFactory`. Now you would need to use the new configuration2 library. Please refer to the @@ -255,6 +370,10 @@ after the keyword `serializers`. This will add the support on the server site. The java driver is the only driver that currently supports GraphBinary, see [Connecting to JanusGraph using Java](interactions/connecting/java.md). +!!! 
note + Version 1.0.0 adds a breaking change to GraphBinary for Geoshape serialization, + see [the 1.0.0 changelog for more information](#breaking-change-for-geoshape-graphbinary-serialization). + ##### New index selection algorithm In version 0.6.0, the index selection algorithm has changed. If the number of possible indexes for a query is small enough, the new algorithm will perform an exhaustive search @@ -394,7 +513,6 @@ distance, where previously they used the backend's default max distance of 2: | surprises | surpprises | true | true | | surprises | surpprisess | false | false | - ### Version 0.5.3 (Release Date: December 24, 2020) === "Maven" diff --git a/docs/configs/janusgraph-cfg.md b/docs/configs/janusgraph-cfg.md index ab28f48a8b..40fe11bc9d 100644 --- a/docs/configs/janusgraph-cfg.md +++ b/docs/configs/janusgraph-cfg.md @@ -19,9 +19,9 @@ Configuration options that modify JanusGraph's caching behavior | Name | Description | Datatype | Default Value | Mutability | | ---- | ---- | ---- | ---- | ---- | | cache.db-cache | Whether to enable JanusGraph's database-level cache, which is shared across all transactions. Enabling this option speeds up traversals by holding hot graph elements in memory, but also increases the likelihood of reading stale data. Disabling it forces each transaction to independently fetch graph elements from storage before reading/writing them. | Boolean | false | MASKABLE | -| cache.db-cache-clean-wait | How long, in milliseconds, database-level cache will keep entries after flushing them. This option is only useful on distributed storage backends that are capable of acknowledging writes without necessarily making them immediately visible. | Integer | 50 | GLOBAL_OFFLINE | +| cache.db-cache-clean-wait | How long, in milliseconds, database-level cache will keep entries after flushing them. 
This option is only useful on distributed storage backends that are capable of acknowledging writes without necessarily making them immediately visible. | Integer | 50 | MASKABLE | | cache.db-cache-size | Size of JanusGraph's database level cache. Values between 0 and 1 are interpreted as a percentage of VM heap, while larger values are interpreted as an absolute size in bytes. | Double | 0.3 | MASKABLE | -| cache.db-cache-time | Default expiration time, in milliseconds, for entries in the database-level cache. Entries are evicted when they reach this age even if the cache has room to spare. Set to 0 to disable expiration (cache entries live forever or until memory pressure triggers eviction when set to 0). | Long | 10000 | GLOBAL_OFFLINE | +| cache.db-cache-time | Default expiration time, in milliseconds, for entries in the database-level cache. Entries are evicted when they reach this age even if the cache has room to spare. Set to 0 to disable expiration (cache entries live forever or until memory pressure triggers eviction when set to 0). | Long | 10000 | MASKABLE | | cache.tx-cache-size | Maximum size of the transaction-level cache of recently-used vertices. | Integer | 20000 | MASKABLE | | cache.tx-dirty-size | Initial size of the transaction-level cache of uncommitted dirty vertices. This is a performance hint for write-heavy, performance-sensitive transactional workloads. If set, it should roughly match the median vertices modified per transaction. | Integer | (no default value) | MASKABLE | @@ -514,7 +514,6 @@ HBase storage options | Name | Description | Datatype | Default Value | Mutability | | ---- | ---- | ---- | ---- | ---- | -| storage.hbase.compat-class | The package and class name of the HBaseCompat implementation. HBaseCompat masks version-specific HBase API differences. When this option is unset, JanusGraph calls HBase's VersionInfo.getVersion() and loads the matching compat class at runtime. 
Setting this option forces JanusGraph to instead reflectively load and instantiate the specified class. | String | (no default value) | MASKABLE | | storage.hbase.compression-algorithm | An HBase Compression.Algorithm enum string which will be applied to newly created column families. The compression algorithm must be installed and available on the HBase cluster. JanusGraph cannot install and configure new compression algorithms on the HBase cluster by itself. | String | GZ | MASKABLE | | storage.hbase.region-count | The number of initial regions set when creating JanusGraph's HBase table | Integer | (no default value) | MASKABLE | | storage.hbase.regions-per-server | The number of regions per regionserver to set when creating JanusGraph's HBase table | Integer | (no default value) | MASKABLE | diff --git a/docs/index-backend/direct-index-query.md b/docs/index-backend/direct-index-query.md index 5be6582dc0..9790a8f891 100644 --- a/docs/index-backend/direct-index-query.md +++ b/docs/index-backend/direct-index-query.md @@ -30,9 +30,9 @@ The builder allows configuration of the maximum number of elements to be returned via its `limit(int)` method. The builder’s `offset(int)` controls number of initial matches in the result set to skip. To retrieve all vertex or edges matching the given query in the specified -indexing backend, invoke `vertices()` or `edges()`, respectively. It is +indexing backend, invoke `vertexStream()` or `edgeStream()`, respectively. It is not possible to query for both vertices and edges at the same time. -These methods return an `Iterable` over `Result` objects. A result +These methods return a `Stream` over `Result` objects. A result object contains the matched handle, retrievable via `getElement()`, and the associated score - `getScore()`. @@ -43,7 +43,7 @@ PropertyKey text = mgmt.makePropertyKey("text").dataType(String.class).make(); mgmt.buildIndex("vertexByText", Vertex.class).addKey(text).buildMixedIndex("search"); mgmt.commit(); // ... 
Load vertices ... -for (Result result : graph.indexQuery("vertexByText", "v.text:(farm uncle berry)").vertices()) { +for (Result result : graph.indexQuery("vertexByText", "v.text:(farm uncle berry)").vertexStream()) { System.out.println(result.getElement() + ": " + result.getScore()); } ``` @@ -66,8 +66,9 @@ Refer to the [Lucene documentation](http://lucene.apache.org/core/4_1_0/querypar or the [Elasticsearch documentation](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html) for more information. The query used in the example above follows the Lucene query syntax. - - graph.indexQuery("vertexByText", "v.text:(farm uncle berry)").vertices() +```groovy +graph.indexQuery("vertexByText", "v.text:(farm uncle berry)").vertexStream() +``` This query matches all vertices where the text contains any of the three words (grouped by parentheses) and score matches higher the more words @@ -98,7 +99,7 @@ graph.indexQuery("vertexByText", "v.text:(farm uncle berry)").vertexTotals() Names of property keys that contain non-alphanumeric characters must be placed in quotation marks to ensure that the query is parsed correctly. ```groovy -graph.indexQuery("vertexByText", "v.\"first_name\":john").vertices() +graph.indexQuery("vertexByText", "v.\"first_name\":john").vertexStream() ``` Some property key names may be transformed by the JanusGraph indexing @@ -125,14 +126,14 @@ value contains the same sequence of characters, this can cause a collision in the query string and parsing errors as in the following example: ```groovy -graph.indexQuery("vertexByText", "v.name:v.john").vertices() //DOES NOT WORK! +graph.indexQuery("vertexByText", "v.name:v.john").vertexStream() //DOES NOT WORK! 
``` To avoid such identifier collisions, use the `setElementIdentifier` method to define a unique element identifier string that does not occur in any other parts of the query: ```groovy -graph.indexQuery("vertexByText", "$v$name:v.john").setElementIdentifier("$v$").vertices() +graph.indexQuery("vertexByText", "$v$name:v.john").setElementIdentifier("$v$").vertexStream() ``` ### Mixed Index Availability Delay diff --git a/docs/index-backend/solr.md b/docs/index-backend/solr.md index f91ad4a134..7032974e8c 100644 --- a/docs/index-backend/solr.md +++ b/docs/index-backend/solr.md @@ -466,7 +466,7 @@ dse cassandra -s -Ddse.solr.data.dir="$DSE_HOME"/dse-data/solr The previous command will write some startup information to the console and to the logfile path `log4j.appender.R.File` configured in -`$DSE_HOME/resources/cassandra/conf/log4j-server.properties`. +`$DSE_HOME/resources/cassandra/conf/log4j2-server.xml`. Once DSE with Cassandra and Solr has started normally, check the cluster health with `nodetool status`. 
A single-instance ring should show one diff --git a/docs/interactions/search-predicates.md b/docs/interactions/search-predicates.md index 3b93c441ec..4590ffe81e 100644 --- a/docs/interactions/search-predicates.md +++ b/docs/interactions/search-predicates.md @@ -139,7 +139,7 @@ Geoshape.geoshape(Geoshape.getGeometryCollectionBuilder() .add(Geoshape.getShapeFactory().pointXY(60.0, 60.0)) .add(Geoshape.getShapeFactory().lineString().pointXY(119.0, 60.0).pointXY(121.0, 60.0).build()) .add(Geoshape.getShapeFactory().polygon().pointXY(119.0, 59.0).pointXY(121.0, 59.0) - .pointXY(121.0, 61.0).pointXY(119.0, 61.0).pointXY(119.0, 59.0)).build()) + .pointXY(121.0, 61.0).pointXY(119.0, 61.0).pointXY(119.0, 59.0).build()).build()) ``` In addition, when importing a graph via GraphSON the geometry may be represented by GeoJSON: diff --git a/docs/operations/management.md b/docs/operations/management.md index e3264c4825..952132f970 100644 --- a/docs/operations/management.md +++ b/docs/operations/management.md @@ -1,15 +1,54 @@ # Management System -!!! warning - This site is massivly under development. +JanusGraph Management System provides methods to define, update, and inspect the schema of +a JanusGraph graph, and more. Checkout the +JanusGraph Management [API documentation](https://javadoc.io/doc/org.janusgraph/janusgraph-core/{{ latest_version }}/org/janusgraph/core/schema/JanusGraphManagement.html) +for all core APIs available. -This section will describe how to interact with the ManagementSystem. -Therefore, if you want interact with the ManagementSystem checkout the -JanusGraph [API documentation](https://javadoc.io/doc/org.janusgraph/janusgraph-core/{{ latest_version }}) which shows all core APIs exposed by JanusGraph. - -You can checkout the schema and index management pages for some examples, see [schema management](../schema/index.md) and [index management](../schema/index-management/index-lifecycle.md). 
+JanusGraph Management System behaves like a transaction in that it opens a transactional scope. As such, it needs to +be closed via its commit or rollback methods, unless otherwise specified. +```groovy +mgmt = graph.openManagement() +// do something +mgmt.commit() +``` !!! note - We **strongly** encourage all users of JanusGraph to use the Gremlin query - language for any queries executed on JanusGraph and to not use - JanusGraph’s APIs outside of the management system. + We **strongly** encourage all users of JanusGraph not to use JanusGraph’s APIs outside of the management system, + and always use standard Gremlin query language for any queries. + +## Schema Management + +Management System allows you to view, update, and create vertex labels, edge labels, and property keys. +See [schema management](../schema/index.md) for details. + +## Index Management + +Management System allows you to manage both vertex-centric indexes and graph indexes. See +[index management](../schema/index-management/index-performance.md) for details. + +## Consistency Management + +Management System allows you to set the consistency level of individual schema elements. See +[Eventual Consistency](../advanced-topics/eventual-consistency.md) for details. + +## Ghost Vertex Removal + +Management System allows you to purge [ghost vertices](../advanced-topics/eventual-consistency.md#ghost-vertices) +in the graph. It uses a local thread pool to initiate multiple threads to scan your entire graph, detecting and +purging ghost vertices as well as their incident edges, leveraging +[GhostVertexRemover](https://javadoc.io/doc/org.janusgraph/janusgraph-core/{{ latest_version }}/org/janusgraph/graphdb/olap/job/GhostVertexRemover.html) +By default, the concurrency level is the number of available +processors on your machine. You can also configure the number of threads as shown in the example below. If your graph +is huge, you could consider running GhostVertexRemover on a MapReduce cluster. 
+```groovy +mgmt = graph.openManagement() +// by default, concurrency level = the number of available processors +mgmt.removeGhostVertices().get() +// alternatively, you could also configure the concurrency +mgmt.removeGhostVertices(4).get() +// it is not necessary to commit here, since GhostVertexRemover commits +// periodically and automatically, but it is a good habit to do so +// calling rollback() won't really rollback the ghost vertex removal process +mgmt.commit() +``` diff --git a/docs/schema/index-management/index-reindexing.md b/docs/schema/index-management/index-reindexing.md index 938f936f6b..8d3293874e 100644 --- a/docs/schema/index-management/index-reindexing.md +++ b/docs/schema/index-management/index-reindexing.md @@ -149,10 +149,10 @@ graph = JanusGraphFactory.open("conf/janusgraph-cql-es.properties") g.V().has("desc", containsText("baz")) ``` -## Executing a Reindex job on JanusGraphManagement +## Executing a Reindex job on ManagementSystem -To run a reindex job on JanusGraphManagement, invoke -`JanusGraphManagement.updateIndex` with the `SchemaAction.REINDEX` +To run a reindex job on ManagementSystem, invoke +`ManagementSystem.updateIndex` with the `SchemaAction.REINDEX` argument. For example: ```groovy m = graph.openManagement() @@ -161,11 +161,21 @@ m.updateIndex(i, SchemaAction.REINDEX).get() m.commit() ``` -### Example for JanusGraphManagement +ManagementSystem uses a local thread pool to run reindexing +jobs concurrently. By default, the concurrency level equals +the number of available processors. 
If you want to change the +concurrency level, you can add a parameter like this: +```groovy +// only use one thread to run reindexing +m.updateIndex(i, SchemaAction.REINDEX, 1).get() +``` + + +### Example for ManagementSystem The following loads some sample data into a BerkeleyDB-backed JanusGraph database, defines an index after the fact, reindexes using -JanusGraphManagement, and finally enables and uses the index: +ManagementSystem, and finally enables and uses the index: ```groovy import org.janusgraph.graphdb.database.management.ManagementSystem diff --git a/docs/schema/index-management/index-removal.md b/docs/schema/index-management/index-removal.md index 08e036d0ea..1abc0b5abc 100644 --- a/docs/schema/index-management/index-removal.md +++ b/docs/schema/index-management/index-removal.md @@ -50,10 +50,10 @@ After a composite index is `DISABLED`, there is a choice between two execution frameworks for its removal: - MapReduce -- JanusGraphManagement +- ManagementSystem Index removal on MapReduce supports large, horizontally-distributed -databases. Index removal on JanusGraphManagement spawns a single-machine +databases. Index removal on ManagementSystem spawns a single-machine OLAP job. This is intended for convenience and speed on those databases small enough to be handled by one machine. @@ -124,10 +124,10 @@ m.rollback() g.V().has('name', 'jupiter') ``` -## Executing an Index Removal job on JanusGraphManagement +## Executing an Index Removal job on ManagementSystem -To run an index removal job on JanusGraphManagement, invoke -`JanusGraphManagement.updateIndex` with the `SchemaAction.REMOVE_INDEX` +To run an index removal job on ManagementSystem, invoke +`ManagementSystem.updateIndex` with the `SchemaAction.REMOVE_INDEX` argument. 
For example: ```groovy m = graph.openManagement() @@ -136,11 +136,20 @@ m.updateIndex(i, SchemaAction.REMOVE_INDEX).get() m.commit() ``` -### Example for JanusGraphManagement +Similar to reindex, ManagementSystem uses a local thread pool to +execute index removal job concurrently. The concurrency level is +equal to the number of available processors. If you want to change the +default concurrency level, you can add a parameter as follows: +```groovy +// Use only one thread to execute index removal job +m.updateIndex(i, SchemaAction.REMOVE_INDEX, 1).get() +``` + +### Example for ManagementSystem The following loads some indexed sample data into a BerkeleyDB-backed JanusGraph database, then disables and removes the index through -JanusGraphManagement: +ManagementSystem: ```groovy import org.janusgraph.graphdb.database.management.ManagementSystem diff --git a/docs/storage-backend/cassandra.md b/docs/storage-backend/cassandra.md index 0609970c49..c44a7ed0aa 100644 --- a/docs/storage-backend/cassandra.md +++ b/docs/storage-backend/cassandra.md @@ -40,8 +40,7 @@ Cassandra communicate with one another via a `localhost` socket. Running JanusGraph over Cassandra requires the following setup steps: 1. [Download Cassandra](http://cassandra.apache.org/download/), unpack - it, and set filesystem paths in `conf/cassandra.yaml` and - `conf/log4j-server.properties` + it, and set filesystem paths in `conf/cassandra.yaml`. 2. Connecting Gremlin Server to Cassandra using the default configuration files provided in the pre-packaged distribution. 3. 
Start Cassandra by invoking `bin/cassandra -f` on the command line diff --git a/janusgraph-all/pom.xml b/janusgraph-all/pom.xml index a7a12c2838..5669c1dfc0 100644 --- a/janusgraph-all/pom.xml +++ b/janusgraph-all/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-all @@ -205,13 +205,16 @@ - org.slf4j - slf4j-log4j12 + org.apache.logging.log4j + log4j-slf4j-impl + runtime true ch.qos.logback logback-classic + runtime + true @@ -253,64 +256,5 @@ - - hbase1 - - - org.apache.hbase - hbase-shaded-client - ${hbase1.version} - - - junit - junit - - - - - org.apache.hbase - hbase-shaded-server - ${hbase1.version} - - - junit - junit - - - - - - - hbase2 - - - !hbase.profile - - - - - org.apache.hbase - hbase-shaded-client - ${hbase2.version} - - - junit - junit - - - - - org.apache.hbase - hbase-shaded-mapreduce - ${hbase2.version} - - - junit - junit - - - - - diff --git a/janusgraph-all/src/test/resources/log4j.properties b/janusgraph-all/src/test/resources/log4j.properties deleted file mode 100644 index d5467091fe..0000000000 --- a/janusgraph-all/src/test/resources/log4j.properties +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. -#log4j.appender.A1=org.apache.log4j.ConsoleAppender -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log - -# A1 uses PatternLayout. 
-log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -# Set root logger level to the designated level and its only appender to A1. -log4j.rootLogger=DEBUG, A1 diff --git a/janusgraph-all/src/test/resources/log4j2-test.xml b/janusgraph-all/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-all/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-backend-testutils/pom.xml b/janusgraph-backend-testutils/pom.xml index 971e5e49fd..33530d7513 100644 --- a/janusgraph-backend-testutils/pom.xml +++ b/janusgraph-backend-testutils/pom.xml @@ -4,7 +4,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-backend-testutils @@ -15,6 +15,17 @@ ${basedir}/.. + + + org.apache.logging.log4j + log4j-slf4j-impl + + + org.apache.logging.log4j + log4j-core + + + org.janusgraph janusgraph-core @@ -48,14 +59,6 @@ org.junit.vintage junit-vintage-engine - - com.carrotsearch - junit-benchmarks - - - com.carrotsearch.randomizedtesting - randomizedtesting-runner - org.mockito mockito-core @@ -76,11 +79,6 @@ io.github.artsok rerunner-jupiter - - ch.qos.logback - logback-classic - true - @@ -90,29 +88,6 @@ - - - org.codehaus.gmavenplus - gmavenplus-plugin - ${gmavenplus.version} - - - - addSources - addTestSources - generateStubs - compile - generateTestStubs - compileTests - removeStubs - removeTestStubs - - - - - true - - maven-surefire-plugin diff --git a/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/GroovySpeedTestSupport.groovy b/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/GroovySpeedTestSupport.groovy deleted file mode 100644 index 48c4492493..0000000000 --- 
a/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/GroovySpeedTestSupport.groovy +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2019 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb - -import org.janusgraph.diskstorage.configuration.WriteConfiguration -import org.janusgraph.graphdb.query.QueryUtil -import org.apache.tinkerpop.gremlin.util.Gremlin -import org.junit.jupiter.api.BeforeEach -import org.junit.jupiter.api.AfterEach - -import org.slf4j.Logger -import org.slf4j.LoggerFactory - -import com.google.common.base.Preconditions -import org.janusgraph.core.JanusGraphVertex -import org.janusgraph.core.JanusGraph -import org.janusgraph.graphdb.database.StandardJanusGraph -import org.janusgraph.diskstorage.BackendException - -import com.google.common.collect.Iterables - -import java.util.zip.GZIPInputStream - -abstract class GroovySpeedTestSupport { - - private static final Logger log = LoggerFactory.getLogger(GroovySpeedTestSupport) - - // Graph generation settings - public static final int VERTEX_COUNT = SpeedTestSchema.VERTEX_COUNT - public static final int EDGE_COUNT = SpeedTestSchema.EDGE_COUNT - - // Query execution setting defaults - public static final int DEFAULT_TX_COUNT = 3 - public static final int DEFAULT_VERTICES_PER_TX = 100 - public static final int DEFAULT_ITERATIONS = DEFAULT_TX_COUNT * DEFAULT_VERTICES_PER_TX - - public static final String RELATION_FILE = 
"../janusgraph-test/data/v10k.graphml.gz" - - // Mutable state - - /* JUnit constructs a new test class instance before executing each test method. - * Ergo, each test method gets its own Random instance. - * The seed is arbitrary and carries no special significance, - * but we keep the see fixed for repeatability. - */ - protected Random random = new Random(7) - protected SpeedTestSchema schema - protected JanusGraph graph - protected WriteConfiguration conf - - static { - Gremlin.load() - } - - GroovySpeedTestSupport(WriteConfiguration conf) throws BackendException { - this.conf = conf - } - - @BeforeEach - void open() { -// Preconditions.checkArgument(TX_COUNT * DEFAULT_OPS_PER_TX <= VERTEX_COUNT); - - if (null == graph) { - try { - graph = getGraph() - } catch (BackendException e) { - throw new RuntimeException(e) - } - } - if (null == schema) { - schema = getSchema() - } - } - - @AfterEach - void rollback() { - if (null != graph) - graph.rollback() - } - - void close() { - if (null != graph) - graph.shutdown() - } - - protected abstract StandardJanusGraph getGraph() throws BackendException - - protected abstract SpeedTestSchema getSchema() - - /* - * Helper methods - */ - - protected void sequentialUidTask(int verticesPerTx = DEFAULT_VERTICES_PER_TX, closure) { - chunkedSequentialUidTask(1, verticesPerTx, { tx, vbuf, vloaded -> - assert 1 == vloaded - assert 1 == vbuf.length - def v = vbuf[0] - closure.call(tx, v) - }) - } - - protected void chunkedSequentialUidTask(int chunksize = DEFAULT_VERTICES_PER_TX, int verticesPerTx = DEFAULT_VERTICES_PER_TX, closure) { - - /* - * Need this condition because of how we handle transactions and buffer - * Vertex objects. If this divisibility constraint were violated, then - * we would end up passing Vertex instances from one or more committed - * transactions as if those instances were not stale. 
- */ - Preconditions.checkArgument(0 == verticesPerTx % chunksize) - - long count = DEFAULT_TX_COUNT * verticesPerTx - long offset = Math.abs(random.nextLong()) % schema.getMaxUid() - def uids = new SequentialLongIterator(count, schema.getMaxUid(), offset) - def tx = graph.newTransaction() - JanusGraphVertex[] vbuf = new JanusGraphVertex[chunksize] - int vloaded = 0 - - while (uids.hasNext()) { - long u = uids.next() - JanusGraphVertex v = Iterables.getOnlyElement(QueryUtil.getVertices(tx,Schema.UID_PROP, u)) - assertNotNull(v) - vbuf[vloaded++] = v - if (vloaded == chunksize) { - closure.call(tx, vbuf, vloaded) - vloaded = 0 - tx.commit() - tx = graph.newTransaction() - } - } - - if (0 < vloaded) { - closure.call(tx, vbuf, vloaded) - tx.commit() - } else { - tx.rollback() - } - } - - protected void supernodeTask(closure) { - long uid = schema.getSupernodeUid() - String label = schema.getSupernodeOutLabel() - assertNotNull(label) - String pkey = schema.getSortKeyForLabel(label) - assertNotNull(pkey) - - def tx = graph.newTransaction() - def v = Iterables.getOnlyElement(QueryUtil.getVertices(tx,Schema.UID_PROP, uid)) -// def v = graph.V(Schema.UID_PROP, uid).next() - assertNotNull(v) - closure(v, label, pkey) - tx.commit() - } - - protected void standardIndexEdgeTask(closure) { - final int keyCount = schema.getEdgePropKeys() - - def tx = graph.newTransaction() - int value = -1 - for (int p = 0; p < schema.getEdgePropKeys(); p++) { - for (int i = 0; i < 5; i++) { - if (++value >= schema.getMaxEdgePropVal()) - value = 0 - closure(tx, schema.getEdgePropertyName(p), value) - } - } - tx.commit() - } - - protected void standardIndexVertexTask(closure) { - final int keyCount = schema.getVertexPropKeys() - - def tx = graph.newTransaction() - int value = -1 - for (int p = 0; p < schema.getVertexPropKeys(); p++) { - for (int i = 0; i < 5; i++) { - if (++value >= schema.getMaxVertexPropVal()) { - value = 0 - } - closure(tx, schema.getVertexPropertyName(p), value) - } - - } - 
tx.commit() - } - - protected void initializeGraph(JanusGraph g) throws BackendException { - log.info("Initializing graph...") - long before = System.currentTimeMillis() - SpeedTestSchema schema = getSchema() - - try { - InputStream data = new GZIPInputStream(new FileInputStream(RELATION_FILE)) - schema.makeTypes(g) - GraphMLReader.inputGraph(g, data) - } catch (IOException e) { - throw new RuntimeException(e) - } - long after = System.currentTimeMillis() - long duration = after - before - if (15 * 1000 <= duration) { - log.warn("Initialized graph (" + duration + " ms).") - } else { - log.info("Initialized graph (" + duration + " ms).") - } - } -} diff --git a/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/JanusGraphSpeedTest.groovy b/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/JanusGraphSpeedTest.groovy deleted file mode 100644 index 9d44c0a00e..0000000000 --- a/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/JanusGraphSpeedTest.groovy +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright 2019 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.graphdb - -import org.janusgraph.TestCategory -import org.janusgraph.diskstorage.configuration.WriteConfiguration -import org.janusgraph.graphdb.util.ElementHelper -import org.junit.jupiter.api.Tag - -import static org.junit.Assert.* - -import org.junit.FixMethodOrder -import org.junit.Rule -import org.junit.Test -import org.junit.rules.TestRule -import org.junit.runners.MethodSorters -import org.slf4j.Logger -import org.slf4j.LoggerFactory - -import com.carrotsearch.junitbenchmarks.BenchmarkOptions -import com.google.common.base.Preconditions -import com.google.common.collect.Iterables -import org.janusgraph.core.JanusGraphEdge -import org.janusgraph.core.JanusGraphTransaction -import org.janusgraph.core.JanusGraphVertex -import org.janusgraph.testutil.JUnitBenchmarkProvider -import org.janusgraph.diskstorage.BackendException - - -/** - * This class was formerly known as GroovySerialTest. - * Several issues and commitlogs refer to it that way. - * - */ -@FixMethodOrder(MethodSorters.NAME_ASCENDING) -@BenchmarkOptions(warmupRounds = 1, benchmarkRounds = 1) -@Tag(TestCategory.PERFORMANCE_TESTS) -abstract class JanusGraphSpeedTest extends GroovySpeedTestSupport { - - private static final Logger log = LoggerFactory.getLogger(JanusGraphSpeedTest) - - @Rule - public TestRule benchmark = JUnitBenchmarkProvider.get() - - JanusGraphSpeedTest(WriteConfiguration conf) throws BackendException { - super(conf) - } - - /* - * Summary of graph schema and data - * - * - * - 10k vertices - * - 50k edges - * - 3 edge labels - * - 10 out-unique property keys with standard index for edges - * - 20 out-unique property keys with standard index for vertices - * - one both-unique, standard-indexed vertex property keycalled "uid" - * - * The edges are all directed. Each (edge label, edge direction) pair - * forms a scale-free graph. 
There is one exception to this rule: after - * generating scale free distributions, the generator adds one more - * vertex with an outgoing edge to every other vertex in the graph. - * Its permanent ID is returned by schema.getSupernodeUid(). All its - * outgoing edges have the same label. The label is returned by - * schema.getSupernodeOutLabel(). - * - * The "uid" property uniquely identifies each vertex globally and - * exists to allow standard index lookups and iteration. The minimum - * value is 0 (the supernode). The maximum value is - * schema.getMaxUid() - 1 -- that is, getMaxUid() is exclusive. - * - * The labels are all primary keyed. Their names are returned by - * schema.getEdgeLabelName(n), where n is on the interval - * [0, schema.getEdgeLabels()). The names of the primary keys are - * returned by schema.getPrimaryKeyForLabel(String). - * - * Each vertex or edge has property values set on a randomly-sized - * subset of the keys available for vertices or edges, respectively. - * - * All properties except uid are integer-valued and have values - * on these intervals: - * - * - vertices: [0, schema.getMaxVertexPropVal()) - * - edges: [0, schema.getMaxEdgePropVal()) - * - * The values were generated with a random distribution. - * - * In each case above, n is a numerical index corresponding to a key. - * The index n takes these values: - * - * - vertices: [0, schema.getVertexPropKeys()) - * - edges: [0, schema.getEdgePropKeys()) - * - * The same n indexes retrieve the property key names when passed - * to these methods: - * - * - vertices: schema.getVertexPropertyName(n) - * - edges: schema.getEdgePropertyName(n) - * - * The graph was generated and written to a file with - * c.t.janusgraph.testutil.gen.{Schema, GraphGenerator}. - */ - - @Test - void testVertexUidLookup() throws Exception { - sequentialUidTask { tx, vertex -> assertNotNull(vertex) } - } - - /** - * Query for edges using a vertex-centric index on a fixed supernode. 
- * - */ - @Test - void testVertexCentricIndexQuery() { - - final long maxUid = 1000L // exclusive - final long minUid = 1L // inclusive - - Preconditions.checkArgument(maxUid - minUid <= VERTEX_COUNT) - - supernodeTask { v, indexLabel, indexPK -> - - def c = v.outE(indexLabel) - .has(indexPK, T.gte, 25) - .has(indexPK, T.lt, 75) - .count() - assertEquals(50, c) - - c = v.outE(indexLabel) - .has(indexPK, T.gte, 125) - .has(indexPK, T.lt, 225) - .count() - assertEquals(100, c) - - c = v.outE(indexLabel) - .has(indexPK, T.gte, 500) - .has(indexPK, T.lt, 1000) - .count() - assertEquals(500, c) - - c = v.outE(indexLabel) - .has(indexPK, T.gt, 0) - .has(indexPK, T.lt, 2) - .count() - assertEquals(1, c) - } - } - - @Test - void testLabeledEdgeTraversal() { - int i = 0 - supernodeTask { v, indexLabel, indexPK -> - int start = 100 * i++ - int end = start + 99 - def c = v.outE(indexLabel)[start..end].inV().outE(indexLabel).inV().outE(indexLabel).count() - assertTrue(0 < c) - } - } - - @Test - void testEdgeTraversalUsingVertexCentricIndex() { - supernodeTask { v, label, pkey -> - def c = v.outE(label) - .has(pkey, T.gte, 0).has(pkey, T.lte, 100) - .inV() - .outE(label) - .inV() - .outE(label) - .count() - assertTrue(0 < c) - } - } - - @Test - void testLimitedGlobalEdgePropertyQuery() { - standardIndexEdgeTask { tx, indexedPropName, indexedPropVal -> - int n = Iterables.size(tx.query().limit(1).has(indexedPropName, indexedPropVal).edges()) - assertTrue(0 <= n) - assertTrue(n <= 1) - } - } - - @Test - void testLimitedGlobalVertexPropertyQuery() { - standardIndexVertexTask { tx, indexedPropName, indexedPropVal -> - int n = Iterables.size(tx.query().limit(1).has(indexedPropName, indexedPropVal).vertices()) - assertTrue(0 <= n) - assertTrue(n <= 1) - } - } - - @Test - void testGlobalVertexPropertyQuery() { - standardIndexVertexTask { tx, indexedPropName, indexedPropVal -> - int n = Iterables.size(tx.query().has(indexedPropName, indexedPropVal).vertices()) - assertTrue(0 < n) - } 
- } - - @Test - void testGlobalEdgePropertyQuery() { - standardIndexEdgeTask { tx, indexedPropName, indexedPropVal -> - int n = Iterables.size(tx.query().has(indexedPropName, indexedPropVal).edges()) - assertTrue(0 < n) - } - } - - @Test - void testMultiVertexQuery() { - chunkedSequentialUidTask(50, 50, this.&multiVertexQueryTask) - } - - @Test - void testPathologicalMultiVertexQuery() { - chunkedSequentialUidTask(1, 50, this.&multiVertexQueryTask) - } - - @Test - void testSingleVertexQuery() { - sequentialUidTask(50, this.&singleVertexQueryTask) - } - - @Test - void testSingleVertexMultiProperty() { - sequentialUidTask(50, { tx, v -> - int c = 0 - for (int i = 0; i < schema.getVertexPropKeys(); i++) { - if (v.valueOrNull(schema.getVertexPropertyName(i)) != null) { - c++ - } - } - def k = ElementHelper.getPropertyKeys(v) - 1 - assertTrue(k + "vs" + c, k <= c) - assertTrue(0 <= c) - }) - } - - @Test - void testSingleVertexProperty() { - sequentialUidTask(50, { tx, v -> - assertNotNull(v.valueOrNull(Schema.UID_PROP)) - }) - } - - /* - * I'm prefixing test methods that modify the graph with "testZ". In - * conjunction with JUnit's @FixMethodOrder annotation, this makes - * graph-mutating test methods run after the rest of the test methods. 
- */ - - @Test - void testZVertexPropertyModification() { - int propsModified = 0 - int visited = 0 - int n = 314159 - sequentialUidTask { tx, vertex -> - visited++ - for (p in ElementHelper.getPropertyKeys(v)) { - if (p.equals(Schema.UID_PROP)) { - continue - } - int old = vertex.valueOrNull(p) - vertex.property(p).remove() - vertex.property(p, old * n) - n *= n - propsModified++ - break - } - } - assertTrue(0 < propsModified) - } - - @Test - void testZEdgeAddition() { - int edgesAdded = 0 - int skipped = 0 - long last = -1 - String labelName = schema.getEdgeLabelName(0) - sequentialUidTask { tx, vertex -> - if (-1 != last && last != vertex.id()) { - JanusGraphVertex target = tx.v(last) - vertex.addEdge(labelName, target) - edgesAdded++ - } else { - skipped++ - } - last = vertex.id() - } - assertTrue(0 < edgesAdded + skipped) - assertTrue(edgesAdded > skipped) - } - - /** - * Retrieve a vertex by randomly chosen uid, then remove the vertex. After - * removing all vertices, add new vertices with the same uids as those - * removed (but no incident edges or properties besides uid) - * - */ -// @Test -// void testVertexRemoval() { -// -// Set visited = new HashSet(); -// commitTx({ txIndex, tx -> -// long uid -// Vertex v = null -// while (null == v) { -// uid = Math.abs(random.nextLong()) % gen.getMaxUid(); -// JanusGraphKey uidKey = tx.getPropertyKey(Schema.UID_PROP); -// v = tx.getVertex(uidKey, uid); -// } -// assertNotNull(v) -// tx.removeVertex(v) -// visited.add(uid) -// }) -// -// def tx = graph.newTransaction() -// // Insert new vertices with the same uids as removed vertices, but no edges or properties besides uid -// JanusGraphKey uidKey = tx.getPropertyKey(Schema.UID_PROP) -// for (long uid : visited) { -// JanusGraphVertex v = tx.addVertex() -// v.setProperty(uidKey, uid) -// } -// tx.commit() -// } - - /** - * JUnitBenchmarks appears to include {@code Before} method execution in round-avg times. 
- * This method has no body and exists only to measure that overhead. - */ - @Test - static void testNoop() { - // Do nothing - log.debug("Noop test executed") - } - - private void multiVertexQueryTask(JanusGraphTransaction tx, JanusGraphVertex[] vbuf, int vcount) { - if (vcount != vbuf.length) { - def newbuf = new JanusGraphVertex[vcount] - for (int i = 0; i < vcount; i++) { - newbuf[i] = vbuf[i] - Preconditions.checkArgument(null != newbuf[i]) - } - vbuf = newbuf - } - - // I tried labels(schema.edgeLabelNames), but it causes a - // Preconditions failure because Query.isQueryNormalForm returns false - int n = 0 - for (int i = 0; i < schema.edgeLabels; i++) { - Map> m = tx.multiQuery(vbuf).labels(schema.edgeLabelNames[i]).edges() - for (Iterable iter : m.values()) { - for (JanusGraphEdge e : iter) { - n++ - } - } - } - assertTrue(0 < n) - } - - private void singleVertexQueryTask(JanusGraphTransaction tx, JanusGraphVertex v) { - int n = 0 - for (int i = 0; i < schema.edgeLabels; i++) { - for (JanusGraphEdge iter : v.query().labels(schema.edgeLabelNames[i]).edges()) { - n++ - } - } - assertTrue(0 < n) - } -} diff --git a/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/LongIterator.groovy b/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/LongIterator.groovy deleted file mode 100644 index f249294e29..0000000000 --- a/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/LongIterator.groovy +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb - -/** - * This interface exists solely to avoid autoboxing primitive longs with Long. - * - * A standard-library alternative to this interface is slated for JDK 8. - * It currently exists in beta builds as java.util.stream.LongStream.LongIterator. - */ -interface LongIterator { - long next() - boolean hasNext() -} - -/** - * Returns a sequence of longs modulo some fixed limit and starting at some fixed value. - */ -class SequentialLongIterator implements LongIterator { - - private long i = 0L - private long offset - private long count - private long max - - SequentialLongIterator(long count, long max, long offset) { - this.count = count - this.max = max - this.offset = offset - } - - long next() { - (offset + i++) % max - } - - boolean hasNext() { - i < count - } -} diff --git a/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/SpeedTestSchema.groovy b/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/SpeedTestSchema.groovy deleted file mode 100644 index 8e836065fa..0000000000 --- a/janusgraph-backend-testutils/src/main/groovy/org/janusgraph/graphdb/SpeedTestSchema.groovy +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2019 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.graphdb - -import com.google.common.base.Preconditions -import org.janusgraph.core.Cardinality -import org.janusgraph.core.PropertyKey -import org.janusgraph.core.JanusGraph -import org.janusgraph.core.schema.ConsistencyModifier -import org.janusgraph.core.schema.JanusGraphManagement -import org.janusgraph.graphdb.types.StandardEdgeLabelMaker -import org.apache.tinkerpop.gremlin.structure.Edge -import org.apache.tinkerpop.gremlin.structure.Vertex -import org.slf4j.Logger -import org.slf4j.LoggerFactory - -class SpeedTestSchema { - - // Graph element counts - public static final int VERTEX_COUNT = 10 * 100 - public static final int EDGE_COUNT = VERTEX_COUNT * 5 - - public static final String VERTEX_KEY_PREFIX = "vp_" - public static final String EDGE_KEY_PREFIX = "ep_" - public static final String LABEL_PREFIX = "el_" - public static final String UID_PROP = "uid" - - public static long SUPERNODE_UID = 0L - private static final int SUPERNODE_INDEX = 0 - - private final int edgeCount = EDGE_COUNT - private final int vertexCount = VERTEX_COUNT - private final int maxEdgePropVal = 100 - private final int maxVertexPropVal = 100 - /* - * edgeCount must have type int instead of long because - * DistributionGenerator expects int. It's probably not a great idea to go - * over 4B per label in memory anyway. - */ - private final int vertexPropKeys = 20 - private final int edgePropKeys = 10 - private final int edgeLabels = 3 - - private final String[] vertexPropNames - private final String[] edgePropNames - private final String[] edgeLabelNames - private final Map labelPkeys - - private static final Logger log = - LoggerFactory.getLogger(SpeedTestSchema.class) - - - /* - * This builder is a relic from back when GraphGenerator existed as - * a counterpart to this class, and the graph generation parameters - * were potentially configurable. Leaving this in until tests pass - * on 0.9.0 again. 
- */ -// public static class Builder { -// -// private int maxVertexPropVal = 100; -// private int maxEdgePropVal = 100; -// private int vertexPropKeys = 20; -// private int edgePropKeys = 10; -// private int edgeLabels = 3; -// private int vertexCount = -1; -// private int edgeCount = -1; -// -// /** -// * Set the maximum value of vertex properties. This is an exclusive -// * limit. The minimum is always 0. -// * -// * @param m maximum vertex property value, exclusive -// * @return self -// */ -// public Builder setMaxVertexPropVal(int m) { -// maxVertexPropVal = m; -// return this; -// } -// -// /** -// * Set the maximum value of edge properties. This is an exclusive limit. -// * The minimum is always 0. -// * -// * @param m maximum edge property value, exclusive -// * @return self -// */ -// public Builder setMaxEdgePropVal(int m) { -// maxEdgePropVal = m; -// return this; -// } -// -// /** -// * Set the total number of distinct property keys to use for vertex -// * properties. -// * -// * @param vertexPropKeys number of property keys -// * @return self -// */ -// public Builder setVertexPropKeys(int vertexPropKeys) { -// this.vertexPropKeys = vertexPropKeys; -// return this; -// } -// -// /** -// * Set the total number of distinct property keys to use for edge -// * properties. -// * -// * @param edgePropKeys number of property keys -// * @return self -// */ -// public Builder setEdgePropKeys(int edgePropKeys) { -// this.edgePropKeys = edgePropKeys; -// return this; -// } -// -// /** -// * Set the total number of edge labels to create. -// * -// * @param edgeLabels number of edge labels -// * @return self -// */ -// public Builder setEdgeLabels(int edgeLabels) { -// this.edgeLabels = edgeLabels; -// return this; -// } -// -// /** -// * Set the number of vertices to create. 
-// * -// * @param vertexCount global vertex total -// * @return self -// */ -// public Builder setVertexCount(int vertexCount) { -// this.vertexCount = vertexCount; -// Preconditions.checkArgument(0 <= this.vertexCount); -// return this; -// } -// -// /** -// * Set the number of edges to create for each edge label. -// * -// * @param edgeCount global edge total for each label -// * @return self -// */ -// public Builder setEdgeCount(int edgeCount) { -// this.edgeCount = edgeCount; -// Preconditions.checkArgument(0 <= this.edgeCount); -// return this; -// } -// -// public Builder(int vertexCount, int edgeCount) { -// setVertexCount(vertexCount); -// setEdgeCount(edgeCount); -// } -// -// /** -// * Construct a schema instance with this {@code Builder}'s -// * settings. -// * -// * @return a new GraphGenerator -// */ -// public SerialSpeedTestSchema build() { -// return new SerialSpeedTestSchema(maxEdgePropVal, maxVertexPropVal, vertexCount, edgeCount, vertexPropKeys, edgePropKeys, edgeLabels); -// } -// } - - static SpeedTestSchema get() { - return new SpeedTestSchema(VERTEX_COUNT, EDGE_COUNT) - } - - final String getVertexPropertyName(int i) { - return vertexPropNames[i] - } - - final String getEdgePropertyName(int i) { - return edgePropNames[i] - } - - final String getEdgeLabelName(int i) { - return edgeLabelNames[i] - } - - static final String getSortKeyForLabel(String l) { - return l.replace("el_", "ep_") - } - - final int getVertexPropKeys() { - return vertexPropKeys - } - - final int getEdgePropKeys() { - return edgePropKeys - } - - final int getMaxEdgePropVal() { - return maxEdgePropVal - } - - final int getMaxVertexPropVal() { - return maxVertexPropVal - } - - final int getEdgeLabels() { - return edgeLabels - } - - static final long getSupernodeUid() { - return SUPERNODE_UID - } - - final String getSupernodeOutLabel() { - return getEdgeLabelName(SUPERNODE_INDEX) - } - - final long getMaxUid() { - return vertexCount - } - - final int getVertexCount() { - 
return vertexCount - } - - final int getEdgeCount() { - return edgeCount - } - - private SpeedTestSchema() { - - this.vertexPropNames = generateNames(VERTEX_KEY_PREFIX, this.vertexPropKeys) - this.edgePropNames = generateNames(EDGE_KEY_PREFIX, this.edgePropKeys) - this.edgeLabelNames = generateNames(LABEL_PREFIX, this.edgeLabels) - - Preconditions.checkArgument(this.edgeLabels <= this.edgePropKeys) - - this.labelPkeys = new HashMap(this.edgeLabels) - for (int i = 0; i < this.edgeLabels; i++) { - labelPkeys.put(edgeLabelNames[i], edgePropNames[i]) - } - } - - - void makeTypes(JanusGraph g) { - Preconditions.checkArgument(edgeLabels <= edgePropKeys) - - JanusGraphManagement mgmt = g.openManagement() - for (int i = 0; i < vertexPropKeys; i++) { - PropertyKey key = mgmt.makePropertyKey(getVertexPropertyName(i)).dataType(Integer.class).cardinality(Cardinality.SINGLE).make() - mgmt.setConsistency(key, ConsistencyModifier.LOCK) - mgmt.buildIndex("v-"+getVertexPropertyName(i),Vertex.class).addKey(key).buildCompositeIndex() - } - for (int i = 0; i < edgePropKeys; i++) { - PropertyKey key = mgmt.makePropertyKey(getEdgePropertyName(i)).dataType(Integer.class).cardinality(Cardinality.SINGLE).make() - mgmt.setConsistency(key, ConsistencyModifier.LOCK) - mgmt.buildIndex("e-"+getEdgePropertyName(i),Edge.class).addKey(key).buildCompositeIndex() - } - for (int i = 0; i < edgeLabels; i++) { - String labelName = getEdgeLabelName(i) - String pkName = getSortKeyForLabel(labelName) - PropertyKey pk = mgmt.getPropertyKey(pkName) - ((StandardEdgeLabelMaker)mgmt.makeEdgeLabel(getEdgeLabelName(i))).sortKey(pk).make() - } - - PropertyKey uid = mgmt.makePropertyKey(UID_PROP).dataType(Long.class).cardinality(Cardinality.SINGLE).make() - mgmt.buildIndex("v-uid",Vertex.class).unique().addKey(uid).buildCompositeIndex() - mgmt.setConsistency(uid, ConsistencyModifier.LOCK) - mgmt.commit() - log.debug("Committed types") - } - - private static String[] generateNames(String prefix, int count) { - 
String[] result = new String[count] - StringBuilder sb = new StringBuilder(8) - sb.append(prefix) - for (int i = 0; i < count; i++) { - sb.append(i) - result[i] = sb.toString() - sb.setLength(prefix.length()) - } - return result - } -} diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/diskstorage/indexing/IndexProviderTest.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/diskstorage/indexing/IndexProviderTest.java index 67e580e4c7..0c41f2eca8 100644 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/diskstorage/indexing/IndexProviderTest.java +++ b/janusgraph-backend-testutils/src/main/java/org/janusgraph/diskstorage/indexing/IndexProviderTest.java @@ -1097,21 +1097,21 @@ public void testCustomAnalyzer() throws Exception { query = new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Text.CONTAINS_REGEX, "jer.*")); assertEquals(1, tx.queryStream(query).count(), query.toString()); - assertEquals(1, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN, "a"))).size()); - assertEquals(0, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN, "z"))).size()); - assertEquals(1, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN, "Tom and Jerry"))).size()); + assertEquals(1, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN, "a"))).count()); + assertEquals(0, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN, "z"))).count()); + assertEquals(1, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN, "Tom and Jerry"))).count()); - assertEquals(1, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN_EQUAL, "a"))).size()); - assertEquals(0, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN_EQUAL, "z"))).size()); - assertEquals(1, tx.query(new IndexQuery(store, 
PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN_EQUAL, "Tom and Jerry"))).size()); + assertEquals(1, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN_EQUAL, "a"))).count()); + assertEquals(0, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN_EQUAL, "z"))).count()); + assertEquals(1, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.GREATER_THAN_EQUAL, "Tom and Jerry"))).count()); - assertEquals(0, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN, "a"))).size()); - assertEquals(1, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN, "z"))).size()); - assertEquals(0, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN, "Tom and Jerry"))).size()); + assertEquals(0, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN, "a"))).count()); + assertEquals(1, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN, "z"))).count()); + assertEquals(0, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN, "Tom and Jerry"))).count()); - assertEquals(0, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN_EQUAL, "a"))).size()); - assertEquals(1, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN_EQUAL, "z"))).size()); - assertEquals(0, tx.query(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN_EQUAL, "Tom and Jerry"))).size()); + assertEquals(0, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN_EQUAL, "a"))).count()); + assertEquals(1, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN_EQUAL, "z"))).count()); + assertEquals(0, tx.queryStream(new IndexQuery(store, PredicateCondition.of(FULL_TEXT, Cmp.LESS_THAN_EQUAL, "Tom and Jerry"))).count()); } query = new IndexQuery(store, 
PredicateCondition.of(KEYWORD, Text.CONTAINS_PREFIX, "Tom")); assertEquals(1, tx.queryStream(query).count(), query.toString()); diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphConcurrentTest.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphConcurrentTest.java index 2d26dd7a7f..003138d5b7 100644 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphConcurrentTest.java +++ b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphConcurrentTest.java @@ -28,15 +28,12 @@ import org.janusgraph.core.RelationType; import org.janusgraph.core.schema.EdgeLabelMaker; import org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration; -import org.janusgraph.testutil.JUnitBenchmarkProvider; import org.janusgraph.testutil.RandomGenerator; -import org.junit.Rule; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; -import org.junit.rules.TestRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,9 +61,6 @@ @Tag(TestCategory.PERFORMANCE_TESTS) public abstract class JanusGraphConcurrentTest extends JanusGraphBaseTest { - @Rule - public TestRule benchmark = JUnitBenchmarkProvider.get(); - // Parallelism settings private static final int THREAD_COUNT = getThreadCount(); private static final int TASK_COUNT = THREAD_COUNT * 256; @@ -126,7 +120,7 @@ public void tearDown() throws Exception { super.tearDown(); } - @Test + @RepeatedTest(10) public void concurrentTxRead() throws Exception { final int numTypes = 20; final int numThreads = 100; @@ -168,7 +162,7 @@ public void concurrentTxRead() throws Exception { * * @throws Exception */ - @Test + @RepeatedTest(10) public void concurrentReadsOnSingleTransaction() throws Exception { initializeGraph(); @@ -197,7 +191,7 @@ 
public void concurrentReadsOnSingleTransaction() throws Exception { * * @throws Exception */ - @Test + @RepeatedTest(10) public void concurrentReadWriteOnSingleTransaction() throws Exception { initializeGraph(); @@ -227,7 +221,7 @@ public void concurrentReadWriteOnSingleTransaction() throws Exception { relFuture.cancel(true); } - @Test + @RepeatedTest(10) public void concurrentIndexReadWriteTest() throws Exception { clopen(option(GraphDatabaseConfiguration.ADJUST_LIMIT),false); @@ -310,7 +304,7 @@ public void concurrentIndexReadWriteTest() throws Exception { * @throws ExecutionException * @throws InterruptedException */ - @Test + @RepeatedTest(10) public void testStandardIndexVertexPropertyReads() throws InterruptedException, ExecutionException { testStandardIndexVertexPropertyReadsLogic(); } diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphIterativeBenchmark.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphIterativeBenchmark.java index 6377bd1bee..f491028784 100644 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphIterativeBenchmark.java +++ b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphIterativeBenchmark.java @@ -18,6 +18,9 @@ import com.google.common.base.Stopwatch; import com.google.common.collect.Iterators; import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.junit.jupiter.api.RepeatedTest; +import org.junit.jupiter.api.Tag; +import org.janusgraph.TestCategory; import org.janusgraph.core.JanusGraphEdge; import org.janusgraph.core.JanusGraphTransaction; import org.janusgraph.core.JanusGraphVertex; @@ -56,14 +59,14 @@ * * @author Matthias Broecheler (me@matthiasb.com) */ +@Tag(TestCategory.PERFORMANCE_TESTS) public abstract class JanusGraphIterativeBenchmark extends JanusGraphBaseTest { private static final Random random = new Random(); - public abstract KeyColumnValueStoreManager openStorageManager() 
throws BackendException; - //@Test + @RepeatedTest(10) public void testDataSequential() throws Exception { loadData(200000,2); close(); @@ -90,12 +93,11 @@ public void testDataSequential() throws Exception { } - //@Test + @RepeatedTest(10) public void testLoadData() throws Exception { loadData(100000,2); } - public void loadData(final int numVertices, final int numThreads) throws Exception { makeKey("w",Integer.class); PropertyKey time = makeKey("t",Long.class); @@ -132,7 +134,4 @@ public void loadData(final int numVertices, final int numThreads) throws Excepti if (!exe.isTerminated()) System.err.println("Could not load data in time"); System.out.println("Loaded "+numVertices+"vertices"); } - - - } diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphOperationCountingTest.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphOperationCountingTest.java index cd38a44487..1ebd6fd54a 100644 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphOperationCountingTest.java +++ b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphOperationCountingTest.java @@ -158,7 +158,7 @@ public void testReadOperations(boolean cache) { mgmt.setConsistency(mgmt.getGraphIndex("uid"),ConsistencyModifier.LOCK); finishSchema(); - if (cache) clopen(option(DB_CACHE),true,option(DB_CACHE_CLEAN_WAIT),0,option(DB_CACHE_TIME),0); + if (cache) clopen(option(DB_CACHE),true,option(DB_CACHE_CLEAN_WAIT),0,option(DB_CACHE_TIME),0L); else clopen(); JanusGraphTransaction tx = graph.buildTransaction().groupName(metricsPrefix).start(); @@ -483,7 +483,7 @@ public void printAllMetrics(String prefix) { public void testCacheConcurrency() throws InterruptedException { metricsPrefix = "tCC"; Object[] newConfig = {option(GraphDatabaseConfiguration.DB_CACHE),true, - option(GraphDatabaseConfiguration.DB_CACHE_TIME),0, + option(GraphDatabaseConfiguration.DB_CACHE_TIME),0L, 
option(GraphDatabaseConfiguration.DB_CACHE_CLEAN_WAIT),0, option(GraphDatabaseConfiguration.DB_CACHE_SIZE),0.25, option(GraphDatabaseConfiguration.BASIC_METRICS),true, @@ -595,7 +595,7 @@ protected void resetMetrics() { @Test public void testCacheSpeedup() { Object[] newConfig = {option(GraphDatabaseConfiguration.DB_CACHE),true, - option(GraphDatabaseConfiguration.DB_CACHE_TIME),0}; + option(GraphDatabaseConfiguration.DB_CACHE_TIME),0L}; clopen(newConfig); int numV = 1000; diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphPerformanceMemoryTest.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphPerformanceMemoryTest.java index 3ae3e27351..ec6aa4927b 100644 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphPerformanceMemoryTest.java +++ b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphPerformanceMemoryTest.java @@ -24,12 +24,9 @@ import org.janusgraph.core.JanusGraphTransaction; import org.janusgraph.core.JanusGraphVertex; import org.janusgraph.core.PropertyKey; -import org.janusgraph.testutil.JUnitBenchmarkProvider; import org.janusgraph.testutil.MemoryAssess; -import org.junit.Rule; +import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.junit.rules.TestRule; import java.util.List; import java.util.Random; @@ -44,11 +41,8 @@ @Tag(TestCategory.MEMORY_TESTS) public abstract class JanusGraphPerformanceMemoryTest extends JanusGraphBaseTest { - @Rule - public TestRule benchmark = JUnitBenchmarkProvider.get(); - - @Test - void edgeById() { + @RepeatedTest(10) + public void edgeById() { Vertex v1 = graph.traversal() .addV("V1") .property("p1", "1").next(); @@ -74,7 +68,7 @@ void edgeById() { assertEquals(10000, graph.traversal().E(edges).count().next()); } - @Test + @RepeatedTest(10) public void testMemoryLeakage() { long memoryBaseline = 0; SummaryStatistics stats = new 
SummaryStatistics(); @@ -103,7 +97,7 @@ public void testMemoryLeakage() { assertTrue(stats.getStandardDeviation() < stats.getMin()); } - @Test + @RepeatedTest(10) public void testTransactionalMemory() throws Exception { makeVertexIndexedUniqueKey("uid",Long.class); makeKey("name",String.class); diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphTest.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphTest.java index 91ab8be2bd..9bc3ceae95 100644 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphTest.java +++ b/janusgraph-backend-testutils/src/main/java/org/janusgraph/graphdb/JanusGraphTest.java @@ -80,6 +80,7 @@ import org.janusgraph.diskstorage.configuration.ConfigElement; import org.janusgraph.diskstorage.configuration.ConfigOption; import org.janusgraph.diskstorage.configuration.WriteConfiguration; +import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanJobFuture; import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics; import org.janusgraph.diskstorage.locking.PermanentLockingException; import org.janusgraph.diskstorage.log.Log; @@ -104,6 +105,7 @@ import org.janusgraph.graphdb.internal.OrderList; import org.janusgraph.graphdb.internal.RelationCategory; import org.janusgraph.graphdb.log.StandardTransactionLogProcessor; +import org.janusgraph.graphdb.olap.job.GhostVertexRemover; import org.janusgraph.graphdb.olap.job.IndexRemoveJob; import org.janusgraph.graphdb.olap.job.IndexRepairJob; import org.janusgraph.graphdb.query.JanusGraphPredicateUtils; @@ -117,10 +119,6 @@ import org.janusgraph.graphdb.query.vertex.BasicVertexCentricQueryBuilder; import org.janusgraph.graphdb.relations.AbstractEdge; import org.janusgraph.graphdb.relations.RelationIdentifier; -import org.janusgraph.graphdb.schema.EdgeLabelDefinition; -import org.janusgraph.graphdb.schema.PropertyKeyDefinition; -import org.janusgraph.graphdb.schema.SchemaContainer; -import 
org.janusgraph.graphdb.schema.VertexLabelDefinition; import org.janusgraph.graphdb.serializer.SpecialInt; import org.janusgraph.graphdb.serializer.SpecialIntSerializer; import org.janusgraph.graphdb.tinkerpop.optimize.step.JanusGraphEdgeVertexStep; @@ -185,9 +183,9 @@ import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.CUSTOM_ATTRIBUTE_CLASS; import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.CUSTOM_SERIALIZER_CLASS; import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.DB_CACHE; -import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.DB_CACHE_TIME; import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.FORCE_INDEX_USAGE; import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.HARD_MAX_LIMIT; +import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.IDS_STORE_NAME; import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.INITIAL_JANUSGRAPH_VERSION; import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.LIMIT_BATCH_SIZE; import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.LOG_BACKEND; @@ -1205,23 +1203,6 @@ public void testSchemaTypes() { //Ensure that unidirected edges keep pointing to deleted vertices getV(tx, v13).remove(); assertCount(1, v.query().direction(Direction.BOTH).labels("link").edges()); - - //Finally, test the schema container - SchemaContainer schemaContainer = new SchemaContainer(graph); - assertTrue(schemaContainer.containsRelationType("weight")); - assertTrue(schemaContainer.containsRelationType("friend")); - assertTrue(schemaContainer.containsVertexLabel("person")); - VertexLabelDefinition vld = schemaContainer.getVertexLabel("tag"); - assertFalse(vld.isPartitioned()); - assertFalse(vld.isStatic()); - PropertyKeyDefinition pkd = schemaContainer.getPropertyKey("name"); - assertEquals(Cardinality.SET, 
pkd.getCardinality()); - assertEquals(String.class, pkd.getDataType()); - EdgeLabelDefinition eld = schemaContainer.getEdgeLabel("child"); - assertEquals("child", eld.getName()); - assertEquals(child.longId(), eld.getLongId()); - assertEquals(Multiplicity.ONE2MANY, eld.getMultiplicity()); - assertFalse(eld.isUnidirected()); } /** @@ -1679,7 +1660,7 @@ public void testGotGIndexRemoval() throws InterruptedException, ExecutionExcepti // Remove name index graphIndex = mgmt.getGraphIndex(name); mgmt.updateIndex(graphIndex, SchemaAction.REMOVE_INDEX); - JanusGraphManagement.IndexJobFuture graphMetrics = mgmt.getIndexJobStatus(graphIndex); + ScanJobFuture graphMetrics = mgmt.getIndexJobStatus(graphIndex); finishSchema(); // Should have deleted at least one record @@ -2740,7 +2721,7 @@ public void testGlobalGraphConfig() { @Test public void testGlobalOfflineGraphConfig() { - setAndCheckGraphOption(DB_CACHE_TIME, ConfigOption.Type.GLOBAL_OFFLINE, 500L, 777L); + setAndCheckGraphOption(IDS_STORE_NAME, ConfigOption.Type.GLOBAL_OFFLINE, "testIdsStoreName", "testIdsStoreName2"); } @Test @@ -7182,7 +7163,7 @@ public void performReindexAndVerifyEdgeCount(String indexName, String edgeLabel, } //Reindexing - mgmt.updateIndex(relationIndex, SchemaAction.REINDEX).get(); + mgmt.updateIndex(relationIndex, SchemaAction.REINDEX, 1).get(); finishSchema(); relationIndex = mgmt.getRelationIndex(t,indexName); @@ -7371,4 +7352,50 @@ public void testVerticesDropAfterWhereWithBatchQueryEnabled() { assertFalse(tx.traversal().V().has("a", timestamp).has("b", true).has("c", true).bothE().hasNext()); } + /** + * In this test, we deliberately create ghost vertices and use ManagementSystem to purge them + * We use two concurrent transactions and let one transaction removes a vertex and another transaction + * updates the same vertex. When the storage backend does not have locking support, both transactions + * will succeed, and then the vertex becomes a ghost vertex. 
+ * + * @throws ExecutionException + * @throws InterruptedException + */ + @Test + public void testMgmtRemoveGhostVertices() throws ExecutionException, InterruptedException { + if (features.hasLocking()) return; + + final int numOfVertices = 100; + final int numOfGhostVertices = 80; + final int numOfRestVertices = numOfVertices - numOfGhostVertices; + List vertices = new ArrayList<>(numOfVertices); + for (int i = 0; i < numOfVertices; i++) { + vertices.add(tx.traversal().addV("test").next()); + } + tx.commit(); + + JanusGraphTransaction tx1 = graph.newTransaction(); + for (int i = 0; i < numOfVertices; i++) { + tx1.traversal().V(vertices.get(i)).property("prop", "val").next(); + } + + JanusGraphTransaction tx2 = graph.newTransaction(); + for (int i = 0; i < numOfGhostVertices; i++) { + tx2.traversal().V(vertices.get(i)).next().remove(); + } + tx2.commit(); + + tx1.commit(); + + JanusGraphManagement mgmt = graph.openManagement(); + ScanJobFuture future = mgmt.removeGhostVertices(); + assertEquals(numOfGhostVertices, future.get().getCustom(GhostVertexRemover.REMOVED_VERTEX_COUNT)); + assertEquals(numOfRestVertices, graph.traversal().V().count().next()); + assertEquals(numOfRestVertices, graph.traversal().V().hasLabel("test").count().next()); + assertEquals(numOfRestVertices, graph.traversal().V().has("prop", "val").count().next()); + assertEquals(numOfRestVertices, graph.traversal().V().hasLabel("test").has("prop", "val").count().next()); + + // running it again, no vertex is removed + assertEquals(0, mgmt.removeGhostVertices().get().getCustom(GhostVertexRemover.REMOVED_VERTEX_COUNT)); + } } diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/olap/OLAPTest.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/olap/OLAPTest.java index 8ec9565ffd..c247a514e9 100644 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/olap/OLAPTest.java +++ b/janusgraph-backend-testutils/src/main/java/org/janusgraph/olap/OLAPTest.java @@ -274,8 
+274,19 @@ public void removeGhostVertices() throws Exception { @Test public void testBasicComputeJob() { - GraphTraversalSource g = graph.traversal().withComputer(FulgoraGraphComputer.class); - System.out.println(g.V().count().next()); + int numV = 1000; + for (int i = 0; i < numV; i++) { + Vertex v = graph.addVertex(); + v.addEdge("loop", v, "val", i % 2); + } + graph.tx().commit(); + long startTime = System.currentTimeMillis(); + // in JanusGraph, withComputer() is equivalent to withComputer(FulgoraGraphComputer.class) + GraphTraversalSource g = graph.traversal().withComputer(); + long result = g.V().outE().has("val", 0).count().next(); + long elapsedTime = System.currentTimeMillis() - startTime; + log.info("elapsed time = {}", elapsedTime); + assertEquals(numV / 2, result); } @Test diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/testutil/CsvConsumer.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/testutil/CsvConsumer.java deleted file mode 100644 index b0b46abe6c..0000000000 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/testutil/CsvConsumer.java +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.testutil; - -import com.carrotsearch.junitbenchmarks.IResultsConsumer; -import com.carrotsearch.junitbenchmarks.Result; -import com.google.common.base.Joiner; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.util.ArrayList; -import java.util.List; - -public class CsvConsumer implements IResultsConsumer { - - private static final Logger log = - LoggerFactory.getLogger(CsvConsumer.class); - - private final Writer csv; - - private final File csvFile; - - private final String separator = ","; - - public enum Column { - CLASS_NAME("class.name") { - @Override public String get(Result r) { - return r.getShortTestClassName(); - } - }, - METHOD_NAME("method.name") { - @Override public String get(Result r) { - return r.getTestMethodName(); - } - }, - ROUND_COUNT("round.measured") { - @Override - public String get(Result r) { - return String.valueOf(r.benchmarkRounds); - } - }, - ROUND_WARMUP("round.warmup") { - @Override public String get(Result r) { - return String.valueOf(r.warmupRounds); - } - }, - // Called "round" in JUB's standard WriterConsumer, - // but that's ambiguous with round counts above - ROUND_AVG("round.time") { - @Override public String get(Result r) { - return String.valueOf(r.roundAverage.avg); // ms - } - }, - ROUND_AVG_STDEV("round.time.stdev") { - @Override public String get(Result r) { - return String.valueOf(r.roundAverage.stddev); - } - }, - ROUND_BLOCK("round.block") { - @Override public String get(Result r) { - return String.valueOf(r.blockedAverage.avg); // ms - } - }, - ROUND_BLOCK_STDEV("round.block.stdev") { - @Override public String get(Result r) { - return String.valueOf(r.roundAverage.stddev); - } - }, - ROUND_GC("round.gc") { - @Override public String get(Result r) { - return String.valueOf(r.gcAverage.avg); // ms - } - }, - 
ROUND_GC_STDEV("round.gc.stdev") { - @Override public String get(Result r) { - return String.valueOf(r.gcAverage.stddev); - } - }, - GC_CALLS("gc.calls") { - @Override public String get(Result r) { - return String.valueOf(r.gcInfo.accumulatedInvocations()); - } - }, - GC_TIME("gc.time") { - @Override public String get(Result r) { - return String.valueOf(r.gcInfo.accumulatedTime() / 1000); // ms - } - }, - TIME_TOTAL("time.total") { - @Override public String get(Result r) { - return String.valueOf((r.benchmarkTime + r.warmupTime) / 1000); // ms - } - }, - TIME_WARMUP("time.warmup") { - @Override public String get(Result r) { - return String.valueOf(r.warmupTime / 1000); // ms - } - }, - TIME_BENCH("time.bench") { - @Override public String get(Result r) { - return String.valueOf(r.benchmarkTime / 1000); // ms - } - }; - - public abstract String get(Result r); - - private final String name; - - Column(String name) { - this.name = name; - } - - public String getName() { - return name; - } - } - - public CsvConsumer(String fileName) throws IOException { - csvFile = new File(fileName); - log.debug("Opening {} in append mode", csvFile); - csv = new OutputStreamWriter(new FileOutputStream(csvFile, true)); - printHeader(); - } - - public synchronized void accept(Result r) throws IOException { - Joiner j = Joiner.on(separator); - final List fields = new ArrayList<>(Column.values().length); - for (Column c : Column.values()) { - fields.add(c.get(r)); - } - csv.write(String.format("%s%n", j.join(fields))); - log.debug("Wrote {} to {}", r, csvFile); - csv.flush(); - } - - private synchronized void printHeader() throws IOException { - long len = csvFile.length(); - if (0 != len) { - log.debug("Not writing header to {}; file has non-zero length {}", csvFile, len); - return; - } - - Joiner j = Joiner.on(separator); - final List headers = new ArrayList<>(Column.values().length); - for (Column c : Column.values()) { - headers.add(c.getName()); - } - csv.write(String.format("%s%n", 
j.join(headers))); - log.debug("Wrote header to {}", csvFile); - csv.flush(); - } -} diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/testutil/JUnitBenchmarkProvider.java b/janusgraph-backend-testutils/src/main/java/org/janusgraph/testutil/JUnitBenchmarkProvider.java deleted file mode 100644 index eb502a36c2..0000000000 --- a/janusgraph-backend-testutils/src/main/java/org/janusgraph/testutil/JUnitBenchmarkProvider.java +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.testutil; - -import com.carrotsearch.junitbenchmarks.AutocloseConsumer; -import com.carrotsearch.junitbenchmarks.BenchmarkOptions; -import com.carrotsearch.junitbenchmarks.BenchmarkRule; -import com.carrotsearch.junitbenchmarks.IResultsConsumer; -import com.carrotsearch.junitbenchmarks.Result; -import com.carrotsearch.junitbenchmarks.WriterConsumer; -import com.carrotsearch.junitbenchmarks.XMLConsumer; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedReader; -import java.io.Closeable; -import java.io.File; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.io.Writer; -import java.lang.annotation.Annotation; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -/** - * JUB can write the results of a single JVM run to an XML file, but it does not - * support appending to an existing file. When given a path to an existing file, - * it silently overwrites the old contents. This is unusable in a - * fork-per-test-class surefire configuration. The results of each test class - * overwrite those previous, so that the only results still readable at the end - * of a test run are those of the final class executed. - *

- * This class exists to configure JUB programmatically and avoid the annoying - * behavior of the system-property-configured XMLConsumer. - */ -public class JUnitBenchmarkProvider { - - public static final String ENV_EFFORT_GENERATE = "JUB_EFFORT_GENERATE"; - public static final String ENV_EFFORT_FILE = "JUB_EFFORT_FILE"; - public static final String ENV_DEFAULT_ROUNDS = "JUB_DEFAULT_ROUNDS"; - public static final String ENV_WARMUP_ROUNDS = "JUB_WARMUP_ROUNDS"; - public static final String ENV_TARGET_RUNTIME_MS = "JUB_TARGET_RUNTIME_MS"; - - public static final String DEFAULT_EFFORT_FILE = "../janusgraph-test/data/jub-effort.txt"; - public static final long TARGET_RUNTIME_MS; - public static final int DEFAULT_ROUNDS; - public static final int WARMUP_ROUNDS; - - private static final Map efforts; - private static final Logger log = LoggerFactory.getLogger(JUnitBenchmarkProvider.class); - - static { - efforts = loadScalarsFromEnvironment(); - DEFAULT_ROUNDS = loadIntFromEnvironment(ENV_DEFAULT_ROUNDS, 1); - WARMUP_ROUNDS = loadIntFromEnvironment(ENV_WARMUP_ROUNDS, 1); - TARGET_RUNTIME_MS = loadIntFromEnvironment(ENV_TARGET_RUNTIME_MS, 5000); - } - - /** - * Get a JUnitBenchmarks rule configured for JanusGraph performance testing. - *

- * The returned rule will write results to an XML file named - * jub.(abs(current nanotime)).xml and to the console. - *

- * This method concentrates our JUB configuration in a single code block and - * gives us programmatic flexibility that exceeds the limited flexibility of - * configuring JUB through its hardcoded global system properties. It also - * converts the IOException that XMLConsumer's constructor can throw into a - * RuntimeException. In test classes, this conversion is the difference - * between: - * - *

-     * {@literal @}Rule
-     * public TestRule benchmark; // Can't initialize here b/c of IOException
-     * ...
-     * public TestClassConstructor() throws IOException {
-     *     benchmark = new BenchmarkRule(new XMLConsumer(...));
-     * }
-     *
-     * // or, if there are extant subclass constructors we want to leave alone...
-     *
-     * public TestClassConstructor() {
-     *     try {
-     *         benchmark = new BenchmarkRule(new XMLConsumer(...));
-     *     } catch (IOException e) {
-     *         throw new RuntimeException(e);
-     *     }
-     * }
-     * 
- * - * versus, with this method, - * - *
-     * {@literal @}Rule
-     * public TestRule benchmark = JUnitBenchmarkProvider.get(); // done
-     * 
- * - * @return a BenchmarkRule ready for use with the JUnit @Rule annotation - */ - public static TestRule get() { - return new AdjustableRoundsBenchmarkRule(efforts, getConsumers()); - } - - /** - * Like {@link #get()}, except extra JUB Results consumers can be attached - * to the returned rule. - * - * @param additionalConsumers - * extra JUB results consumers to apply in the returned rule - * object - * @return a BenchmarkRule ready for use with the JUnit @Rule annotation - */ - public static TestRule get(IResultsConsumer... additionalConsumers) { - return new AdjustableRoundsBenchmarkRule(efforts, getConsumers(additionalConsumers)); - } - - /** - * Get a filename from {@link #ENV_EFFORT_FILE}, then open the file and read - * method execution multipliers from it. Such a file can be produced using - * {@link TimeScaleConsumer}. - * - * @return map of classname + '.' + methodname to the number of iterations - * needed to run for at least {@link #TARGET_RUNTIME_MS} - */ - private static Map loadScalarsFromEnvironment() { - - String file = getEffortFilePath(); - - File f = new File(file); - if (!f.canRead()) { - log.error("Can't read JUnitBenchmarks effort file {}, no effort multipliers loaded.", file); - return ImmutableMap.of(); - } - - try (final BufferedReader reader = new BufferedReader(new FileReader(file))) { - return loadScalarsUnsafe(file, reader); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - private static IResultsConsumer[] getConsumers(IResultsConsumer... additional) { - try { - return getConsumersUnsafe(additional); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - private static IResultsConsumer[] getConsumersUnsafe(IResultsConsumer... additional) throws IOException { - final List consumers = new ArrayList<>(); - consumers.add(new XMLConsumer(new File("jub." 
+ Math.abs(System.nanoTime()) + ".xml"))); - consumers.add(new WriterConsumer()); // defaults to System.out - consumers.add(new CsvConsumer("target/jub.csv")); - - if (null != System.getenv(ENV_EFFORT_GENERATE)) { - String file = getEffortFilePath(); - Writer writer = new FileWriter(file, true); - log.info("Opened " + file + " for appending"); - consumers.add(new TimeScaleConsumer(writer)); - } - - consumers.addAll(Arrays.asList(additional)); - - return consumers.toArray(new IResultsConsumer[consumers.size()]); - } - - private static String getEffortFilePath() { - String file = System.getenv(ENV_EFFORT_FILE); - if (null == file) { - log.debug("Env variable " + ENV_EFFORT_FILE + " was null"); - log.debug("Defaulting to JUB effort scalar file " + DEFAULT_EFFORT_FILE); - file = DEFAULT_EFFORT_FILE; - } - return file; - } - - private static Map loadScalarsUnsafe(String filename, BufferedReader reader) throws IOException { - String line; - int ln = 0; - final int tokensPerLine = 2; - final ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); - - while (null != (line = reader.readLine())) { - ln++; - String[] tokens = line.split(" "); - if (tokensPerLine != tokens.length) { - log.warn("Parse error at {}:{}: required {} tokens, but found {} (skipping this line)", - filename, ln, tokensPerLine, tokens.length); - continue; - } - - int t = 0; - String name = tokens[t++]; - String rawscalar = tokens[t++]; - - assert tokensPerLine == t; - - Preconditions.checkNotNull(name); - - if (0 == name.length()) { - log.warn("Parse error at {}:{}: zero-length method name (skipping this line)", filename, ln); - continue; - } - - double scalar; - try { - scalar = Double.parseDouble(rawscalar); - } catch (Exception e) { - log.warn("Parse error at {}:{}: failed to convert string \"{}\" to a double (skipping this line)", - filename, ln, rawscalar); - log.warn("Double parsing exception stacktrace follows", e); - continue; - } - - if (0 > scalar) { - log.warn("Parse error at {}:{}: read 
negative method scalar {} (skipping this line)", - filename, ln, scalar); - continue; - } - - builder.put(name, Double.valueOf(Math.ceil(scalar)).intValue()); - } - - return builder.build(); - } - - /** - * Write methodnames followed by {@link JUnitBenchmarkProvider#TARGET_RUNTIME_MS} / roundAverage to a file. - */ - private static class TimeScaleConsumer extends AutocloseConsumer implements Closeable { - - final Writer writer; - - public TimeScaleConsumer(Writer writer) { - this.writer = writer; - } - - @Override - public void accept(Result result) throws IOException { - - // Result's javadoc says roundAvg.avg is ms, but it seems to be s in reality - double millis = 1000D * result.roundAverage.avg; - double scalar = Math.max(1D, TARGET_RUNTIME_MS / Math.max(1D, millis)); - - String testClass = result.getTestClassName(); - String testName = result.getTestMethodName(); - writer.write(String.format("%s.%s %.3f%n", testClass, testName, scalar)); - writer.flush(); - } - - @Override - public void close() throws IOException { - writer.close(); - } - } - - private static BenchmarkOptions getDefaultBenchmarkOptions(int rounds) { - return (BenchmarkOptions)Proxy.newProxyInstance( - JUnitBenchmarkProvider.class.getClassLoader(), // which classloader is correct? - new Class[] { BenchmarkOptions.class }, - new DefaultBenchmarkOptionsHandler(rounds)); - } - - private static BenchmarkOptions getWrappedBenchmarkOptions(BenchmarkOptions base, int rounds) { - return (BenchmarkOptions)Proxy.newProxyInstance( - JUnitBenchmarkProvider.class.getClassLoader(), // which classloader is correct? 
- new Class[] { BenchmarkOptions.class }, - new WrappedBenchmarkOptionsHandler(base, rounds)); - } - - private static int loadIntFromEnvironment(String envKey, int dfl) { - String s = System.getenv(envKey); - - if (null != s) { - try { - return Integer.parseInt(s); - } catch (NumberFormatException e) { - log.warn("Could not interpret value \"{}\" for environment variable {} as an integer", s, envKey, e); - } - } else { - log.debug("Using default value {} for environment variable {}", dfl, envKey); - } - - return dfl; - } - - - /** - * This class uses particularly awkward and inelegant encapsulation. I don't - * have much flexibility to improve it because both JUnit and - * JUnitBenchmarks aggressively prohibit inheritance through final and - * restrictive method/constructor visibility. - */ - private static class AdjustableRoundsBenchmarkRule implements TestRule { - - private final BenchmarkRule rule; - private final Map efforts; - - public AdjustableRoundsBenchmarkRule(Map efforts, IResultsConsumer... consumers) { - rule = new BenchmarkRule(consumers); - this.efforts = efforts; - } - - @Override - public Statement apply(Statement base, Description description) { - Class clazz = description.getTestClass(); - String mname = description.getMethodName(); - Collection annotations = description.getAnnotations(); - final int rounds = getRoundsForFullMethodName(clazz.getCanonicalName() + "." 
+ mname); - - final List modifiedAnnotations = new ArrayList<>(annotations.size()); - - boolean hit = false; - - for (Annotation a : annotations) { - if (a.annotationType().equals(BenchmarkOptions.class)) { - final BenchmarkOptions old = (BenchmarkOptions)a; - BenchmarkOptions replacement = getWrappedBenchmarkOptions(old, rounds); - modifiedAnnotations.add(replacement); - log.debug("Modified BenchmarkOptions annotation on {}", mname); - hit = true; - } else { - modifiedAnnotations.add(a); - log.debug("Kept annotation {} with annotation type {} on {}", - a, a.annotationType(), mname); - } - } - - if (!hit) { - BenchmarkOptions opts = getDefaultBenchmarkOptions(rounds); - modifiedAnnotations.add(opts); - log.debug("Added BenchmarkOptions {} with annotation type {} to {}", - opts, opts.annotationType(), mname); - } - - Description roundsAdjustedDesc = - Description.createTestDescription( - clazz, mname, - modifiedAnnotations.toArray(new Annotation[modifiedAnnotations.size()])); - return rule.apply(base, roundsAdjustedDesc); - } - - private int getRoundsForFullMethodName(String fullname) { - Integer r = efforts.get(fullname); - if (null == r) { - r = DEFAULT_ROUNDS; - log.warn("Applying default iteration count ({}) to method {}", r, fullname); - } else { - log.debug("Loaded iteration count {} on method {}", r, fullname); - } - return r; - } - } - - private static class DefaultBenchmarkOptionsHandler implements InvocationHandler { - - private final int rounds; - - public DefaultBenchmarkOptionsHandler(int rounds) { - this.rounds = rounds; - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws IllegalArgumentException { - if (method.getName().equals("benchmarkRounds")) { - log.trace("Intercepted benchmarkRounds() invocation: returning {}", rounds); - return rounds; - } - if (method.getName().equals("warmupRounds")) { - log.trace("Intercepted warmupRounds() invocation: returning {}", WARMUP_ROUNDS); - return WARMUP_ROUNDS; - } - if 
(method.getName().equals("annotationType")) { - return BenchmarkOptions.class; - } - log.trace("Returning default value for method intercepted invocation of method {}", method.getName()); - return method.getDefaultValue(); - } - } - - private static class WrappedBenchmarkOptionsHandler implements InvocationHandler { - - private final Object base; - private final int rounds; - - public WrappedBenchmarkOptionsHandler(Object base, int rounds) { - this.base = base; - this.rounds = rounds; - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws IllegalAccessException, IllegalArgumentException, - InvocationTargetException { - if (method.getName().equals("benchmarkRounds")) { - log.trace("Intercepted benchmarkRounds() invocation: returning {}", rounds); - return rounds; - } - if (method.getName().equals("warmupRounds")) { - log.trace("Intercepted warmupRounds() invocation: returning {}", WARMUP_ROUNDS); - return WARMUP_ROUNDS; - } - log.trace("Delegating intercepted invocation of method {} to wrapped base instance {}", method.getName(), base); - return method.invoke(base, args); - } - - } -} diff --git a/janusgraph-benchmark/pom.xml b/janusgraph-benchmark/pom.xml new file mode 100644 index 0000000000..cf46991ad4 --- /dev/null +++ b/janusgraph-benchmark/pom.xml @@ -0,0 +1,93 @@ + + + 4.0.0 + + org.janusgraph + janusgraph + 1.0.0-SNAPSHOT + ../pom.xml + + janusgraph-benchmark + JanusGraph-Benchmark + https://janusgraph.org + jar + + + ${basedir}/.. 
+ 1.34 + benchmarks + + + + + + org.apache.logging.log4j + log4j-slf4j-impl + + + org.apache.logging.log4j + log4j-core + + + + + org.openjdk.jmh + jmh-core + ${jmh.version} + + + org.openjdk.jmh + jmh-generator-annprocess + ${jmh.version} + provided + + + + org.janusgraph + janusgraph-core + ${project.version} + + + org.janusgraph + janusgraph-inmemory + ${project.version} + + + + + + benchmark + + + !skipTests + + + + + org.codehaus.mojo + exec-maven-plugin + + + run-benchmarks + integration-test + + exec + + + test + java + + -classpath + + org.janusgraph.BenchmarkRunner + .*Benchmark + + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-benchmark/src/main/java/org/janusgraph/BenchmarkRunner.java b/janusgraph-benchmark/src/main/java/org/janusgraph/BenchmarkRunner.java new file mode 100644 index 0000000000..cecad2360c --- /dev/null +++ b/janusgraph-benchmark/src/main/java/org/janusgraph/BenchmarkRunner.java @@ -0,0 +1,39 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph; + +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.ChainedOptionsBuilder; +import org.openjdk.jmh.runner.options.OptionsBuilder; +import org.openjdk.jmh.runner.options.TimeValue; + +public class BenchmarkRunner { + public static void main(String[] args) throws RunnerException { + final ChainedOptionsBuilder builder = new OptionsBuilder() + .forks(1) + .measurementTime(TimeValue.seconds(5)) + .warmupIterations(2) + .warmupTime(TimeValue.seconds(1)); + if (args.length > 0) { + for (String arg : args) { + builder.include(arg); + } + } else { + builder.include(".*Benchmark"); + } + new Runner(builder.build()).run(); + } +} diff --git a/janusgraph-test/src/test/java/org/janusgraph/graphdb/query/GraphCentricQueryBenchmark.java b/janusgraph-benchmark/src/main/java/org/janusgraph/GraphCentricQueryBenchmark.java similarity index 99% rename from janusgraph-test/src/test/java/org/janusgraph/graphdb/query/GraphCentricQueryBenchmark.java rename to janusgraph-benchmark/src/main/java/org/janusgraph/GraphCentricQueryBenchmark.java index c9ba587e51..d90ffe32e9 100644 --- a/janusgraph-test/src/test/java/org/janusgraph/graphdb/query/GraphCentricQueryBenchmark.java +++ b/janusgraph-benchmark/src/main/java/org/janusgraph/GraphCentricQueryBenchmark.java @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package org.janusgraph.graphdb.query; +package org.janusgraph; import org.apache.tinkerpop.gremlin.structure.Vertex; import org.janusgraph.core.Cardinality; diff --git a/janusgraph-benchmark/src/main/java/org/janusgraph/JanusGraphSpeedBenchmark.java b/janusgraph-benchmark/src/main/java/org/janusgraph/JanusGraphSpeedBenchmark.java new file mode 100644 index 0000000000..41ba34dc85 --- /dev/null +++ b/janusgraph-benchmark/src/main/java/org/janusgraph/JanusGraphSpeedBenchmark.java @@ -0,0 +1,96 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph; + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.janusgraph.core.EdgeLabel; +import org.janusgraph.core.JanusGraph; +import org.janusgraph.core.JanusGraphFactory; +import org.janusgraph.core.PropertyKey; +import org.janusgraph.core.VertexLabel; +import org.janusgraph.core.schema.JanusGraphManagement; +import org.janusgraph.diskstorage.configuration.ModifiableConfiguration; +import org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; + +@State(Scope.Thread) +public class JanusGraphSpeedBenchmark { + + @Param({ "1000", "10000", "100000" }) + long numberOfVertices; + + public JanusGraph graph; + + private static final String START_LABEL = "startVertex"; + private static final String END_LABEL = "endVertex"; + private static final String UID_PROP = "uid"; + + @Setup + public void setup() { + ModifiableConfiguration config = GraphDatabaseConfiguration.buildGraphConfiguration(); + config.set(GraphDatabaseConfiguration.STORAGE_BACKEND, "inmemory"); + config.set(GraphDatabaseConfiguration.AUTO_TYPE, "none"); + config.set(GraphDatabaseConfiguration.SCHEMA_CONSTRAINTS, true); + graph = JanusGraphFactory.open(config); + JanusGraphManagement jgm = graph.openManagement(); + VertexLabel startVertex = jgm.makeVertexLabel(START_LABEL).make(); + PropertyKey uid = jgm.makePropertyKey(UID_PROP).dataType(Integer.class).make(); + jgm.buildIndex("byUid", Vertex.class).addKey(uid).indexOnly(startVertex).buildCompositeIndex(); + jgm.addProperties(startVertex, uid); + VertexLabel endVertex = jgm.makeVertexLabel(END_LABEL).make(); + jgm.addProperties(endVertex, uid); + EdgeLabel between 
= jgm.makeEdgeLabel("between").make(); + jgm.addConnection(between, startVertex, endVertex); + + jgm.commit(); + Vertex next = graph.traversal().addV(START_LABEL).property(UID_PROP, 1).next(); + + for (int i = 0; i < numberOfVertices; i++) { + graph.traversal() + .addV(END_LABEL).property(UID_PROP, i).as("end") + .addE("between").to("end").from(next).iterate(); + } + } + + @Benchmark + public void basicCount() { + if (numberOfVertices != graph.traversal().V().has(START_LABEL, UID_PROP, 1).out().count().next()) + throw new AssertionError(); + } + + @Benchmark + public void basicAddAndDelete(){ + for (int i = 0; i < numberOfVertices; i++) { + GraphTraversalSource g = graph.traversal(); + g.addV(START_LABEL).property(UID_PROP, i+2).iterate(); + } + for (int i = 0; i < numberOfVertices; i++) { + GraphTraversalSource g = graph.traversal(); + g.V().has(START_LABEL, UID_PROP, i+2).iterate(); + } + } + + @TearDown + public void teardown() { + graph.close(); + } + +} diff --git a/janusgraph-benchmark/src/main/java/org/janusgraph/MgmtOlapJobBenchmark.java b/janusgraph-benchmark/src/main/java/org/janusgraph/MgmtOlapJobBenchmark.java new file mode 100644 index 0000000000..8931766f61 --- /dev/null +++ b/janusgraph-benchmark/src/main/java/org/janusgraph/MgmtOlapJobBenchmark.java @@ -0,0 +1,127 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph; + +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.janusgraph.core.Cardinality; +import org.janusgraph.core.JanusGraph; +import org.janusgraph.core.JanusGraphFactory; +import org.janusgraph.core.PropertyKey; +import org.janusgraph.core.schema.JanusGraphManagement; +import org.janusgraph.core.schema.SchemaAction; +import org.janusgraph.diskstorage.configuration.ModifiableConfiguration; +import org.janusgraph.diskstorage.configuration.WriteConfiguration; +import org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration; +import org.janusgraph.graphdb.database.management.ManagementSystem; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.infra.Blackhole; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * This benchmark evaluates performance of OLAP jobs that + * can be run via the ManagementSystem interface, including: + * 1) REINDEX + * 2) REMOVE_INDEX + * + * @author Boxuan Li (liboxuan@connect.hku.hk) + */ +@BenchmarkMode(Mode.AverageTime) +@Fork(1) +@State(Scope.Benchmark) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class MgmtOlapJobBenchmark { + @Param("10000") + int size; + + JanusGraph graph; + + public WriteConfiguration getConfiguration() { + ModifiableConfiguration config = 
GraphDatabaseConfiguration.buildGraphConfiguration(); + config.set(GraphDatabaseConfiguration.STORAGE_BACKEND, "inmemory"); + return config.getConfiguration(); + } + + @Setup(Level.Iteration) + public void setUp() throws Exception { + graph = JanusGraphFactory.open(getConfiguration()); + + JanusGraphManagement mgmt = graph.openManagement(); + PropertyKey name = mgmt.makePropertyKey("name").dataType(String.class).cardinality(Cardinality.SINGLE).make(); + mgmt.buildIndex("nameIndex", Vertex.class).addKey(name).buildCompositeIndex(); + mgmt.commit(); + ManagementSystem.awaitGraphIndexStatus(graph, "nameIndex").call(); + + for (int j = 0; j < size; j++) { + graph.addVertex("name", "value" + j, "alias", "value" + j); + } + graph.tx().commit(); + + mgmt = graph.openManagement(); + mgmt.buildIndex("aliasIndex", Vertex.class).addKey(mgmt.getPropertyKey("alias")).buildCompositeIndex(); + mgmt.commit(); + ManagementSystem.awaitGraphIndexStatus(graph, "aliasIndex").call(); + + mgmt = graph.openManagement(); + mgmt.updateIndex(mgmt.getGraphIndex("nameIndex"), SchemaAction.DISABLE_INDEX).get(); + mgmt.commit(); + ManagementSystem.awaitGraphIndexStatus(graph, "nameIndex").call(); + } + + @Benchmark + public void runReindex(Blackhole blackhole) throws ExecutionException, InterruptedException { + JanusGraphManagement mgmt = graph.openManagement(); + blackhole.consume(mgmt.updateIndex(mgmt.getGraphIndex("aliasIndex"), SchemaAction.REINDEX).get()); + mgmt.commit(); + } + + @Benchmark + public void runRemoveIndex(Blackhole blackhole) throws ExecutionException, InterruptedException { + JanusGraphManagement mgmt = graph.openManagement(); + blackhole.consume(mgmt.updateIndex(mgmt.getGraphIndex("nameIndex"), SchemaAction.REMOVE_INDEX).get()); + mgmt.commit(); + } + + + @TearDown(Level.Iteration) + public void tearDown() { + graph.close(); + } + + public static void main(String[] args) throws RunnerException { + Options options = new OptionsBuilder() + 
.include(MgmtOlapJobBenchmark.class.getSimpleName()) + .warmupIterations(1) + .measurementIterations(3) + .build(); + new Runner(options).run(); + } + +} diff --git a/janusgraph-benchmark/src/main/java/org/janusgraph/StaticArrayEntryListBenchmark.java b/janusgraph-benchmark/src/main/java/org/janusgraph/StaticArrayEntryListBenchmark.java new file mode 100644 index 0000000000..aaac5696e5 --- /dev/null +++ b/janusgraph-benchmark/src/main/java/org/janusgraph/StaticArrayEntryListBenchmark.java @@ -0,0 +1,69 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph; + +import org.janusgraph.diskstorage.Entry; +import org.janusgraph.diskstorage.EntryList; +import org.janusgraph.diskstorage.util.ByteBufferUtil; +import org.janusgraph.diskstorage.util.StaticArrayBuffer; +import org.janusgraph.diskstorage.util.StaticArrayEntry; +import org.janusgraph.diskstorage.util.StaticArrayEntryList; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.ArrayList; +import java.util.List; + +@BenchmarkMode(Mode.Throughput) +@State(Scope.Benchmark) +@Fork(jvmArgsAppend = "-Xmx1G") +public class StaticArrayEntryListBenchmark { + List entries = new ArrayList<>(); + + @Param({ "10000", "100000" }) + Integer size; + + @Param({ "50", "1000", "5000" }) + Integer valueSize; + + @Setup + public void setup() { + for (int i = 0; i < size; i++) { + StaticArrayBuffer column = StaticArrayEntry.of(ByteBufferUtil.oneByteBuffer(20)); + StaticArrayBuffer value = StaticArrayEntry.of(ByteBufferUtil.oneByteBuffer(valueSize)); + Entry entry = StaticArrayEntry.of(column, value); + entries.add(entry); + } + } + + @Benchmark + public void iterator(Blackhole bh) { + EntryList result = StaticArrayEntryList.ofStaticBuffer(entries.iterator(), StaticArrayEntry.ENTRY_GETTER); + bh.consume(result); + } + + @Benchmark + public void iterable(Blackhole bh) { + EntryList result = StaticArrayEntryList.ofStaticBuffer(entries, StaticArrayEntry.ENTRY_GETTER); + bh.consume(result); + } +} diff --git a/janusgraph-benchmark/src/main/resources/log4j2.xml b/janusgraph-benchmark/src/main/resources/log4j2.xml new file mode 100644 index 0000000000..07924f6ba0 --- /dev/null +++ 
b/janusgraph-benchmark/src/main/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-berkeleyje/pom.xml b/janusgraph-berkeleyje/pom.xml index a4f1dccbf3..7a3397295f 100644 --- a/janusgraph-berkeleyje/pom.xml +++ b/janusgraph-berkeleyje/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-berkeleyje diff --git a/janusgraph-berkeleyje/src/test/resources/log4j.properties b/janusgraph-berkeleyje/src/test/resources/log4j.properties deleted file mode 100644 index b067a1746b..0000000000 --- a/janusgraph-berkeleyje/src/test/resources/log4j.properties +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - -#log4j.logger.org.apache.cassandra=INFO -#log4j.logger.org.apache.hadoop=INFO -#log4j.logger.org.apache.zookeeper=INFO -#log4j.logger.org.janusgraph.blueprints.BerkeleyJEBlueprintsTest=INFO -#log4j.logger.org.janusgraph.blueprints.JanusGraphBlueprintsTest=INFO -# Uncomment these lines to enable voluminous operation, tx, and db logging -# Expect one line per insert/get/delete/slice, tx open/close, db open/close -#log4j.logger.org.janusgraph.diskstorage.berkeleyje.BerkeleyJEStoreManager=TRACE -#log4j.logger.org.janusgraph.diskstorage.berkeleyje.BerkeleyJEKeyValueStore=TRACE -#log4j.logger.org.janusgraph.diskstorage.berkeleyje.BerkeleyJETx=TRACE -# BerkeleyGraphTest does some JUnit @Rule TestName chicanery -# when generating graph configs. Uncomment this line to make -# the test noisier about what it's doing. 
-#log4j.logger.org.janusgraph.graphdb.berkeleyje.BerkeleyGraphTest=TRACE diff --git a/janusgraph-berkeleyje/src/test/resources/log4j2-test.xml b/janusgraph-berkeleyje/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..5bd810fa3c --- /dev/null +++ b/janusgraph-berkeleyje/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-bigtable/pom.xml b/janusgraph-bigtable/pom.xml index 56102311db..ebbdd07d85 100644 --- a/janusgraph-bigtable/pom.xml +++ b/janusgraph-bigtable/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-bigtable diff --git a/janusgraph-core/pom.xml b/janusgraph-core/pom.xml index b64c5bf4dd..3dcaf7af2f 100644 --- a/janusgraph-core/pom.xml +++ b/janusgraph-core/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-core @@ -31,11 +31,6 @@ true
- - org.glassfish - javax.json - 1.1.4 - io.dropwizard.metrics metrics-core @@ -76,14 +71,17 @@ com.carrotsearch hppc + + org.jctools + jctools-core + 3.3.0 + + - com.boundary - high-scale-lib + com.google.code.findbugs + jsr305 + compile - - com.google.code.findbugs - jsr305 - ${basedir}/target diff --git a/janusgraph-core/src/main/java/org/janusgraph/core/JanusGraphIndexQuery.java b/janusgraph-core/src/main/java/org/janusgraph/core/JanusGraphIndexQuery.java index 032df46bed..2ec0c53b7f 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/core/JanusGraphIndexQuery.java +++ b/janusgraph-core/src/main/java/org/janusgraph/core/JanusGraphIndexQuery.java @@ -92,15 +92,6 @@ public interface JanusGraphIndexQuery { */ JanusGraphIndexQuery setElementIdentifier(String identifier); - /** - * Returns all vertices that match the query in the indexing backend. - * - * @deprecated use {@link #vertexStream()} instead. - * - * @return - */ - @Deprecated - Iterable> vertices(); /** * Returns all vertices that match the query in the indexing backend. @@ -109,16 +100,6 @@ public interface JanusGraphIndexQuery { */ Stream> vertexStream(); - /** - * Returns all edges that match the query in the indexing backend. - * - * @deprecated use {@link #edgeStream()} instead. - * - * @return - */ - @Deprecated - Iterable> edges(); - /** * Returns all edges that match the query in the indexing backend. * @@ -126,16 +107,6 @@ public interface JanusGraphIndexQuery { */ Stream> edgeStream(); - /** - * Returns all properties that match the query in the indexing backend. - * - * @deprecated use {@link #propertyStream()} instead. - * - * @return - */ - @Deprecated - Iterable> properties(); - /** * Returns all properties that match the query in the indexing backend. 
* diff --git a/janusgraph-core/src/main/java/org/janusgraph/core/schema/JanusGraphManagement.java b/janusgraph-core/src/main/java/org/janusgraph/core/schema/JanusGraphManagement.java index 4d62e9c118..a3e1b8d608 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/core/schema/JanusGraphManagement.java +++ b/janusgraph-core/src/main/java/org/janusgraph/core/schema/JanusGraphManagement.java @@ -22,12 +22,10 @@ import org.janusgraph.core.PropertyKey; import org.janusgraph.core.RelationType; import org.janusgraph.core.VertexLabel; -import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics; +import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanJobFuture; import java.time.Duration; import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; /** * The JanusGraphManagement interface provides methods to define, update, and inspect the schema of a JanusGraph graph. @@ -242,32 +240,6 @@ interface IndexBuilder { } - interface IndexJobFuture extends Future { - - /** - * Returns a set of potentially incomplete and still-changing metrics - * for this job. This is not guaranteed to be the same object as the - * one returned by {@link #get()}, nor will the metrics visible through - * the object returned by this method necessarily eventually converge - * on the same values in the object returned by {@link #get()}, though - * the implementation should attempt to provide both properties when - * practical. - *

- * The metrics visible through the object returned by this method may - * also change their values between reads. In other words, this is not - * necessarily an immutable snapshot. - *

- * If the index job has failed and the implementation is capable of - * quickly detecting that, then the implementation should throw an - * {@code ExecutionException}. Returning metrics in case of failure is - * acceptable, but throwing an exception is preferred. - * - * @return metrics for a potentially still-running job - * @throws ExecutionException if the index job threw an exception - */ - ScanMetrics getIntermediateResult() throws ExecutionException; - } - /* ##################### CONSISTENCY SETTING ########################## */ @@ -310,6 +282,21 @@ interface IndexJobFuture extends Future { */ void setTTL(JanusGraphSchemaType type, Duration duration); + /* + ##################### CONSISTENCY MANAGEMENT ################# + */ + + /** + * Remove all ghost vertices (a.k.a. phantom vertices) from the graph + */ + ScanJobFuture removeGhostVertices(); + + /** + * Remove all ghost vertices (a.k.a. phantom vertices) from the graph, + * with the given concurrency level + */ + ScanJobFuture removeGhostVertices(int numOfThreads); + /* ##################### SCHEMA UPDATE ########################## */ @@ -324,13 +311,26 @@ interface IndexJobFuture extends Future { void changeName(JanusGraphSchemaElement element, String newName); /** - * Updates the provided index according to the given {@link SchemaAction} + * Updates the provided index according to the given {@link SchemaAction}. + * If action is REINDEX or REMOVE_INDEX, then number of threads running the + * action will be the number of available processors running on current JVM. * * @param index * @param updateAction * @return a future that completes when the index action is done */ - IndexJobFuture updateIndex(Index index, SchemaAction updateAction); + ScanJobFuture updateIndex(Index index, SchemaAction updateAction); + + /** + * Updates the provided index according to the given {@link SchemaAction}, using + * given number of threads if applicable (REINDEX and REMOVE_INDEX). 
+ * + * @param index + * @param updateAction + * @param numOfThreads + * @return + */ + ScanJobFuture updateIndex(Index index, SchemaAction updateAction, int numOfThreads); /** * If an index update job was triggered through {@link #updateIndex(Index, SchemaAction)} with schema actions @@ -340,7 +340,7 @@ interface IndexJobFuture extends Future { * @param index * @return A message that reflects the status of the index job */ - IndexJobFuture getIndexJobStatus(Index index); + ScanJobFuture getIndexJobStatus(Index index); /* ##################### CLUSTER MANAGEMENT ########################## diff --git a/janusgraph-core/src/main/java/org/janusgraph/core/util/JanusGraphId.java b/janusgraph-core/src/main/java/org/janusgraph/core/util/JanusGraphId.java deleted file mode 100644 index ae5cd53159..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/core/util/JanusGraphId.java +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.core.util; - -import com.google.common.base.Preconditions; -import org.janusgraph.core.JanusGraphVertex; -import org.janusgraph.graphdb.idmanagement.IDManager; - -/** - * Utility methods for handling JanusGraph ids and converting them between indexing and storage backend representations. 
- * - * @author Matthias Broecheler (me@matthiasb.com) - * @deprecated This class does not produce valid JanusGraph vertex ids as it does not take into account partitioning - * bits used in vertex id assignment. Use {@link org.janusgraph.graphdb.idmanagement.IDManager}, which can be obtained - * through {@link org.janusgraph.graphdb.database.StandardJanusGraph#getIDManager()} and includes methods for converting - * a user id to ({@link org.janusgraph.graphdb.idmanagement.IDManager#toVertexId(long)}) and from - * ({@link org.janusgraph.graphdb.idmanagement.IDManager#fromVertexId(long)}) JanusGraph vertex id. - *

- *

- * IDManager idManager = ((StandardJanusGraph) graph).getIDManager();
- * 
- */ -@Deprecated -public class JanusGraphId { - - /** - * Converts a user provided long id into a JanusGraph vertex id. The id must be positive and can be at most 61 bits long. - * This method is useful when providing ids during vertex creation via {@link org.apache.tinkerpop.gremlin.structure.Graph#addVertex(Object...)}. - * - * @param id long id - * @return a corresponding JanusGraph vertex id - * @deprecated Use {@link org.janusgraph.graphdb.idmanagement.IDManager#toVertexId(long)}. - */ - public static long toVertexId(long id) { - Preconditions.checkArgument(id > 0, "Vertex id must be positive: %s", id); - Preconditions.checkArgument(IDManager.VertexIDType.NormalVertex.removePadding(Long.MAX_VALUE) >= id, "Vertex id is too large: %s", id); - return IDManager.VertexIDType.NormalVertex.addPadding(id); - } - - /** - * Converts a JanusGraph vertex id to the user provided id as the inverse mapping of {@link #toVertexId(long)}. - * - * @param id JanusGraph vertex id (must be positive) - * @return original user provided id - * @deprecated Use {@link org.janusgraph.graphdb.idmanagement.IDManager#fromVertexId(long)} - */ - public static long fromVertexId(long id) { - Preconditions.checkArgument(id > 0, "Invalid vertex id provided: %s", id); - return IDManager.VertexIDType.NormalVertex.removePadding(id); - } - - /** - * Converts a JanusGraph vertex id of a given vertex to the user provided id as the inverse mapping of {@link #toVertexId(long)}. 
- * - * @param v Vertex - * @return original user provided id - * @deprecated Use {@link org.janusgraph.graphdb.idmanagement.IDManager#fromVertexId(long)} - */ - public static long fromVertexID(JanusGraphVertex v) { - Preconditions.checkArgument(v.hasId(), "Invalid vertex provided: %s", v); - return fromVertexId(v.longId()); - } -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/Backend.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/Backend.java index c84d284d21..d2e639a370 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/Backend.java +++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/Backend.java @@ -20,7 +20,6 @@ import org.apache.commons.lang3.StringUtils; import org.janusgraph.core.JanusGraphConfigurationException; import org.janusgraph.core.JanusGraphException; -import org.janusgraph.core.schema.JanusGraphManagement; import org.janusgraph.diskstorage.configuration.BasicConfiguration; import org.janusgraph.diskstorage.configuration.ConfigOption; import org.janusgraph.diskstorage.configuration.Configuration; @@ -48,6 +47,7 @@ import org.janusgraph.diskstorage.keycolumnvalue.cache.NoKCVSCache; import org.janusgraph.diskstorage.keycolumnvalue.keyvalue.OrderedKeyValueStoreManager; import org.janusgraph.diskstorage.keycolumnvalue.keyvalue.OrderedKeyValueStoreManagerAdapter; +import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanJobFuture; import org.janusgraph.diskstorage.keycolumnvalue.scan.StandardScanner; import org.janusgraph.diskstorage.locking.Locker; import org.janusgraph.diskstorage.locking.LockerProvider; @@ -436,7 +436,7 @@ private StandardScanner.Builder buildStoreIndexScanJob(String storeName) { .setWorkBlockSize(this.configuration.get(PAGE_SIZE)); } - public JanusGraphManagement.IndexJobFuture getScanJobStatus(Object jobId) { + public ScanJobFuture getScanJobStatus(Object jobId) { return scanner.getRunningJob(jobId); } diff --git 
a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/indexing/IndexTransaction.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/indexing/IndexTransaction.java index 7adce66b28..9e46824dab 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/indexing/IndexTransaction.java +++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/indexing/IndexTransaction.java @@ -22,14 +22,12 @@ import org.janusgraph.diskstorage.util.BackendOperation; import org.janusgraph.graphdb.database.idhandling.VariableLong; import org.janusgraph.graphdb.database.serialize.DataOutput; -import org.janusgraph.graphdb.util.StreamIterable; import java.time.Duration; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.Callable; -import java.util.stream.Collectors; import java.util.stream.Stream; /** @@ -98,14 +96,6 @@ public void register(String store, String key, KeyInformation information) throw index.register(store,key,information,indexTx); } - /** - * @deprecated use {@link #queryStream(IndexQuery query)} instead. - */ - @Deprecated - public List query(IndexQuery query) throws BackendException { - return queryStream(query).collect(Collectors.toList()); - } - public Stream queryStream(IndexQuery query) throws BackendException { return index.query(query, keyInformation, indexTx); } @@ -114,14 +104,6 @@ public Long queryCount(IndexQuery query) throws BackendException { return index.queryCount(query, keyInformation, indexTx); } - /** - * @deprecated use {@link #queryStream(RawQuery query)} instead. 
- */ - @Deprecated - public Iterable> query(RawQuery query) throws BackendException { - return new StreamIterable<>(index.query(query, keyInformation,indexTx)); - } - public Stream> queryStream(RawQuery query) throws BackendException { return index.query(query, keyInformation,indexTx); } diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/CompletedJobFuture.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/CompletedJobFuture.java new file mode 100644 index 0000000000..3af39926eb --- /dev/null +++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/CompletedJobFuture.java @@ -0,0 +1,57 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph.diskstorage.keycolumnvalue.scan; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CompletedJobFuture implements ScanJobFuture { + private final ScanMetrics completedJobMetrics; + + public CompletedJobFuture(ScanMetrics completedJobMetrics) { + this.completedJobMetrics = completedJobMetrics; + } + + @Override + public ScanMetrics getIntermediateResult() { + return completedJobMetrics; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public ScanMetrics get() throws InterruptedException, ExecutionException { + return completedJobMetrics; + } + + @Override + public ScanMetrics get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return completedJobMetrics; + } +} diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/EmptyScanJobFuture.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/EmptyScanJobFuture.java new file mode 100644 index 0000000000..7bd88c6169 --- /dev/null +++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/EmptyScanJobFuture.java @@ -0,0 +1,52 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.diskstorage.keycolumnvalue.scan; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class EmptyScanJobFuture implements ScanJobFuture { + + @Override + public ScanMetrics getIntermediateResult() { + return null; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public ScanMetrics get() throws InterruptedException, ExecutionException { + return null; + } + + @Override + public ScanMetrics get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return null; + } +} diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/FailedJobFuture.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/FailedJobFuture.java new file mode 100644 index 0000000000..0c7f9d65f2 --- /dev/null +++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/FailedJobFuture.java @@ -0,0 +1,57 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph.diskstorage.keycolumnvalue.scan; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class FailedJobFuture implements ScanJobFuture { + private final Throwable cause; + + public FailedJobFuture(Throwable cause) { + this.cause = cause; + } + + @Override + public ScanMetrics getIntermediateResult() throws ExecutionException { + throw new ExecutionException(cause); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public ScanMetrics get() throws InterruptedException, ExecutionException { + throw new ExecutionException(cause); + } + + @Override + public ScanMetrics get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + throw new ExecutionException(cause); + } +} diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/ScanJobFuture.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/ScanJobFuture.java new file mode 100644 index 0000000000..b1c69ad26a --- /dev/null +++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/ScanJobFuture.java @@ -0,0 +1,45 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.diskstorage.keycolumnvalue.scan; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + +public interface ScanJobFuture extends Future { + + /** + * Returns a set of potentially incomplete and still-changing metrics + * for this job. This is not guaranteed to be the same object as the + * one returned by {@link #get()}, nor will the metrics visible through + * the object returned by this method necessarily eventually converge + * on the same values in the object returned by {@link #get()}, though + * the implementation should attempt to provide both properties when + * practical. + *

+ * The metrics visible through the object returned by this method may + * also change their values between reads. In other words, this is not + * necessarily an immutable snapshot. + *

+ * If the job has failed and the implementation is capable of + * quickly detecting that, then the implementation should throw an + * {@code ExecutionException}. Returning metrics in case of failure is + * acceptable, but throwing an exception is preferred. + * + * @return metrics for a potentially still-running job + * @throws ExecutionException if the job threw an exception + */ + ScanMetrics getIntermediateResult() throws ExecutionException; +} + diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScanner.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScanner.java index 1c576549f0..9499294cfd 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScanner.java +++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScanner.java @@ -16,7 +16,6 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import org.janusgraph.core.schema.JanusGraphManagement; import org.janusgraph.diskstorage.BackendException; import org.janusgraph.diskstorage.configuration.Configuration; import org.janusgraph.diskstorage.configuration.MergedConfiguration; @@ -85,7 +84,7 @@ private void addJob(Object jobId, StandardScannerExecutor executor) { Preconditions.checkArgument(runningJobs.putIfAbsent(jobId, executor) == null,"Another job with the same id is already running: %s",jobId); } - public JanusGraphManagement.IndexJobFuture getRunningJob(Object jobId) { + public ScanJobFuture getRunningJob(Object jobId) { return runningJobs.get(jobId); } @@ -172,7 +171,7 @@ public Builder setFinishJob(Consumer finishJob) { return this; } - public JanusGraphManagement.IndexJobFuture execute() throws BackendException { + public ScanJobFuture execute() throws BackendException { Preconditions.checkNotNull(job,"Need to specify a job to execute"); 
Preconditions.checkArgument(StringUtils.isNotBlank(dbName),"Need to specify a database to execute against"); Preconditions.checkNotNull(times,"Need to configure the timestamp provider for this job"); diff --git a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScannerExecutor.java b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScannerExecutor.java index 0e01948685..8efd3ed2f4 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScannerExecutor.java +++ b/janusgraph-core/src/main/java/org/janusgraph/diskstorage/keycolumnvalue/scan/StandardScannerExecutor.java @@ -16,7 +16,6 @@ import com.google.common.base.Preconditions; import com.google.common.util.concurrent.AbstractFuture; -import org.janusgraph.core.schema.JanusGraphManagement; import org.janusgraph.diskstorage.BackendException; import org.janusgraph.diskstorage.EntryList; import org.janusgraph.diskstorage.StaticBuffer; @@ -41,7 +40,7 @@ /** * @author Matthias Broecheler (me@matthiasb.com) */ -class StandardScannerExecutor extends AbstractFuture implements JanusGraphManagement.IndexJobFuture, Runnable { +class StandardScannerExecutor extends AbstractFuture implements ScanJobFuture, Runnable { private static final Logger log = LoggerFactory.getLogger(StandardScannerExecutor.class); diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/configuration/GraphDatabaseConfiguration.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/configuration/GraphDatabaseConfiguration.java index 3bd1867338..b5b0fd1b37 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/configuration/GraphDatabaseConfiguration.java +++ b/janusgraph-core/src/main/java/org/janusgraph/graphdb/configuration/GraphDatabaseConfiguration.java @@ -376,7 +376,7 @@ public boolean apply(@Nullable String s) { "How long, in milliseconds, database-level cache will keep entries after flushing them. 
" + "This option is only useful on distributed storage backends that are capable of acknowledging writes " + "without necessarily making them immediately visible.", - ConfigOption.Type.GLOBAL_OFFLINE, 50); + ConfigOption.Type.MASKABLE, 50); /** * The default expiration time for elements held in the database level cache. This is the time period before @@ -389,7 +389,7 @@ public boolean apply(@Nullable String s) { "Entries are evicted when they reach this age even if the cache has room to spare. " + "Set to 0 to disable expiration (cache entries live forever or until memory pressure " + "triggers eviction when set to 0).", - ConfigOption.Type.GLOBAL_OFFLINE, 10000L); + ConfigOption.Type.MASKABLE, 10000L); /** * Configures the maximum number of recently-used vertices cached by a transaction. The smaller the cache size, the diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/database/cache/StandardSchemaCache.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/database/cache/StandardSchemaCache.java index 76699a22b6..16eac20f70 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/database/cache/StandardSchemaCache.java +++ b/janusgraph-core/src/main/java/org/janusgraph/graphdb/database/cache/StandardSchemaCache.java @@ -18,7 +18,7 @@ import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import org.apache.tinkerpop.gremlin.structure.Direction; -import org.cliffc.high_scale_lib.NonBlockingHashMapLong; +import org.jctools.maps.NonBlockingHashMapLong; import org.janusgraph.diskstorage.EntryList; import org.janusgraph.graphdb.idmanagement.IDManager; import org.janusgraph.graphdb.relations.EdgeDirection; diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/database/management/ManagementSystem.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/database/management/ManagementSystem.java index 3d023a39b8..124cc84204 100644 --- 
a/janusgraph-core/src/main/java/org/janusgraph/graphdb/database/management/ManagementSystem.java +++ b/janusgraph-core/src/main/java/org/janusgraph/graphdb/database/management/ManagementSystem.java @@ -57,6 +57,8 @@ import org.janusgraph.diskstorage.configuration.TransactionalConfiguration; import org.janusgraph.diskstorage.configuration.UserModifiableConfiguration; import org.janusgraph.diskstorage.configuration.backend.KCVSConfiguration; +import org.janusgraph.diskstorage.keycolumnvalue.scan.EmptyScanJobFuture; +import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanJobFuture; import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics; import org.janusgraph.diskstorage.keycolumnvalue.scan.StandardScanner; import org.janusgraph.diskstorage.log.Log; @@ -70,6 +72,7 @@ import org.janusgraph.graphdb.internal.Order; import org.janusgraph.graphdb.internal.Token; import org.janusgraph.graphdb.olap.VertexJobConverter; +import org.janusgraph.graphdb.olap.job.GhostVertexRemover; import org.janusgraph.graphdb.olap.job.IndexRemoveJob; import org.janusgraph.graphdb.olap.job.IndexRepairJob; import org.janusgraph.graphdb.query.QueryUtil; @@ -111,9 +114,6 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -839,9 +839,13 @@ public JanusGraphIndex buildMixedIndex(String backingIndex) { /* -------------- Schema Update --------------- */ + @Override + public ScanJobFuture updateIndex(Index index, SchemaAction updateAction) { + return updateIndex(index, updateAction, Runtime.getRuntime().availableProcessors()); + } @Override - public IndexJobFuture updateIndex(Index index, SchemaAction updateAction) { + public ScanJobFuture updateIndex(Index index, SchemaAction updateAction, int numOfThreads) 
{ Preconditions.checkArgument(index != null, "Need to provide an index"); Preconditions.checkArgument(updateAction != null, "Need to provide update action"); @@ -878,19 +882,20 @@ public IndexJobFuture updateIndex(Index index, SchemaAction updateAction) { IndexIdentifier indexId = new IndexIdentifier(index); StandardScanner.Builder builder; - IndexJobFuture future; + ScanJobFuture future; switch (updateAction) { case REGISTER_INDEX: setStatus(schemaVertex, SchemaStatus.INSTALLED, keySubset); updatedTypes.add(schemaVertex); updatedTypes.addAll(dependentTypes); setUpdateTrigger(new UpdateStatusTrigger(graph, schemaVertex, SchemaStatus.REGISTERED, keySubset)); - future = new EmptyIndexJobFuture(); + future = new EmptyScanJobFuture(); break; case REINDEX: builder = graph.getBackend().buildEdgeScanJob(); builder.setFinishJob(indexId.getIndexJobFinisher(graph, SchemaAction.ENABLE_INDEX)); builder.setJobId(indexId); + builder.setNumProcessingThreads(numOfThreads); builder.setJob(VertexJobConverter.convert(graph, new IndexRepairJob(indexId.indexName, indexId.relationTypeName))); try { future = builder.execute(); @@ -902,14 +907,14 @@ public IndexJobFuture updateIndex(Index index, SchemaAction updateAction) { setStatus(schemaVertex, SchemaStatus.ENABLED, keySubset); updatedTypes.add(schemaVertex); if (!keySubset.isEmpty()) updatedTypes.addAll(dependentTypes); - future = new EmptyIndexJobFuture(); + future = new EmptyScanJobFuture(); break; case DISABLE_INDEX: setStatus(schemaVertex, SchemaStatus.INSTALLED, keySubset); updatedTypes.add(schemaVertex); if (!keySubset.isEmpty()) updatedTypes.addAll(dependentTypes); setUpdateTrigger(new UpdateStatusTrigger(graph, schemaVertex, SchemaStatus.DISABLED, keySubset)); - future = new EmptyIndexJobFuture(); + future = new EmptyScanJobFuture(); break; case REMOVE_INDEX: if (index instanceof RelationTypeIndex) { @@ -922,6 +927,7 @@ public IndexJobFuture updateIndex(Index index, SchemaAction updateAction) { } 
builder.setFinishJob(indexId.getIndexJobFinisher()); builder.setJobId(indexId); + builder.setNumProcessingThreads(numOfThreads); builder.setJob(new IndexRemoveJob(graph, indexId.indexName, indexId.relationTypeName)); try { future = builder.execute(); @@ -974,39 +980,6 @@ public Boolean call() { } } - private static class EmptyIndexJobFuture implements IndexJobFuture { - - @Override - public ScanMetrics getIntermediateResult() { - return null; - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - return false; - } - - @Override - public boolean isCancelled() { - return false; - } - - @Override - public boolean isDone() { - return true; - } - - @Override - public ScanMetrics get() throws InterruptedException, ExecutionException { - return null; - } - - @Override - public ScanMetrics get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - return null; - } - } - private static class UpdateStatusTrigger implements Callable { private static final Logger log = @@ -1127,7 +1100,7 @@ private void setStatusEdges(JanusGraphSchemaVertex vertex, SchemaStatus status, } @Override - public IndexJobFuture getIndexJobStatus(Index index) { + public ScanJobFuture getIndexJobStatus(Index index) { IndexIdentifier indexId = new IndexIdentifier(index); return graph.getBackend().getScanJobStatus(indexId); } @@ -1378,6 +1351,25 @@ public void setTTL(final JanusGraphSchemaType type, setTypeModifier(type, ModifierType.TTL, ttlSeconds); } + @Override + public ScanJobFuture removeGhostVertices(int numOfThreads) { + StandardScanner.Builder builder = graph.getBackend().buildEdgeScanJob(); + builder.setJob(new GhostVertexRemover(graph)); + builder.setNumProcessingThreads(numOfThreads); + ScanJobFuture future; + try { + future = builder.execute(); + } catch (BackendException e) { + throw new JanusGraphException(e); + } + return future; + } + + @Override + public ScanJobFuture removeGhostVertices() { + return 
removeGhostVertices(Runtime.getRuntime().availableProcessors()); + } + private void setTypeModifier(final JanusGraphSchemaElement element, final ModifierType modifierType, final Object value) { diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/olap/computer/FulgoraGraphComputer.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/olap/computer/FulgoraGraphComputer.java index 5978983940..2029c4a845 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/olap/computer/FulgoraGraphComputer.java +++ b/janusgraph-core/src/main/java/org/janusgraph/graphdb/olap/computer/FulgoraGraphComputer.java @@ -83,7 +83,7 @@ public class FulgoraGraphComputer implements JanusGraphComputer { private FulgoraVertexMemory vertexMemory; private boolean executed = false; - private int numThreads = 1;//Math.max(1,Runtime.getRuntime().availableProcessors()); + private int numThreads = Runtime.getRuntime().availableProcessors(); private final int readBatchSize; private final int writeBatchSize; diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/olap/computer/FulgoraVertexMemory.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/olap/computer/FulgoraVertexMemory.java index 2e4e64d8ad..ce340cb791 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/olap/computer/FulgoraVertexMemory.java +++ b/janusgraph-core/src/main/java/org/janusgraph/graphdb/olap/computer/FulgoraVertexMemory.java @@ -21,7 +21,7 @@ import org.apache.tinkerpop.gremlin.process.computer.MessageScope; import org.apache.tinkerpop.gremlin.process.computer.VertexComputeKey; import org.apache.tinkerpop.gremlin.process.computer.VertexProgram; -import org.cliffc.high_scale_lib.NonBlockingHashMapLong; +import org.jctools.maps.NonBlockingHashMapLong; import org.janusgraph.diskstorage.EntryList; import org.janusgraph.graphdb.idmanagement.IDManager; diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/query/graph/IndexQueryBuilder.java 
b/janusgraph-core/src/main/java/org/janusgraph/graphdb/query/graph/IndexQueryBuilder.java index b529d0feb1..e55012c504 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/query/graph/IndexQueryBuilder.java +++ b/janusgraph-core/src/main/java/org/janusgraph/graphdb/query/graph/IndexQueryBuilder.java @@ -29,7 +29,6 @@ import org.janusgraph.graphdb.internal.ElementCategory; import org.janusgraph.graphdb.query.BaseQuery; import org.janusgraph.graphdb.transaction.StandardJanusGraphTx; -import org.janusgraph.graphdb.util.StreamIterable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -223,36 +222,18 @@ private Long executeTotals(ElementCategory resultType) { return serializer.executeTotals(this,resultType,tx.getTxHandle(),tx); } - @Deprecated - @Override - public Iterable> vertices() { - return new StreamIterable<>(vertexStream()); - } - @Override public Stream> vertexStream() { setPrefixInternal(VERTEX_PREFIX); return execute(ElementCategory.VERTEX, JanusGraphVertex.class); } - @Deprecated - @Override - public Iterable> edges() { - return new StreamIterable<>(edgeStream()); - } - @Override public Stream> edgeStream() { setPrefixInternal(EDGE_PREFIX); return execute(ElementCategory.EDGE, JanusGraphEdge.class); } - @Deprecated - @Override - public Iterable> properties() { - return new StreamIterable<>(propertyStream()); - } - @Override public Stream> propertyStream() { setPrefixInternal(PROPERTY_PREFIX); diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/EdgeLabelDefinition.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/EdgeLabelDefinition.java deleted file mode 100644 index 7e4c1057a4..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/EdgeLabelDefinition.java +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb.schema; - -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.janusgraph.core.EdgeLabel; -import org.janusgraph.core.Multiplicity; - -/** - * @author Matthias Broecheler (me@matthiasb.com) - * - * @deprecated part of the management revamp in JG, see https://github.com/JanusGraph/janusgraph/projects/3. - */ -@Deprecated -public class EdgeLabelDefinition extends RelationTypeDefinition { - - private final boolean unidirected; - - public EdgeLabelDefinition(String name, long id, Multiplicity multiplicity, boolean unidirected) { - super(name, id, multiplicity); - this.unidirected = unidirected; - } - - public EdgeLabelDefinition(EdgeLabel label) { - this(label.name(),label.longId(),label.multiplicity(),label.isUnidirected()); - } - - public boolean isDirected() { - return !unidirected; - } - - public boolean isUnidirected() { - return unidirected; - } - - @Override - public boolean isUnidirected(Direction dir) { - if (unidirected) return dir==Direction.OUT; - else return dir==Direction.BOTH; - } - - - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/PropertyKeyDefinition.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/PropertyKeyDefinition.java deleted file mode 100644 index 7237ba13f7..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/PropertyKeyDefinition.java +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb.schema; - -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.janusgraph.core.Cardinality; -import org.janusgraph.core.Multiplicity; -import org.janusgraph.core.PropertyKey; - -/** - * @author Matthias Broecheler (me@matthiasb.com) - * - * @deprecated part of the management revamp in JG, see https://github.com/JanusGraph/janusgraph/projects/3. - */ -@Deprecated -public class PropertyKeyDefinition extends RelationTypeDefinition { - - private final Class dataType; - - public PropertyKeyDefinition(String name, long id, Cardinality cardinality, Class dataType) { - this(name,id,Multiplicity.convert(cardinality),dataType); - } - - public PropertyKeyDefinition(String name, long id, Multiplicity multiplicity, Class dataType) { - super(name, id, multiplicity); - this.dataType = dataType; - } - - public PropertyKeyDefinition(PropertyKey key) { - this(key.name(),key.longId(),key.cardinality(),key.dataType()); - } - - public Class getDataType() { - return dataType; - } - - @Override - public boolean isUnidirected(Direction dir) { - return dir==Direction.OUT; - } - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/RelationTypeDefinition.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/RelationTypeDefinition.java deleted file mode 100644 index f27307e79d..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/RelationTypeDefinition.java +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// 
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb.schema; - -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.janusgraph.core.Cardinality; -import org.janusgraph.core.Multiplicity; - -/** - * @author Matthias Broecheler (me@matthiasb.com) - * - * @deprecated part of the management revamp in JG, see https://github.com/JanusGraph/janusgraph/projects/3. - */ -@Deprecated -public abstract class RelationTypeDefinition extends SchemaElementDefinition { - - private final Multiplicity multiplicity; - - public RelationTypeDefinition(String name, long id, Multiplicity multiplicity) { - super(name, id); - this.multiplicity = multiplicity; - } - - public Multiplicity getMultiplicity() { - return multiplicity; - } - - public Cardinality getCardinality() { - return multiplicity.getCardinality(); - } - - public abstract boolean isUnidirected(Direction dir); - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaContainer.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaContainer.java deleted file mode 100644 index bcf1727f7c..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaContainer.java +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb.schema; - -import com.google.common.collect.Iterables; -import com.google.common.collect.Maps; -import org.janusgraph.core.EdgeLabel; -import org.janusgraph.core.JanusGraph; -import org.janusgraph.core.PropertyKey; -import org.janusgraph.core.VertexLabel; -import org.janusgraph.core.schema.JanusGraphManagement; - -import java.util.Map; - -/** - * @author Matthias Broecheler (me@matthiasb.com) - * - * @deprecated part of the management revamp in JG, see https://github.com/JanusGraph/janusgraph/projects/3. - */ -@Deprecated -public class SchemaContainer implements SchemaProvider { - - private final Map vertexLabels; - private final Map relationTypes; - - public SchemaContainer(JanusGraph graph) { - vertexLabels = Maps.newHashMap(); - relationTypes = Maps.newHashMap(); - JanusGraphManagement management = graph.openManagement(); - - try { - for (VertexLabel vl : management.getVertexLabels()) { - VertexLabelDefinition vld = new VertexLabelDefinition(vl); - vertexLabels.put(vld.getName(),vld); - } - - for (EdgeLabel el : management.getRelationTypes(EdgeLabel.class)) { - EdgeLabelDefinition eld = new EdgeLabelDefinition(el); - relationTypes.put(eld.getName(),eld); - } - for (PropertyKey pk : management.getRelationTypes(PropertyKey.class)) { - PropertyKeyDefinition pkd = new PropertyKeyDefinition(pk); - relationTypes.put(pkd.getName(), pkd); - } - } finally { - management.rollback(); - } - - } - - public Iterable getVertexLabels() { - return vertexLabels.values(); - } - - @Override - public VertexLabelDefinition 
getVertexLabel(String name) { - return vertexLabels.get(name); - } - - public boolean containsVertexLabel(String name) { - return getVertexLabel(name)!=null; - } - - public Iterable getPropertyKeys() { - return Iterables.filter(relationTypes.values(),PropertyKeyDefinition.class); - } - - public Iterable getEdgeLabels() { - return Iterables.filter(relationTypes.values(),EdgeLabelDefinition.class); - } - - @Override - public RelationTypeDefinition getRelationType(String name) { - return relationTypes.get(name); - } - - public boolean containsRelationType(String name) { - return getRelationType(name)!=null; - } - - @Override - public EdgeLabelDefinition getEdgeLabel(String name) { - RelationTypeDefinition def = getRelationType(name); - if (def!=null && !(def instanceof EdgeLabelDefinition)) - throw new IllegalArgumentException("Not an edge label but property key: " + name); - return (EdgeLabelDefinition)def; - } - - @Override - public PropertyKeyDefinition getPropertyKey(String name) { - RelationTypeDefinition def = getRelationType(name); - if (def!=null && !(def instanceof PropertyKeyDefinition)) - throw new IllegalArgumentException("Not a property key but edge label: " + name); - return (PropertyKeyDefinition)def; - } - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaElementDefinition.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaElementDefinition.java deleted file mode 100644 index 81de86b27d..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaElementDefinition.java +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb.schema; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.StringUtils; - -/** - * @author Matthias Broecheler (me@matthiasb.com) - * - * @deprecated part of the management revamp in JG, see https://github.com/JanusGraph/janusgraph/projects/3. - */ -@Deprecated -public class SchemaElementDefinition { - - private final String name; - private final long id; - - - public SchemaElementDefinition(String name, long id) { - Preconditions.checkArgument(StringUtils.isNotBlank(name), "name cannot be blank"); - this.name = name; - this.id = id; - } - - public String getName() { - return name; - } - - public long getLongId() { - return id; - } - - - @Override - public int hashCode() { - return name.hashCode(); - } - - @Override - public boolean equals(Object oth) { - if (this==oth) return true; - else if (oth==null || !getClass().isInstance(oth)) return false; - return name.equals(((SchemaElementDefinition)oth).name); - } - - @Override - public String toString() { - return name; - } - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaProvider.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaProvider.java deleted file mode 100644 index 67709b054e..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/SchemaProvider.java +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the 
License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb.schema; - -/** - * @author Matthias Broecheler (me@matthiasb.com) - * - * @deprecated part of the management revamp in JG, see https://github.com/JanusGraph/janusgraph/projects/3. - */ -@Deprecated -public interface SchemaProvider { - - EdgeLabelDefinition getEdgeLabel(String name); - - PropertyKeyDefinition getPropertyKey(String name); - - RelationTypeDefinition getRelationType(String name); - - VertexLabelDefinition getVertexLabel(String name); - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/VertexLabelDefinition.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/VertexLabelDefinition.java deleted file mode 100644 index b6c4d4f267..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/schema/VertexLabelDefinition.java +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.graphdb.schema; - -import org.janusgraph.core.VertexLabel; - -/** - * @author Matthias Broecheler (me@matthiasb.com) - * - * @deprecated part of the management revamp in JG, see https://github.com/JanusGraph/janusgraph/projects/3. - */ -@Deprecated -public class VertexLabelDefinition extends SchemaElementDefinition { - - private final boolean isPartitioned; - private final boolean isStatic; - - public VertexLabelDefinition(String name, long id, boolean isPartitioned, boolean isStatic) { - super(name, id); - this.isPartitioned = isPartitioned; - this.isStatic = isStatic; - } - - public VertexLabelDefinition(VertexLabel vl) { - this(vl.name(),vl.longId(),vl.isPartitioned(),vl.isStatic()); - } - - public boolean isStatic() { - return isStatic; - } - - public boolean isPartitioned() { - return isPartitioned; - } - - public boolean hasDefaultConfiguration() { - return !isPartitioned && !isStatic; - } - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/transaction/StandardJanusGraphTx.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/transaction/StandardJanusGraphTx.java index af46f40e4a..cbfbc2ab8d 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/transaction/StandardJanusGraphTx.java +++ b/janusgraph-core/src/main/java/org/janusgraph/graphdb/transaction/StandardJanusGraphTx.java @@ -23,7 +23,6 @@ import org.apache.tinkerpop.gremlin.structure.Direction; import org.apache.tinkerpop.gremlin.structure.Property; import org.apache.tinkerpop.gremlin.structure.VertexProperty; -import org.cliffc.high_scale_lib.NonBlockingHashMap; import org.janusgraph.core.Cardinality; import org.janusgraph.core.Connection; import org.janusgraph.core.EdgeLabel; @@ -142,6 +141,7 @@ import org.janusgraph.graphdb.vertices.StandardVertex; import org.janusgraph.util.datastructures.Retriever; import org.janusgraph.util.stats.MetricManager; +import org.jctools.maps.NonBlockingHashMap; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/transaction/vertexcache/GuavaVertexCache.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/transaction/vertexcache/GuavaVertexCache.java index ab6bea903b..9291942513 100644 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/transaction/vertexcache/GuavaVertexCache.java +++ b/janusgraph-core/src/main/java/org/janusgraph/graphdb/transaction/vertexcache/GuavaVertexCache.java @@ -19,10 +19,10 @@ import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalCause; import com.google.common.cache.RemovalListener; -import org.cliffc.high_scale_lib.NonBlockingHashMapLong; import org.janusgraph.graphdb.internal.InternalVertex; import org.janusgraph.graphdb.vertices.AbstractVertex; import org.janusgraph.util.datastructures.Retriever; +import org.jctools.maps.NonBlockingHashMapLong; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/AllEdgesIterable.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/AllEdgesIterable.java deleted file mode 100644 index 2b63b94626..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/AllEdgesIterable.java +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.graphdb.util; - -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Vertex; - -import java.util.Iterator; -import java.util.Set; - -/** - * Constructs {@link Iterable}s over all {@link org.janusgraph.core.JanusGraphEdge}s connecting a provided set of vertices. - *

- * Given a set of vertices, one may be interested in all edges that are contained in the subgraph spanned - * by those vertices. - * - * @author Matthias Bröcheler (me@matthiasb.com); - */ -@Deprecated -public class AllEdgesIterable { - - private AllEdgesIterable() { - } - - /** - * Returns an iterable over all edges incident on the vertices returned by the given Iterable over vertices. - *

- * Note that this method assumes that the given Iterable will return all vertices in the connected component, - * otherwise the behavior of this method is undefined. - * - * @param vertices Iterable over a set of vertices defining a connected component. - * @return Iterable over all edges contained in this component. - */ - public static Iterable of(Iterable vertices) { - return new IterableBased(vertices); - } - - /** - * Returns an iterable over all edges contained in the subgraph spanned by the given vertices. - *

- * This method will return all edges whose end points are contained in the given set of vertices. - * - * @param vertices Set of vertices - * @return All edges contained in the subgraph spanned by the set of vertices. - */ - public static Iterable of(Set vertices) { - return new SetBased(vertices); - } - - - private static class IterableBased implements Iterable { - - private final Iterable vertices; - - public IterableBased(Iterable vertices) { - this.vertices = vertices; - } - - @Override - public Iterator iterator() { - return new AllEdgesIterator(vertices.iterator()); - } - - } - - private static class SetBased implements Iterable { - - private final Set vertices; - - public SetBased(Set vertices) { - this.vertices = vertices; - } - - @Override - public Iterator iterator() { - return new AllEdgesIterator(vertices); - } - - } - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/AllEdgesIterator.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/AllEdgesIterator.java deleted file mode 100644 index 74b9b9111f..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/AllEdgesIterator.java +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.graphdb.util; - -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.janusgraph.core.JanusGraphEdge; - -import java.util.Collections; -import java.util.Iterator; -import java.util.NoSuchElementException; -import java.util.Set; - -/** - * Defines an {@link java.util.Iterator} over all {@link org.janusgraph.core.JanusGraphEdge}s connecting a provided set of vertices. - *

- * Given a set of vertices, one may be interested in all edges that are contained in the subgraph spanned - * by those vertices. This iterator will return these edges. - * - * @author Matthias Bröcheler (me@matthiasb.com); - */ -@Deprecated -public class AllEdgesIterator implements Iterator { - - private final Set vertices; - private final Iterator vertexIterator; - - private Iterator currentEdges = Collections.emptyIterator(); - - private Edge next; - - /** - * Returns an iterator over all edges incident on the vertices returned by the given Iterable over vertices. - *

- * Note that this method assumes that the given Iterable will return all vertices in the connected component, - * otherwise the behavior of this method is undefined. - * - * @param vertexIterator Iterator over a set of vertices defining a connected component. - */ - public AllEdgesIterator(Iterator vertexIterator) { - this.vertexIterator = vertexIterator; - this.vertices = null; - next = findNext(); - } - - /** - * Returns an iterator over all edges contained in the subgraph spanned by the given vertices. - *

- * This method will return all edges whose end points are contained in the given set of vertices. - * - * @param vertices Set of vertices - */ - public AllEdgesIterator(Set vertices) { - this.vertexIterator = vertices.iterator(); - this.vertices = vertices; - next = findNext(); - } - - private Edge findNext() { - JanusGraphEdge rel = null; - while (rel == null) { - if (currentEdges.hasNext()) { - rel = (JanusGraphEdge)currentEdges.next(); - if (vertices != null && !vertices.contains(rel.vertex(Direction.IN))) - rel = null; - } else { - if (vertexIterator.hasNext()) { - Vertex nextVertex = vertexIterator.next(); - currentEdges = nextVertex.edges(Direction.OUT); - } else break; - } - } - return rel; - } - - @Override - public boolean hasNext() { - return next != null; - } - - @Override - public Edge next() { - if (next == null) throw new NoSuchElementException(); - Edge current = next; - next = findNext(); - return current; - } - - /** - * Removing edges is not supported! - * - * @throws UnsupportedOperationException if invoked - */ - @Override - public void remove() { - throw new UnsupportedOperationException("Removals are not supported"); - } - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/ConcurrentLRUCache.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/ConcurrentLRUCache.java deleted file mode 100644 index 04c76bcda1..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/ConcurrentLRUCache.java +++ /dev/null @@ -1,662 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY LongIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.janusgraph.graphdb.util; - -import org.cliffc.high_scale_lib.NonBlockingHashMapLong; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.ref.WeakReference; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.TreeSet; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantLock; - -/** - * A LRU cache implementation based upon ConcurrentHashMap and other techniques to reduce - * contention and synchronization overhead to utilize multiple CPU cores more effectively. - *

- * Note that the implementation does not follow a true LRU (least-recently-used) eviction - * strategy. Instead it strives to remove least recently used items but when the initial - * cleanup does not remove enough items to reach the 'acceptableWaterMark' limit, it can - * remove more items forcefully regardless of access order. - *

- * ADDED COMMENT: - * This class has been copied from the Apache Solr project (see license above). - * New method has been added "putIfAbsent" which has the same behaviour as normal CHM.putIfAbsent - * but cache maintenance operations are only done in context of a winner thread, in other words, - * whoever puts absent item to the map would run cache maintenance ops, everybody else would be just - * newly added item returned. - * - * @since solr 1.4 - */ -@Deprecated -public class ConcurrentLRUCache { - private static final Logger log = LoggerFactory.getLogger(ConcurrentLRUCache.class); - - private final NonBlockingHashMapLong> map; - private final int upperWaterMark, lowerWaterMark; - private final ReentrantLock markAndSweepLock = new ReentrantLock(true); - private boolean isCleaning = false; // not volatile... piggybacked on other volatile vars - private final boolean newThreadForCleanup; - private volatile boolean isAlive = true; - private final Stats stats = new Stats(); - private final int acceptableWaterMark; - private long oldestEntry = 0; // not volatile, only accessed in the cleaning method - private final EvictionListener evictionListener; - private CleanupThread cleanupThread; - - public ConcurrentLRUCache(int upperWaterMark, final int lowerWaterMark, int acceptableWatermark, - int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup, - EvictionListener evictionListener) { - if (upperWaterMark < 1) throw new IllegalArgumentException("upperWaterMark must be > 0"); - if (lowerWaterMark >= upperWaterMark) - throw new IllegalArgumentException("lowerWaterMark must be < upperWaterMark"); - map = new NonBlockingHashMapLong<>(initialSize); - newThreadForCleanup = runNewThreadForCleanup; - this.upperWaterMark = upperWaterMark; - this.lowerWaterMark = lowerWaterMark; - this.acceptableWaterMark = acceptableWatermark; - this.evictionListener = evictionListener; - if (runCleanupThread) { - cleanupThread = new CleanupThread(this); - 
cleanupThread.start(); - } - } - - public ConcurrentLRUCache(int size, int lowerWatermark) { - this(size, lowerWatermark, (int) Math.floor((lowerWatermark + size) / 2), - (int) Math.ceil(0.75 * size), false, false, null); - } - - public void setAlive(boolean live) { - isAlive = live; - } - - public V get(Long key) { - CacheEntry e = map.get(key); - if (e == null) { - if (isAlive) stats.missCounter.incrementAndGet(); - return null; - } - if (isAlive) e.lastAccessed = stats.accessCounter.incrementAndGet(); - return e.value; - } - - public boolean containsKey(Long key) { - return map.containsKey(key); - } - - public V remove(Long key) { - CacheEntry cacheEntry = map.remove(key); - if (cacheEntry != null) { - stats.size.decrementAndGet(); - return cacheEntry.value; - } - return null; - } - - public V putIfAbsent(Long key, V val) { - if (val == null) - return null; - - final CacheEntry e = new CacheEntry<>(key, val, stats.accessCounter.incrementAndGet()); - final CacheEntry oldCacheEntry = map.putIfAbsent(key, e); - - if (oldCacheEntry == null) // only do maintenance if we have put a new item to the map - doCacheMaintenanceOnPut(oldCacheEntry); - - return oldCacheEntry == null ? null : oldCacheEntry.value; - } - - public V put(Long key, V val) { - if (val == null) - return null; - - final CacheEntry e = new CacheEntry<>(key, val, stats.accessCounter.incrementAndGet()); - final CacheEntry oldCacheEntry = map.put(key, e); - - doCacheMaintenanceOnPut(oldCacheEntry); - return oldCacheEntry == null ? null : oldCacheEntry.value; - } - - private void doCacheMaintenanceOnPut(CacheEntry oldCacheEntry) { - int currentSize; - if (oldCacheEntry == null) { - currentSize = stats.size.incrementAndGet(); - } else { - currentSize = stats.size.get(); - } - if (isAlive) { - stats.putCounter.incrementAndGet(); - } else { - stats.nonLivePutCounter.incrementAndGet(); - } - - // Check if we need to clear out old entries from the cache. 
- // isCleaning variable is checked instead of markAndSweepLock.isLocked() - // for performance because every put invocation will check until - // the size is back to an acceptable level. - // - // There is a race between the check and the call to markAndSweep, but - // it's unimportant because markAndSweep actually acquires the lock or returns if it can't. - // - // Thread safety note: isCleaning read is piggybacked (comes after) other volatile reads - // in this method. - if (currentSize > upperWaterMark && !isCleaning) { - if (newThreadForCleanup) { - new Thread(this::markAndSweep).start(); - } else if (cleanupThread != null) { - cleanupThread.wakeThread(); - } else { - markAndSweep(); - } - } - } - - /** - * Removes items from the cache to bring the size down - * to an acceptable value ('acceptableWaterMark'). - *

- * It is done in two stages. In the first stage, least recently used items are evicted. - * If, after the first stage, the cache size is still greater than 'acceptableSize' - * config parameter, the second stage takes over. - *

- * The second stage is more intensive and tries to bring down the cache size - * to the 'lowerWaterMark' config parameter. - */ - private void markAndSweep() { - // if we want to keep at least 1000 entries, then timestamps of - // current through current-1000 are guaranteed not to be the oldest (but that does - // not mean there are 1000 entries in that group... it's actually anywhere between - // 1 and 1000). - // Also, if we want to remove 500 entries, then - // oldestEntry through oldestEntry+500 are guaranteed to be - // removed (however many there are there). - - if (!markAndSweepLock.tryLock()) return; - try { - long oldestEntry = this.oldestEntry; - isCleaning = true; - this.oldestEntry = oldestEntry; // volatile write to make isCleaning visible - - long timeCurrent = stats.accessCounter.get(); - int sz = stats.size.get(); - - int numRemoved = 0; - int numLongept = 0; - long newestEntry = timeCurrent; - long newNewestEntry = -1; - long newOldestEntry = Long.MAX_VALUE; - - int wantToLongeep = lowerWaterMark; - int wantToRemove = sz - lowerWaterMark; - - @SuppressWarnings("unchecked") // generic array's are annoying - CacheEntry[] entrySet = new CacheEntry[sz]; - int eSize = 0; - - // System.out.println("newestEntry="+newestEntry + " oldestEntry="+oldestEntry); - // System.out.println("items removed:" + numRemoved + " numLongept=" + numLongept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved)); - - for (CacheEntry ce : map.values()) { - // set lastAccessedCopy to avoid more volatile reads - ce.lastAccessedCopy = ce.lastAccessed; - long thisEntry = ce.lastAccessedCopy; - - // since the wantToLongeep group is likely to be bigger than wantToRemove, check it first - if (thisEntry > newestEntry - wantToLongeep) { - // this entry is guaranteed not to be in the bottom - // group, so do nothing. - numLongept++; - newOldestEntry = Math.min(thisEntry, newOldestEntry); - } else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group? 
- // this entry is guaranteed to be in the bottom group - // so immediately remove it from the map. - evictEntry(ce.key); - numRemoved++; - } else { - // This entry *could* be in the bottom group. - // Collect these entries to avoid another full pass... this is wasted - // effort if enough entries are normally removed in this first pass. - // An alternate impl could make a full second pass. - if (eSize < entrySet.length - 1) { - entrySet[eSize++] = ce; - newNewestEntry = Math.max(thisEntry, newNewestEntry); - newOldestEntry = Math.min(thisEntry, newOldestEntry); - } - } - } - - // System.out.println("items removed:" + numRemoved + " numLongept=" + numLongept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved)); - // TODO: allow this to be customized in the constructor? - int numPasses = 1; // maximum number of linear passes over the data - - // if we didn't remove enough entries, then make more passes - // over the values we collected, with updated min and max values. - while (sz - numRemoved > acceptableWaterMark && --numPasses >= 0) { - - oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry; - newOldestEntry = Long.MAX_VALUE; - newestEntry = newNewestEntry; - newNewestEntry = -1; - wantToLongeep = lowerWaterMark - numLongept; - wantToRemove = sz - lowerWaterMark - numRemoved; - - // iterate backward to make it easy to remove items. - for (int i = eSize - 1; i >= 0; i--) { - CacheEntry ce = entrySet[i]; - long thisEntry = ce.lastAccessedCopy; - - if (thisEntry > newestEntry - wantToLongeep) { - // this entry is guaranteed not to be in the bottom - // group, so do nothing but remove it from the eset. - numLongept++; - // remove the entry by moving the last element to it's position - entrySet[i] = entrySet[eSize - 1]; - eSize--; - - newOldestEntry = Math.min(thisEntry, newOldestEntry); - - } else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group? 
- - // this entry is guaranteed to be in the bottom group - // so immediately remove it from the map. - evictEntry(ce.key); - numRemoved++; - - // remove the entry by moving the last element to it's position - entrySet[i] = entrySet[eSize - 1]; - eSize--; - } else { - // This entry *could* be in the bottom group, so keep it in the eset, - // and update the stats. - newNewestEntry = Math.max(thisEntry, newNewestEntry); - newOldestEntry = Math.min(thisEntry, newOldestEntry); - } - } - // System.out.println("items removed:" + numRemoved + " numLongept=" + numLongept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved)); - } - - - // if we still didn't remove enough entries, then make another pass while - // inserting into a priority queue - if (sz - numRemoved > acceptableWaterMark) { - - oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry; - newOldestEntry = Long.MAX_VALUE; - newestEntry = newNewestEntry; - wantToLongeep = lowerWaterMark - numLongept; - wantToRemove = sz - lowerWaterMark - numRemoved; - - final PQueue queue = new PQueue<>(wantToRemove); - - for (int i = eSize - 1; i >= 0; i--) { - CacheEntry ce = entrySet[i]; - long thisEntry = ce.lastAccessedCopy; - - if (thisEntry > newestEntry - wantToLongeep) { - // this entry is guaranteed not to be in the bottom - // group, so do nothing but remove it from the eset. - numLongept++; - // removal not necessary on last pass. - // eset[i] = eset[eSize-1]; - // eSize--; - - newOldestEntry = Math.min(thisEntry, newOldestEntry); - - } else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group? - // this entry is guaranteed to be in the bottom group - // so immediately remove it. - evictEntry(ce.key); - numRemoved++; - - // removal not necessary on last pass. - // eset[i] = eset[eSize-1]; - // eSize--; - } else { - // This entry *could* be in the bottom group. 
- // add it to the priority queue - - // everything in the priority queue will be removed, so keep track of - // the lowest value that ever comes back out of the queue. - - // first reduce the size of the priority queue to account for - // the number of items we have already removed while executing - // this loop so far. - queue.myMaxSize = sz - lowerWaterMark - numRemoved; - while (queue.size() > queue.myMaxSize && queue.size() > 0) { - CacheEntry otherEntry = queue.pop(); - newOldestEntry = Math.min(otherEntry.lastAccessedCopy, newOldestEntry); - } - if (queue.myMaxSize <= 0) break; - - Object o = queue.myInsertWithOverflow(ce); - if (o != null) { - newOldestEntry = Math.min(((CacheEntry) o).lastAccessedCopy, newOldestEntry); - } - } - } - - // Now delete everything in the priority queue. - // avoid using pop() since order doesn't matter anymore - for (CacheEntry ce : queue.getValues()) { - if (ce == null) continue; - evictEntry(ce.key); - numRemoved++; - } - - // System.out.println("items removed:" + numRemoved + " numLongept=" + numLongept + " initialQueueSize="+ wantToRemove + " finalQueueSize=" + queue.size() + " sz-numRemoved=" + (sz-numRemoved)); - } - - oldestEntry = newOldestEntry == Long.MAX_VALUE ? 
oldestEntry : newOldestEntry; - this.oldestEntry = oldestEntry; - } finally { - isCleaning = false; // set before markAndSweep.unlock() for visibility - markAndSweepLock.unlock(); - } - } - - private static class PQueue extends PriorityQueue> { - int myMaxSize; - final Object[] heap; - - PQueue(int maxSz) { - super(maxSz); - heap = getHeapArray(); - myMaxSize = maxSz; - } - - @SuppressWarnings("unchecked") - Iterable> getValues() { - return (Iterable) Collections.unmodifiableCollection(Arrays.asList(heap)); - } - - @Override - protected boolean lessThan(CacheEntry a, CacheEntry b) { - // reverse the parameter order so that the queue keeps the oldest items - return b.lastAccessedCopy < a.lastAccessedCopy; - } - - // necessary because maxSize is private in base class - @SuppressWarnings("unchecked") - public CacheEntry myInsertWithOverflow(CacheEntry element) { - if (size() < myMaxSize) { - add(element); - return null; - } else if (size() > 0 && !lessThan(element, (CacheEntry) heap[1])) { - CacheEntry ret = (CacheEntry) heap[1]; - heap[1] = element; - updateTop(); - return ret; - } else { - return element; - } - } - } - - - private void evictEntry(Long key) { - CacheEntry o = map.remove(key); - if (o == null) return; - stats.size.decrementAndGet(); - stats.evictionCounter.incrementAndGet(); - if (evictionListener != null) evictionListener.evictedEntry(o.key, o.value); - } - - /** - * Returns 'n' number of oldest accessed entries present in this cache. - *

- * This uses a TreeSet to collect the 'n' oldest items ordered by ascending last access time - * and returns a LinkedHashMap containing 'n' or less than 'n' entries. - * - * @param n the number of oldest items needed - * @return a LinkedHashMap containing 'n' or less than 'n' entries - */ - public Map getOldestAccessedItems(int n) { - final Map result = new LinkedHashMap<>(); - if (n <= 0) - return result; - final TreeSet> tree = new TreeSet<>(); - markAndSweepLock.lock(); - try { - for (Map.Entry> entry : map.entrySet()) { - CacheEntry ce = entry.getValue(); - ce.lastAccessedCopy = ce.lastAccessed; - if (tree.size() < n) { - tree.add(ce); - } else { - if (ce.lastAccessedCopy < tree.first().lastAccessedCopy) { - tree.remove(tree.first()); - tree.add(ce); - } - } - } - } finally { - markAndSweepLock.unlock(); - } - for (CacheEntry e : tree) { - result.put(e.key, e.value); - } - return result; - } - - public Map getLatestAccessedItems(int n) { - final Map result = new LinkedHashMap<>(); - if (n <= 0) - return result; - final TreeSet> tree = new TreeSet<>(); - // we need to grab the lock since we are changing lastAccessedCopy - markAndSweepLock.lock(); - try { - for (Map.Entry> entry : map.entrySet()) { - final CacheEntry ce = entry.getValue(); - ce.lastAccessedCopy = ce.lastAccessed; - if (tree.size() < n) { - tree.add(ce); - } else { - if (ce.lastAccessedCopy > tree.last().lastAccessedCopy) { - tree.remove(tree.last()); - tree.add(ce); - } - } - } - } finally { - markAndSweepLock.unlock(); - } - for (CacheEntry e : tree) { - result.put(e.key, e.value); - } - return result; - } - - public int size() { - return stats.size.get(); - } - - public void clear() { - map.clear(); - } - - public Map> getMap() { - return map; - } - - private static class CacheEntry implements Comparable> { - final Long key; - final V value; - volatile long lastAccessed; - long lastAccessedCopy = 0; - - - public CacheEntry(Long key, V value, long lastAccessed) { - this.key = key; - this.value 
= value; - this.lastAccessed = lastAccessed; - } - - public void setLastAccessed(long lastAccessed) { - this.lastAccessed = lastAccessed; - } - - @Override - public int compareTo(CacheEntry that) { - if (this.lastAccessedCopy == that.lastAccessedCopy) return 0; - return this.lastAccessedCopy < that.lastAccessedCopy ? 1 : -1; - } - - @Override - public int hashCode() { - return value.hashCode(); - } - - @Override - public boolean equals(Object obj) { - return value.equals(obj); - } - - @Override - public String toString() { - return "key: " + key + " value: " + value + " lastAccessed:" + lastAccessed; - } - } - - private boolean isDestroyed = false; - - public void destroy() { - try { - if (cleanupThread != null) { - cleanupThread.stopThread(); - } - } finally { - isDestroyed = true; - } - } - - public Stats getStats() { - return stats; - } - - public static class Stats { - private final AtomicLong accessCounter = new AtomicLong(0), - putCounter = new AtomicLong(0), - nonLivePutCounter = new AtomicLong(0), - missCounter = new AtomicLong(); - private final AtomicInteger size = new AtomicInteger(); - private final AtomicLong evictionCounter = new AtomicLong(); - - public long getCumulativeLookups() { - return (accessCounter.get() - putCounter.get() - nonLivePutCounter.get()) + missCounter.get(); - } - - public long getCumulativeHits() { - return accessCounter.get() - putCounter.get() - nonLivePutCounter.get(); - } - - public long getCumulativePuts() { - return putCounter.get(); - } - - public long getCumulativeEvictions() { - return evictionCounter.get(); - } - - public int getCurrentSize() { - return size.get(); - } - - public long getCumulativeNonLivePuts() { - return nonLivePutCounter.get(); - } - - public long getCumulativeMisses() { - return missCounter.get(); - } - - public void add(Stats other) { - accessCounter.addAndGet(other.accessCounter.get()); - putCounter.addAndGet(other.putCounter.get()); - nonLivePutCounter.addAndGet(other.nonLivePutCounter.get()); - 
missCounter.addAndGet(other.missCounter.get()); - evictionCounter.addAndGet(other.evictionCounter.get()); - size.set(Math.max(size.get(), other.size.get())); - } - } - - public interface EvictionListener { - void evictedEntry(Long key, V value); - } - - private static class CleanupThread extends Thread { - private final WeakReference cache; - - private boolean stop = false; - - public CleanupThread(ConcurrentLRUCache c) { - cache = new WeakReference<>(c); - this.setDaemon(true); - this.setName("ConcurrentLRUCleaner-" + getId()); - } - - @Override - public void run() { - while (true) { - synchronized (this) { - if (stop) break; - try { - this.wait(); - } catch (InterruptedException ignored) { - } - } - if (stop) break; - ConcurrentLRUCache c = cache.get(); - if (c == null) break; - c.markAndSweep(); - } - } - - void wakeThread() { - synchronized (this) { - this.notify(); - } - } - - void stopThread() { - synchronized (this) { - stop = true; - this.notify(); - } - } - } - - @Override - protected void finalize() throws Throwable { - try { - if (!isDestroyed) { - log.error("ConcurrentLRUCache was not destroyed prior to finalize(), indicates a bug -- POSSIBLE RESOURCE LEAK!!!"); - destroy(); - } - } finally { - super.finalize(); - } - } -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/PriorityQueue.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/PriorityQueue.java deleted file mode 100644 index 896a4b7f37..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/util/PriorityQueue.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.janusgraph.graphdb.util; - - -/** A PriorityQueue maintains a partial ordering of its elements such that the - * least element can always be found in constant time. Put()'s and pop()'s - * require log(size) time. - * - *

NOTE: This class will pre-allocate a full array of - * length maxSize+1 if instantiated via the - * {@link #PriorityQueue(int,boolean)} constructor with - * prepopulate set to true. - * -*/ -@Deprecated -public abstract class PriorityQueue { - private int size; - private final int maxSize; - private final T[] heap; - - public PriorityQueue(int maxSize) { - this(maxSize, true); - } - - @SuppressWarnings("unchecked") - public PriorityQueue(int maxSize, boolean prepopulate) { - size = 0; - int heapSize; - if (0 == maxSize) - // We allocate 1 extra to avoid if statement in top() - heapSize = 2; - else { - if (maxSize == Integer.MAX_VALUE) { - // Don't wrap heapSize to -1, in this case, which - // causes a confusing NegativeArraySizeException. - // Note that very likely this will simply then hit - // an OutOfMemoryError, but at least that's more indicative to - // caller that this values is too big. We don't +1 - // in this case, but it's very unlikely in practice - // one will actually insert this many objects into - // the PQ: - heapSize = Integer.MAX_VALUE; - } else { - // NOTE: we add +1 because all access to heap is - // 1-based not 0-based. heap[0] is unused. - heapSize = maxSize + 1; - } - } - heap = (T[]) new Object[heapSize]; // T is unbounded type, so this unchecked cast works always - this.maxSize = maxSize; - - if (prepopulate) { - // If sentinel objects are supported, populate the queue with them - T sentinel = getSentinelObject(); - if (sentinel != null) { - heap[1] = sentinel; - for (int i = 2; i < heap.length; i++) { - heap[i] = getSentinelObject(); - } - size = maxSize; - } - } - } - - /** Determines the ordering of objects in this priority queue. Subclasses - * must define this one method. - * @return true iff parameter a is less than parameter b. 
- */ - protected abstract boolean lessThan(T a, T b); - - /** - * This method can be overridden by extending classes to return a sentinel - * object which will be used by the {@link PriorityQueue#PriorityQueue(int,boolean)} - * constructor to fill the queue, so that the code which uses that queue can always - * assume it's full and only change the top without attempting to insert any new - * object.
- * - * Those sentinel values should always compare worse than any non-sentinel - * value (i.e., {@link #lessThan} should always favor the - * non-sentinel values).
- * - * By default, this method returns false, which means the queue will not be - * filled with sentinel values. Otherwise, the value returned will be used to - * pre-populate the queue. Adds sentinel values to the queue.
- * - * If this method is extended to return a non-null value, then the following - * usage pattern is recommended: - * - *

-   * // extends getSentinelObject() to return a non-null value.
-   * PriorityQueue<MyObject> pq = new MyQueue<MyObject>(numHits);
-   * // save the 'top' element, which is guaranteed to not be null.
-   * MyObject pqTop = pq.top();
-   * <...>
-   * // now in order to add a new element, which is 'better' than top (after 
-   * // you've verified it is better), it is as simple as:
-   * pqTop.change().
-   * pqTop = pq.updateTop();
-   * 
- * - * NOTE: if this method returns a non-null value, it will be called by - * the {@link PriorityQueue#PriorityQueue(int,boolean)} constructor - * {@link #size()} times, relying on a new object to be returned and will not - * check if it's null again. Therefore you should ensure any call to this - * method creates a new instance and behaves consistently, e.g., it cannot - * return null if it previously returned non-null. - * - * @return the sentinel object to use to pre-populate the queue, or null if - * sentinel objects are not supported. - */ - protected T getSentinelObject() { - return null; - } - - /** - * Adds an Object to a PriorityQueue in log(size) time. If one tries to add - * more objects than maxSize from initialize an - * {@link ArrayIndexOutOfBoundsException} is thrown. - * - * @return the new 'top' element in the queue. - */ - public final T add(T element) { - size++; - heap[size] = element; - upHeap(); - return heap[1]; - } - - /** - * Adds an Object to a PriorityQueue in log(size) time. - * It returns the object (if any) that was - * dropped off the heap because it was full. This can be - * the given parameter (in case it is smaller than the - * full heap's minimum, and couldn't be added), or another - * object that was previously the smallest value in the - * heap and now has been replaced by a larger one, or null - * if the queue wasn't yet full with maxSize elements. - */ - public T insertWithOverflow(T element) { - if (size < maxSize) { - add(element); - return null; - } else if (size > 0 && !lessThan(element, heap[1])) { - T ret = heap[1]; - heap[1] = element; - updateTop(); - return ret; - } else { - return element; - } - } - - /** Returns the least element of the PriorityQueue in constant time. */ - public final T top() { - // We don't need to check size here: if maxSize is 0, - // then heap is length 2 array with both entries null. - // If size is 0 then heap[1] is already null. 
- return heap[1]; - } - - /** Removes and returns the least element of the PriorityQueue in log(size) - time. */ - public final T pop() { - if (size > 0) { - T result = heap[1]; // save first value - heap[1] = heap[size]; // move last to first - heap[size] = null; // permit GC of objects - size--; - downHeap(); // adjust heap - return result; - } else - return null; - } - - /** - * Should be called when the Object at top changes values. Still log(n) worst - * case, but it's at least twice as fast to - * - *
-   * pq.top().change();
-   * pq.updateTop();
-   * 
- * - * instead of - * - *
-   * o = pq.pop();
-   * o.change();
-   * pq.push(o);
-   * 
- * - * @return the new 'top' element. - */ - public final T updateTop() { - downHeap(); - return heap[1]; - } - - /** Returns the number of elements currently stored in the PriorityQueue. */ - public final int size() { - return size; - } - - /** Removes all entries from the PriorityQueue. */ - public final void clear() { - for (int i = 0; i <= size; i++) { - heap[i] = null; - } - size = 0; - } - - private void upHeap() { - int i = size; - T node = heap[i]; // save bottom node - int j = i >>> 1; - while (j > 0 && lessThan(node, heap[j])) { - heap[i] = heap[j]; // shift parents down - i = j; - j = j >>> 1; - } - heap[i] = node; // install saved node - } - - private void downHeap() { - int i = 1; - T node = heap[i]; // save top node - int j = i << 1; // find smaller child - int k = j + 1; - if (k <= size && lessThan(heap[k], heap[j])) { - j = k; - } - while (j <= size && lessThan(heap[j], node)) { - heap[i] = heap[j]; // shift up child - i = j; - j = i << 1; - k = j + 1; - if (k <= size && lessThan(heap[k], heap[j])) { - j = k; - } - } - heap[i] = node; // install saved node - } - - /** This method returns the internal heap array as Object[]. - */ - protected final Object[] getHeapArray() { - return heap; - } -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterable.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterable.java deleted file mode 100644 index 77ba8b77d9..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterable.java +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.graphdb.vertices; - -import org.janusgraph.core.JanusGraphRelation; -import org.janusgraph.graphdb.internal.InternalRelation; - -import java.util.Iterator; - -@Deprecated -public class RemovableRelationIterable - implements Iterable { - - private final Iterable iterable; - - public RemovableRelationIterable(Iterable iterable) { - this.iterable = iterable; - } - - @Override - public Iterator iterator() { - return new RemovableRelationIterator<>(iterable.iterator()); - } - -} diff --git a/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterator.java b/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterator.java deleted file mode 100644 index 900c797628..0000000000 --- a/janusgraph-core/src/main/java/org/janusgraph/graphdb/vertices/RemovableRelationIterator.java +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.graphdb.vertices; - -import org.janusgraph.core.JanusGraphRelation; -import org.janusgraph.graphdb.internal.InternalRelation; - -import java.util.Iterator; - -@Deprecated -public class RemovableRelationIterator - implements Iterator { - - - private final Iterator iterator; - private InternalRelation current; - - public RemovableRelationIterator(Iterator iterator) { - this.iterator = iterator; - current = null; - } - - @Override - public boolean hasNext() { - return iterator.hasNext(); - - } - - @SuppressWarnings("unchecked") - @Override - public O next() { - current = iterator.next(); - return (O) current; - } - - @Override - public void remove() { - assert current != null; - //iterator.remove(); - current.remove(); - } - - -} diff --git a/janusgraph-core/src/main/resources/log4j.properties b/janusgraph-core/src/main/resources/log4j.properties deleted file mode 100644 index 949d9cdf49..0000000000 --- a/janusgraph-core/src/main/resources/log4j.properties +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender - -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -# Set root logger level to the designated level and its only appender to A1. 
-log4j.rootLogger=INFO, A1 - -log4j.logger.org.apache.cassandra=INFO -log4j.logger.org.apache.hadoop=INFO -log4j.logger.org.apache.zookeeper=INFO diff --git a/janusgraph-cql/pom.xml b/janusgraph-cql/pom.xml index 5504a62ef2..cb0510d05a 100644 --- a/janusgraph-cql/pom.xml +++ b/janusgraph-cql/pom.xml @@ -4,7 +4,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT janusgraph-cql @@ -36,13 +36,15 @@ - org.slf4j - slf4j-log4j12 + org.apache.logging.log4j + log4j-slf4j-impl + runtime true ch.qos.logback logback-classic + true @@ -118,6 +120,18 @@ com.ning compress-lzf + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + + + org.jctools + jctools-core + @@ -130,6 +144,10 @@ org.json json + + com.esri.geometry + esri-geometry-api + org.apache.tinkerpop gremlin-core @@ -246,6 +264,12 @@ + + + ${basedir}/src/main/resources + true + + maven-dependency-plugin @@ -271,7 +295,6 @@ false 1 - file:${project.build.testOutputDirectory}/log4j.properties ${cassandra.docker.image} ${cassandra.docker.version} ${cassandra.docker.partitioner} diff --git a/janusgraph-cql/src/main/java/org/janusgraph/diskstorage/cql/CQLKeyColumnValueStore.java b/janusgraph-cql/src/main/java/org/janusgraph/diskstorage/cql/CQLKeyColumnValueStore.java index d007136373..c71ee2dbdf 100644 --- a/janusgraph-cql/src/main/java/org/janusgraph/diskstorage/cql/CQLKeyColumnValueStore.java +++ b/janusgraph-cql/src/main/java/org/janusgraph/diskstorage/cql/CQLKeyColumnValueStore.java @@ -462,7 +462,7 @@ public KeyIterator getKeys(final KeyRangeQuery query, final StoreTransaction txh @Override public KeyIterator getKeys(final SliceQuery query, final StoreTransaction txh) throws BackendException { if (!this.storeManager.getFeatures().hasUnorderedScan()) { - throw new PermanentBackendException("This operation is only allowed when a random partitioner (md5 or murmur3) is used."); + throw new PermanentBackendException("This operation is only allowed when partitioner supports unordered scan"); 
} return Try.of(() -> new CQLResultSetKeyIterator( diff --git a/janusgraph-cql/src/main/java/org/janusgraph/diskstorage/cql/builder/CQLStoreFeaturesBuilder.java b/janusgraph-cql/src/main/java/org/janusgraph/diskstorage/cql/builder/CQLStoreFeaturesBuilder.java index 02091b812c..70a511293f 100644 --- a/janusgraph-cql/src/main/java/org/janusgraph/diskstorage/cql/builder/CQLStoreFeaturesBuilder.java +++ b/janusgraph-cql/src/main/java/org/janusgraph/diskstorage/cql/builder/CQLStoreFeaturesBuilder.java @@ -88,7 +88,7 @@ public CQLStoreFeaturesWrapper build(final CqlSession session, final Configurati break; } case "ByteOrderedPartitioner": { - fb.keyOrdered(true).orderedScan(true).unorderedScan(false); + fb.keyOrdered(true).orderedScan(true).unorderedScan(true); deployment = (hostnames.length == 1)// mark deployment as local only in case we have byte ordered partitioner and local // connection ? (NetworkUtil.isLocalConnection(hostnames[0])) ? DistributedStoreManager.Deployment.LOCAL : DistributedStoreManager.Deployment.REMOTE diff --git a/janusgraph-cql/src/test/java/org/janusgraph/diskstorage/cql/CQLStoreTest.java b/janusgraph-cql/src/test/java/org/janusgraph/diskstorage/cql/CQLStoreTest.java index b47c33f2ee..499d6ddc6e 100644 --- a/janusgraph-cql/src/test/java/org/janusgraph/diskstorage/cql/CQLStoreTest.java +++ b/janusgraph-cql/src/test/java/org/janusgraph/diskstorage/cql/CQLStoreTest.java @@ -22,9 +22,14 @@ import org.janusgraph.JanusGraphCassandraContainer; import org.janusgraph.diskstorage.BackendException; import org.janusgraph.diskstorage.KeyColumnValueStoreTest; +import org.janusgraph.diskstorage.PermanentBackendException; import org.janusgraph.diskstorage.configuration.Configuration; import org.janusgraph.diskstorage.configuration.ModifiableConfiguration; +import org.janusgraph.diskstorage.keycolumnvalue.KeyRangeQuery; +import org.janusgraph.diskstorage.keycolumnvalue.SliceQuery; +import org.janusgraph.diskstorage.keycolumnvalue.StandardStoreFeatures; import 
org.janusgraph.diskstorage.keycolumnvalue.StoreFeatures; +import org.janusgraph.diskstorage.util.BufferUtil; import org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration; import org.janusgraph.testutil.FeatureFlag; import org.janusgraph.testutil.JanusGraphFeature; @@ -42,6 +47,8 @@ import org.testcontainers.junit.jupiter.Container; import org.testcontainers.junit.jupiter.Testcontainers; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -59,8 +66,10 @@ import static org.janusgraph.diskstorage.cql.CQLConfigOptions.SPECULATIVE_RETRY; import static org.janusgraph.diskstorage.cql.CQLConfigOptions.USE_EXTERNAL_LOCKING; import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.BASIC_METRICS; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.params.provider.Arguments.arguments; import static org.mockito.Mockito.any; @@ -109,7 +118,6 @@ public CQLStoreManager openStorageManager() throws BackendException { @FeatureFlag(feature = JanusGraphFeature.UnorderedScan) public void testUnorderedConfiguration(TestInfo testInfo) { final StoreFeatures features = this.manager.getFeatures(); - assertFalse(features.isKeyOrdered()); assertFalse(features.hasLocalKeyPartition()); } @@ -303,6 +311,44 @@ public void testNewTableOpenDatabase() throws BackendException { verify(session, times(1)).execute(any(Statement.class)); } + @Test + @FeatureFlag(feature = JanusGraphFeature.UnorderedScan) + public void testGetKeysWithoutOrderedScan() throws BackendException, NoSuchFieldException, IllegalAccessException { + // support unordered scan but not ordered scan + Field field = 
StandardStoreFeatures.class.getDeclaredField("orderedScan"); + field.setAccessible(true); + Field modifiersField = Field.class + .getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.set(manager.getFeatures(), false); + Exception ex = assertThrows(PermanentBackendException.class, () -> store.getKeys( + new KeyRangeQuery(BufferUtil.getLongBuffer(1), BufferUtil.getLongBuffer(1000), BufferUtil.getLongBuffer(1), + BufferUtil.getLongBuffer(1000)), tx)); + assertEquals("This operation is only allowed when the byteorderedpartitioner is used.", ex.getMessage()); + assertDoesNotThrow(() -> store.getKeys( + new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(4)), tx)); + } + + @Test + @FeatureFlag(feature = JanusGraphFeature.OrderedScan) + public void testGetKeysWithoutUnorderedScan() throws BackendException, NoSuchFieldException, IllegalAccessException { + // support ordered scan but not unordered scan + Field field = StandardStoreFeatures.class.getDeclaredField("unorderedScan"); + field.setAccessible(true); + Field modifiersField = Field.class + .getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.set(manager.getFeatures(), false); + Exception ex = assertThrows(PermanentBackendException.class, () -> store.getKeys( + new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(4)), tx)); + assertEquals("This operation is only allowed when partitioner supports unordered scan", ex.getMessage()); + assertDoesNotThrow(() -> store.getKeys( + new KeyRangeQuery(BufferUtil.getLongBuffer(1), BufferUtil.getLongBuffer(1000), BufferUtil.getLongBuffer(1), + BufferUtil.getLongBuffer(1000)), tx)); + } + @Override public CQLStoreManager openStorageManagerForClearStorageTest() throws Exception { return 
openStorageManager(getBaseStorageConfiguration().set(GraphDatabaseConfiguration.DROP_ON_CLEAR, true)); diff --git a/janusgraph-backend-testutils/src/main/java/org/janusgraph/testutil/TestLoggerUtils.java b/janusgraph-cql/src/test/java/org/janusgraph/testutil/TestLoggerUtils.java similarity index 100% rename from janusgraph-backend-testutils/src/main/java/org/janusgraph/testutil/TestLoggerUtils.java rename to janusgraph-cql/src/test/java/org/janusgraph/testutil/TestLoggerUtils.java diff --git a/janusgraph-cql/src/test/resources/log4j.properties b/janusgraph-cql/src/test/resources/log4j.properties deleted file mode 100644 index b8f0e78f14..0000000000 --- a/janusgraph-cql/src/test/resources/log4j.properties +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# A2 is a ConsoleAppender. -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -# A2 uses PatternLayout. 
-log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# Set both appenders (A1 and A2) on the root logger. -#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - diff --git a/janusgraph-cql/src/test/resources/log4j2-test.xml b/janusgraph-cql/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-cql/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-dist/Dockerfile b/janusgraph-dist/Dockerfile deleted file mode 100644 index 323510ac0e..0000000000 --- a/janusgraph-dist/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -FROM openjdk:8 - -ARG server_zip -ADD ${server_zip} /var - -RUN apt-get update -y && apt-get install -y zip && \ - server_base=`basename ${server_zip} .zip` && \ - unzip -q /var/${server_base}.zip -d /var && \ - rm /var/${server_base}.zip && \ - ln -s /var/${server_base} /var/janusgraph && \ - groupadd -g 999 janusgraph && \ - useradd -d /home/janusgraph -m -r -u 999 -g janusgraph janusgraph && \ - chown -R janusgraph:janusgraph /var/${server_base} && \ - chmod 755 /var/${server_base} && \ - chown -R janusgraph:janusgraph /var/janusgraph && \ - chmod 755 /var/janusgraph - -USER janusgraph - -WORKDIR /var/janusgraph diff --git a/janusgraph-dist/README.md b/janusgraph-dist/README.md index fdcccd90fa..ab4429be96 100644 --- a/janusgraph-dist/README.md +++ b/janusgraph-dist/README.md @@ -31,34 +31,6 @@ The documentation output appears in: * site/ -## Building deb/rpm packages - -Requires: - -* a platform that can run shell scripts (e.g. Linux, Mac OS X, or - Windows with Cygwin) - -* the Aurelius public package GPG signing key - -Run `mvn -N -Ppkg-tools install` in the janusgraph-dist module. This writes -three folders to the root of the janusgraph repository: - -* debian -* pkgcommon -* redhat - -The debian and redhat folders contain platform-specific packaging -control and payload files. The pkgcommon folder contains shared -payload and helper scripts. - -To build the .deb and .rpm packages: - -* (cd to the repository root) -* `pkgcommon/bin/build-all.sh` - -To delete the packaging scripts from the root of the repository, run -`mvn -N -Ppkg-tools clean` from the janusgraph-dist module. 
- ## Upgrade cassandra-server version Following files have to be updated, if you update Cassandra server version diff --git a/janusgraph-dist/docker-compose.yml b/janusgraph-dist/docker-compose.yml deleted file mode 100644 index e1ee15f9fb..0000000000 --- a/janusgraph-dist/docker-compose.yml +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: '2.1' -services: - janusgraph: - image: janusgraph/server:latest - container_name: janusgraph - ports: - - "8182:8182" - depends_on: - elasticsearch: - condition: service_healthy - command: ["./bin/janusgraph-server.sh", "./conf/gremlin-server/gremlin-server-berkeleyje-es.yaml"] - - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:6.2.4 - environment: - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "http.host=0.0.0.0" - - "transport.host=127.0.0.1" - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9200"] - interval: 1s - timeout: 30s - retries: 30 - ports: - - "9200:9200" - volumes: - - ./es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml - diff --git a/janusgraph-dist/es/elasticsearch.yml b/janusgraph-dist/es/elasticsearch.yml deleted file mode 100644 index 9428841aaa..0000000000 --- a/janusgraph-dist/es/elasticsearch.yml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance 
with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -cluster.name: "docker-cluster" -network.host: 0.0.0.0 - -# minimum_master_nodes need to be explicitly set when bound on a public IP -# set to 1 to allow single node clusters -# Details: https://github.com/elastic/elasticsearch/pull/17288 -discovery.zen.minimum_master_nodes: 1 - -xpack.security.enabled : false - diff --git a/janusgraph-dist/pom.xml b/janusgraph-dist/pom.xml index 45c9f52e78..ead3cf7ab6 100644 --- a/janusgraph-dist/pom.xml +++ b/janusgraph-dist/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml pom @@ -33,13 +33,8 @@ ${project.build.directory}/${packname.full}.zip package - - - 1 - ${project.basedir}/.. - ${it.skip} - 0.4.13 + ${project.parent.basedir}/docs @@ -182,13 +177,16 @@ binding appears on the classpath before logback-classic. See comments in janusgraph-cassandra/pom.xml for more information. 
--> - org.slf4j - slf4j-log4j12 + org.apache.logging.log4j + log4j-slf4j-impl + runtime true ch.qos.logback logback-classic + runtime + true @@ -198,7 +196,7 @@ com.googlecode.maven-download-plugin download-maven-plugin - 1.6.6 + 1.6.7 get-elasticsearch @@ -381,123 +379,17 @@ - - pkg-tools - - - - - org.codehaus.mojo - build-helper-maven-plugin - 3.2.0 - false - - - set-version-properties - generate-resources - - parse-version - - - - - - maven-assembly-plugin - false - - - assemble-package-tools - install - - single - - - false - ${pkgtools.dir}/ - / - false - - ${assembly.descriptor.dir}/pkgtools.xml - - - - - assemble-distribution-archive - none - - - - - maven-clean-plugin - false - - - clean-package-tools - clean - - clean - - - true - - - ${pkgtools.dir}/debian/ - - - ${pkgtools.dir}/pkgcommon/ - - - ${pkgtools.dir}/redhat/ - - - - - - - - - - - - janusgraph-docker - - - - - com.spotify - docker-maven-plugin - ${docker.maven.version} - - ${project.basedir} - - target/janusgraph-${project.version}.zip - ${env.http_proxy} - ${env.https_proxy} - - true - janusgraph/server - - ${project.version} - - - - - - - - java-11 janusgraph-java-11-${project.version} - ${project.build.directory}/${packname.standard}.zip + janusgraph-java-11-full-${project.version} FULL_TESTS none janusgraph-release - ${project.groupId} @@ -506,7 +398,6 @@ javadoc - @@ -632,7 +523,7 @@ maven-failsafe-plugin 1 - false + false ${top.level.basedir}/janusgraph-dist/target/test-classes @@ -647,26 +538,10 @@ false ${test.excluded.groups} - **/*HBase*IT.java **/*CompatIT.java - - hbase-integration-test - integration-test - - integration-test - - - -Dtest.hbase.parentdir=${top.level.basedir}/janusgraph-hbase-parent/ - hbase - false - - **/*HBase*IT.java - - - - - true - /examples - false - - ${project.groupId}:janusgraph-hadoop:zip:example-data:${project.version} - - diff --git a/janusgraph-dist/src/assembly/descriptor/common.component.xml 
b/janusgraph-dist/src/assembly/descriptor/common.component.xml index 571374dc84..1b274f068e 100644 --- a/janusgraph-dist/src/assembly/descriptor/common.component.xml +++ b/janusgraph-dist/src/assembly/descriptor/common.component.xml @@ -40,15 +40,5 @@ ${project.groupId}:janusgraph-core:jar:javadoc:${project.version} - - - - true - /examples - false - - ${project.groupId}:janusgraph-hadoop:zip:example-data:${project.version} - - diff --git a/janusgraph-dist/src/assembly/descriptor/pkgtools.xml b/janusgraph-dist/src/assembly/descriptor/pkgtools.xml deleted file mode 100644 index 98262f6e37..0000000000 --- a/janusgraph-dist/src/assembly/descriptor/pkgtools.xml +++ /dev/null @@ -1,21 +0,0 @@ - - pkg-tools - / - - - dir - - - - - src/pkg/static - false - / - - - src/pkg/resources - true - / - - - diff --git a/janusgraph-dist/src/assembly/static/bin/gremlin-server.bat b/janusgraph-dist/src/assembly/static/bin/gremlin-server.bat index 46a76917e0..607cea1fbe 100755 --- a/janusgraph-dist/src/assembly/static/bin/gremlin-server.bat +++ b/janusgraph-dist/src/assembly/static/bin/gremlin-server.bat @@ -84,10 +84,7 @@ IF NOT DEFINED JAVA_OPTIONS ( SET JAVA_OPTIONS=-Xms32m -Xmx512m ^ -Djanusgraph.logdir=%JANUSGRAPH_HOME%\logs ^ -Dtinkerpop.ext=%JANUSGRAPH_EXT% ^ - -Dlogback.configurationFile=conf\logback.xml ^ - -Dlog4j.configuration=file:/%JANUSGRAPH_HOME%\conf\gremlin-server\log4j-server.properties ^ - -Dlog4j.debug=true ^ - -Dgremlin.log4j.level=%GREMLIN_LOG_LEVEL% ^ + -Dlog4j.configurationFile=file:/%JANUSGRAPH_HOME%\conf\log4j2-server.xml ^ -javaagent:%JAMM_JAR% ^ -Dgremlin.io.kryoShimService=org.janusgraph.hadoop.serialize.JanusGraphKryoShimService ) diff --git a/janusgraph-dist/src/assembly/static/bin/gremlin.bat b/janusgraph-dist/src/assembly/static/bin/gremlin.bat index 6286d62aa3..27c6853579 100755 --- a/janusgraph-dist/src/assembly/static/bin/gremlin.bat +++ b/janusgraph-dist/src/assembly/static/bin/gremlin.bat @@ -84,9 +84,7 @@ SET 
CP=%CLASSPATH%;%SLF4J_LOG4J_JAR%;%JANUSGRAPH_JARS%;%JANUSGRAPH_LIB%\*;%EXTDI IF NOT DEFINED JAVA_OPTIONS ( SET JAVA_OPTIONS=-Xms32m -Xmx512m ^ -Dtinkerpop.ext=%JANUSGRAPH_EXT% ^ - -Dlogback.configurationFile=%JANUSGRAPH_HOME%\conf\logback.xml ^ - -Dlog4j.configuration=file:/%JANUSGRAPH_HOME%\conf\log4j-console.properties ^ - -Dgremlin.log4j.level=%GREMLIN_LOG_LEVEL% ^ + -Dlog4j.configurationFile=file:/%JANUSGRAPH_HOME%\conf\log4j2-server.xml ^ -Djline.terminal=none ^ -javaagent:%JAMM_JAR% ^ -Dgremlin.io.kryoShimService=org.janusgraph.hadoop.serialize.JanusGraphKryoShimService diff --git a/janusgraph-dist/src/assembly/static/bin/gremlin.sh b/janusgraph-dist/src/assembly/static/bin/gremlin.sh index 9ec7d4af9e..4ba582ce23 100755 --- a/janusgraph-dist/src/assembly/static/bin/gremlin.sh +++ b/janusgraph-dist/src/assembly/static/bin/gremlin.sh @@ -88,11 +88,6 @@ else JAVA="$JAVA_HOME/bin/java -server" fi -# Set default message threshold for Log4j Gremlin's console appender -if [ -z "${GREMLIN_LOG_LEVEL:-}" ]; then - GREMLIN_LOG_LEVEL=WARN -fi - # Script debugging is disabled by default, but can be enabled with -l # TRACE or -l DEBUG or enabled by exporting # SCRIPT_DEBUG=nonemptystring to gremlin.sh's environment @@ -119,7 +114,7 @@ if [ -z "${HADOOP_GREMLIN_LIBS:-}" ]; then export HADOOP_GREMLIN_LIBS="$LIB" fi -JAVA_OPTIONS="${JAVA_OPTIONS} -Duser.working_dir=${USER_DIR} -Dtinkerpop.ext=${USER_EXT_DIR:-${SYSTEM_EXT_DIR}} -Dlog4j.configuration=conf/log4j-console.properties -Dgremlin.log4j.level=$GREMLIN_LOG_LEVEL -javaagent:$LIB/jamm-0.3.0.jar -Dgremlin.io.kryoShimService=org.janusgraph.hadoop.serialize.JanusGraphKryoShimService" +JAVA_OPTIONS="${JAVA_OPTIONS} -Duser.working_dir=${USER_DIR} -Dtinkerpop.ext=${USER_EXT_DIR:-${SYSTEM_EXT_DIR}} -Dlog4j.configurationFile=file:conf/log4j2-console.xml -javaagent:$LIB/jamm-0.3.0.jar -Dgremlin.io.kryoShimService=org.janusgraph.hadoop.serialize.JanusGraphKryoShimService" JAVA_OPTIONS=$(awk -v RS=' ' '!/^$/ {if (!x[$0]++) print}' 
<<< "${JAVA_OPTIONS}" | grep -v '^$' | paste -sd ' ' -) if [ -n "$SCRIPT_DEBUG" ]; then diff --git a/janusgraph-dist/src/assembly/static/bin/janusgraph-server.sh b/janusgraph-dist/src/assembly/static/bin/janusgraph-server.sh index d6941c08a4..6dc6b76896 100755 --- a/janusgraph-dist/src/assembly/static/bin/janusgraph-server.sh +++ b/janusgraph-dist/src/assembly/static/bin/janusgraph-server.sh @@ -95,7 +95,7 @@ if [[ -z "$JANUSGRAPH_LIB" ]]; then fi # absolute file path requires 'file:' -LOG4J_CONF="file:$JANUSGRAPH_CONF/log4j-server.properties" +LOG4J_CONF="file:$JANUSGRAPH_CONF/log4j2-server.xml" # Find Java if [[ "$JAVA_HOME" = "" ]] ; then @@ -208,7 +208,7 @@ start() { exit 1 fi - $JAVA -Dlog4j.configuration=$LOG4J_CONF $JAVA_OPTIONS -cp $CLASSPATH $JANUSGRAPH_SERVER_CMD "$JANUSGRAPH_YAML" >> "$LOG_FILE" 2>&1 & + $JAVA -Dlog4j.configurationFile=$LOG4J_CONF $JAVA_OPTIONS -cp $CLASSPATH $JANUSGRAPH_SERVER_CMD "$JANUSGRAPH_YAML" >> "$LOG_FILE" 2>&1 & PID=$! disown $PID echo $PID > "$PID_FILE" @@ -226,7 +226,7 @@ start() { exit 1 fi - su -c "$JAVA -Dlog4j.configuration=$LOG4J_CONF $JAVA_OPTIONS -cp $CLASSPATH $JANUSGRAPH_SERVER_CMD \"$JANUSGRAPH_YAML\" >> \"$LOG_FILE\" 2>&1 & echo \$! " "$RUNAS" > "$PID_FILE" + su -c "$JAVA -Dlog4j.configurationFile=$LOG4J_CONF $JAVA_OPTIONS -cp $CLASSPATH $JANUSGRAPH_SERVER_CMD \"$JANUSGRAPH_YAML\" >> \"$LOG_FILE\" 2>&1 & echo \$! 
" "$RUNAS" > "$PID_FILE" chown "$RUNAS" "$PID_FILE" fi @@ -252,7 +252,7 @@ startForeground() { if [[ -z "$RUNAS" ]]; then echo "$JANUSGRAPH_YAML will be used to start JanusGraph Server in foreground" - exec $JAVA -Dlog4j.configuration=$LOG4J_CONF $JAVA_OPTIONS -cp $CLASSPATH $JANUSGRAPH_SERVER_CMD "$JANUSGRAPH_YAML" + exec $JAVA -Dlog4j.configurationFile=$LOG4J_CONF $JAVA_OPTIONS -cp $CLASSPATH $JANUSGRAPH_SERVER_CMD "$JANUSGRAPH_YAML" exit 0 else echo Starting in foreground not supported with RUNAS diff --git a/janusgraph-dist/src/assembly/static/conf/gremlin-server/gremlin-server-hbase-es.yaml b/janusgraph-dist/src/assembly/static/conf/gremlin-server/gremlin-server-hbase-es.yaml new file mode 100644 index 0000000000..53489d7209 --- /dev/null +++ b/janusgraph-dist/src/assembly/static/conf/gremlin-server/gremlin-server-hbase-es.yaml @@ -0,0 +1,61 @@ +# Copyright 2019 JanusGraph Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +host: 0.0.0.0 +port: 8182 +evaluationTimeout: 30000 +channelizer: org.apache.tinkerpop.gremlin.server.channel.WebSocketChannelizer +graphs: { + graph: conf/janusgraph-hbase-es.properties +} +scriptEngines: { + gremlin-groovy: { + plugins: { org.janusgraph.graphdb.tinkerpop.plugin.JanusGraphGremlinPlugin: {}, + org.apache.tinkerpop.gremlin.server.jsr223.GremlinServerGremlinPlugin: {}, + org.apache.tinkerpop.gremlin.tinkergraph.jsr223.TinkerGraphGremlinPlugin: {}, + org.apache.tinkerpop.gremlin.jsr223.ImportGremlinPlugin: {classImports: [java.lang.Math], methodImports: [java.lang.Math#*]}, + org.apache.tinkerpop.gremlin.jsr223.ScriptFileGremlinPlugin: {files: [scripts/empty-sample.groovy]}}}} +# JanusGraph sets default serializers. You need to uncomment the following lines, if you require any custom serializers. +# +# serializers: +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1, config: { serializeResultToString: true }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0, config: { serializeResultToString: true }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# # Older serialization versions for backwards compatibility: +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoLiteMessageSerializerV1d0, config: {ioRegistries: 
[org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { serializeResultToString: true }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV2d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistryV1d0] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistryV1d0] }} +processors: + - { className: org.apache.tinkerpop.gremlin.server.op.session.SessionOpProcessor, config: { sessionTimeout: 28800000 }} + - { className: org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor, config: { cacheExpirationTime: 600000, cacheMaxSize: 1000 }} +metrics: { + consoleReporter: {enabled: false, interval: 180000}, + csvReporter: {enabled: false, interval: 180000, fileName: /tmp/gremlin-server-metrics.csv}, + jmxReporter: {enabled: true}, + slf4jReporter: {enabled: false, interval: 180000}, + graphiteReporter: {enabled: false, interval: 180000}} +maxInitialLineLength: 4096 +maxHeaderSize: 8192 +maxChunkSize: 8192 +maxContentLength: 65536 +maxAccumulationBufferComponents: 1024 +resultIterationBatchSize: 64 +writeBufferLowWaterMark: 32768 +writeBufferHighWaterMark: 65536 + diff --git a/janusgraph-dist/src/assembly/static/conf/gremlin-server/gremlin-server-hbase.yaml b/janusgraph-dist/src/assembly/static/conf/gremlin-server/gremlin-server-hbase.yaml new file mode 100644 index 0000000000..67163d9e0c --- /dev/null +++ b/janusgraph-dist/src/assembly/static/conf/gremlin-server/gremlin-server-hbase.yaml @@ -0,0 +1,60 @@ +# Copyright 2021 JanusGraph Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +host: 0.0.0.0 +port: 8182 +evaluationTimeout: 30000 +channelizer: org.apache.tinkerpop.gremlin.server.channel.WebSocketChannelizer +graphs: { + graph: conf/janusgraph-hbase.properties +} +scriptEngines: { + gremlin-groovy: { + plugins: { org.janusgraph.graphdb.tinkerpop.plugin.JanusGraphGremlinPlugin: {}, + org.apache.tinkerpop.gremlin.server.jsr223.GremlinServerGremlinPlugin: {}, + org.apache.tinkerpop.gremlin.tinkergraph.jsr223.TinkerGraphGremlinPlugin: {}, + org.apache.tinkerpop.gremlin.jsr223.ImportGremlinPlugin: {classImports: [java.lang.Math], methodImports: [java.lang.Math#*]}, + org.apache.tinkerpop.gremlin.jsr223.ScriptFileGremlinPlugin: {files: [scripts/empty-sample.groovy]}}}} +# JanusGraph sets default serializers. You need to uncomment the following lines, if you require any custom serializers. 
+# +# serializers: +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1, config: { serializeResultToString: true }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0, config: { serializeResultToString: true }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# # Older serialization versions for backwards compatibility: +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoLiteMessageSerializerV1d0, config: {ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { serializeResultToString: true }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV2d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistryV1d0] }} +# - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistryV1d0] }} +processors: + - { className: org.apache.tinkerpop.gremlin.server.op.session.SessionOpProcessor, config: { sessionTimeout: 28800000 }} + - { 
className: org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor, config: { cacheExpirationTime: 600000, cacheMaxSize: 1000 }} +metrics: { + consoleReporter: {enabled: false, interval: 180000}, + csvReporter: {enabled: false, interval: 180000, fileName: /tmp/gremlin-server-metrics.csv}, + jmxReporter: {enabled: true}, + slf4jReporter: {enabled: false, interval: 180000}, + graphiteReporter: {enabled: false, interval: 180000}} +maxInitialLineLength: 4096 +maxHeaderSize: 8192 +maxChunkSize: 8192 +maxContentLength: 65536 +maxAccumulationBufferComponents: 1024 +resultIterationBatchSize: 64 +writeBufferLowWaterMark: 32768 +writeBufferHighWaterMark: 65536 diff --git a/janusgraph-dist/src/assembly/static/conf/log4j-console.properties b/janusgraph-dist/src/assembly/static/conf/log4j-console.properties deleted file mode 100644 index 6cf4616a38..0000000000 --- a/janusgraph-dist/src/assembly/static/conf/log4j-console.properties +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Used by gremlin.sh - -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=TRACE -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c %x - %m%n - -log4j.rootLogger=${gremlin.log4j.level}, A2 - -#log4j.logger.org.janusgraph.graphdb.database.idassigner.placement=DEBUG -#log4j.logger.org.janusgraph.diskstorage.hbase.HBaseStoreManager=DEBUG - -# Disable spurious Hadoop config deprecation warnings under 2.2.0. -# -# See https://issues.apache.org/jira/browse/HADOOP-10178 -# -# This can and should be deleted when we upgrade our Hadoop 2.2.0 -# dependency to 2.3.0 or 3.0.0. -log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=OFF - -# Configure MR at its own loglevel. We usually want MR at INFO, -# even if the rest of the loggers are at WARN or ERROR or FATAL, -# because job progress information is at INFO. -log4j.logger.org.apache.hadoop.mapred=${gremlin.mr.log4j.level} -log4j.logger.org.apache.hadoop.mapreduce=${gremlin.mr.log4j.level} - -# This generates 3 INFO lines per jar on the classpath -- usually more -# noise than desirable in the REPL. Switching it to the default -# log4j level means it will be at WARN by default, which is ideal. -log4j.logger.org.apache.hadoop.mapred.LocalDistributedCacheManager=${gremlin.log4j.level} diff --git a/janusgraph-dist/src/assembly/static/conf/log4j-server.properties b/janusgraph-dist/src/assembly/static/conf/log4j-server.properties deleted file mode 100644 index 03fa61336b..0000000000 --- a/janusgraph-dist/src/assembly/static/conf/log4j-server.properties +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender -log4j.appender.A1.Threshold=INFO -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -# Set both appenders (A1) on the root logger. -log4j.rootLogger=INFO, A1 diff --git a/janusgraph-dist/src/assembly/static/conf/log4j2-console.xml b/janusgraph-dist/src/assembly/static/conf/log4j2-console.xml new file mode 100644 index 0000000000..547b7e2aff --- /dev/null +++ b/janusgraph-dist/src/assembly/static/conf/log4j2-console.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/janusgraph-dist/src/assembly/static/conf/log4j2-server.xml b/janusgraph-dist/src/assembly/static/conf/log4j2-server.xml new file mode 100644 index 0000000000..384ab8ded5 --- /dev/null +++ b/janusgraph-dist/src/assembly/static/conf/log4j2-server.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-dist/src/assembly/static/conf/logback.xml b/janusgraph-dist/src/assembly/static/conf/logback.xml deleted file mode 100644 index 8aafaf0796..0000000000 --- a/janusgraph-dist/src/assembly/static/conf/logback.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - %d{HH:mm:ss} %-5level %logger - %msg%n - - - - - - diff --git a/janusgraph-dist/src/assembly/static/conf/solr/solrconfig.xml b/janusgraph-dist/src/assembly/static/conf/solr/solrconfig.xml index 317f68dea3..6bceb661ca 100644 --- a/janusgraph-dist/src/assembly/static/conf/solr/solrconfig.xml +++ 
b/janusgraph-dist/src/assembly/static/conf/solr/solrconfig.xml @@ -35,7 +35,7 @@ that you fully re-index after changing this setting as it can affect both how text is indexed and queried. --> - 7.0.0 + 8.0.0 - - org.noggit - noggit - 0.8 - + + + org.noggit + noggit + 0.8 + org.locationtech.spatial4j spatial4j @@ -53,8 +53,8 @@ org.locationtech.jts jts-core - - + + org.apache.commons commons-text 1.9 @@ -64,7 +64,7 @@ commons-lang3 - + org.junit.platform junit-platform-launcher @@ -100,6 +100,18 @@ junit-jupiter test + + + org.apache.logging.log4j + log4j-slf4j-impl + test + + + org.apache.logging.log4j + log4j-core + test + + diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/DeprecatedJanusGraphPSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/DeprecatedJanusGraphPSerializer.java deleted file mode 100644 index eb73dc84b4..0000000000 --- a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/DeprecatedJanusGraphPSerializer.java +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2020 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.graphdb.tinkerpop; - -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.apache.tinkerpop.gremlin.process.traversal.util.AndP; -import org.apache.tinkerpop.gremlin.process.traversal.util.OrP; -import org.apache.tinkerpop.gremlin.structure.io.gryo.kryoshim.InputShim; -import org.apache.tinkerpop.gremlin.structure.io.gryo.kryoshim.KryoShim; -import org.apache.tinkerpop.gremlin.structure.io.gryo.kryoshim.OutputShim; -import org.apache.tinkerpop.gremlin.structure.io.gryo.kryoshim.SerializerShim; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationTargetException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - - -@Deprecated -public class DeprecatedJanusGraphPSerializer implements SerializerShim

{ - - private static final Logger log = LoggerFactory.getLogger(JanusGraphPSerializer.class); - private final SerializerShim

pSerializerShim; - - public DeprecatedJanusGraphPSerializer(SerializerShim

pSerializerShim) { - this.pSerializerShim = pSerializerShim; - } - - @Override - public void write(KryoShim kryo, O output, P p) { - pSerializerShim.write(kryo, output, p); - - } - - @Override - public P read(KryoShim kryo, I input, Class

aClass) { - final String predicate = input.readString(); - final boolean isCollection = input.readByte() == (byte) 0; - final Object value; - if (isCollection) { - value = new ArrayList(); - final int size = input.readInt(); - for (int ix = 0; ix < size; ix++) { - ((List) value).add(kryo.readClassAndObject(input)); - } - } else { - value = kryo.readClassAndObject(input); - } - - try { - return createPredicateWithValue(predicate, value); - } catch (final Exception e) { - log.info("Couldn't deserialize class: " + aClass + ", predicate: " + predicate + ", isCollection: " - + isCollection + ",value: " + value, e); - throw new IllegalStateException(e.getMessage(), e); - } - } - - public static P createPredicateWithValue(String predicate, Object value) throws IllegalAccessException, InvocationTargetException, NoSuchMethodException { - if (JanusGraphPSerializer.checkForJanusGraphPredicate(predicate)){ - return JanusGraphPSerializer.createPredicateWithValue(predicate, value); - } - if (!predicate.equals("and") && !predicate.equals("or")) { - if (value instanceof Collection) { - switch (predicate) { - case "between": - return P.between(((List) value).get(0), ((List) value).get(1)); - case "inside": - return P.inside(((List) value).get(0), ((List) value).get(1)); - case "outside": - return P.outside(((List) value).get(0), ((List) value).get(1)); - case "within": - return P.within((Collection) value); - default: - return predicate.equals("without") ? P.without((Collection) value) : (P) P.class.getMethod(predicate, Collection.class).invoke(null, value); - } - } else { - return (P) P.class.getMethod(predicate, Object.class).invoke(null, value); - } - } else { - return (P) (predicate.equals("and") ? 
new AndP((List) value) : new OrP((List) value)); - } - } -} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistry.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistry.java index ce44a0d9f9..b4c841edae 100644 --- a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistry.java +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistry.java @@ -14,12 +14,10 @@ package org.janusgraph.graphdb.tinkerpop; -import org.apache.tinkerpop.gremlin.process.traversal.P; import org.apache.tinkerpop.gremlin.structure.io.AbstractIoRegistry; import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryIo; import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONIo; import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoIo; -import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoSerializersV3d0; import org.janusgraph.core.attribute.Geoshape; import org.janusgraph.graphdb.relations.RelationIdentifier; import org.janusgraph.graphdb.tinkerpop.io.JanusGraphP; @@ -44,16 +42,9 @@ private JanusGraphIoRegistry() { register(GryoIo.class, RelationIdentifier.class, null); register(GryoIo.class, Geoshape.class, new Geoshape.GeoShapeGryoSerializer()); register(GryoIo.class, JanusGraphP.class, new JanusGraphPSerializer()); - //fallback for older janusgraph drivers - register(GryoIo.class, P.class, new DeprecatedJanusGraphPSerializer(new GryoSerializersV3d0.PSerializer())); } public static JanusGraphIoRegistry instance() { return INSTANCE; } - - @Deprecated() - public static JanusGraphIoRegistry getInstance() { - return instance(); - } } diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryV1d0.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryV1d0.java index e473cc242e..95465505f6 100644 --- 
a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryV1d0.java +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryV1d0.java @@ -14,11 +14,9 @@ package org.janusgraph.graphdb.tinkerpop; -import org.apache.tinkerpop.gremlin.process.traversal.P; import org.apache.tinkerpop.gremlin.structure.io.AbstractIoRegistry; import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONIo; import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoIo; -import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoSerializersV1d0; import org.janusgraph.core.attribute.Geoshape; import org.janusgraph.graphdb.relations.RelationIdentifier; import org.janusgraph.graphdb.tinkerpop.io.JanusGraphP; @@ -37,16 +35,9 @@ private JanusGraphIoRegistryV1d0() { register(GryoIo.class, RelationIdentifier.class, null); register(GryoIo.class, Geoshape.class, new Geoshape.GeoShapeGryoSerializer()); register(GryoIo.class, JanusGraphP.class, new JanusGraphPSerializer()); - //fallback for older JanusGraph drivers - register(GryoIo.class, P.class, new DeprecatedJanusGraphPSerializer(new GryoSerializersV1d0.PSerializer())); } public static JanusGraphIoRegistryV1d0 instance() { return INSTANCE; } - - @Deprecated() - public static JanusGraphIoRegistryV1d0 getInstance() { - return instance(); - } } diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinaryConstants.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinaryConstants.java new file mode 100644 index 0000000000..7c7e23518a --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinaryConstants.java @@ -0,0 +1,32 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.graphdb.tinkerpop.io.binary; + +public class GeoshapeGraphBinaryConstants { + + // Geoshape format versions 0 and 1 were used by the legacy GeoshapeGraphBinarySerializer. + public static final byte GEOSHAPE_FORMAT_VERSION = 2; + + // Geoshape type codes + public static final int GEOSHAPE_POINT_TYPE_CODE = 0; + public static final int GEOSHAPE_CIRCLE_TYPE_CODE = 1; + public static final int GEOSHAPE_BOX_TYPE_CODE = 2; + public static final int GEOSHAPE_LINE_TYPE_CODE = 3; + public static final int GEOSHAPE_POLYGON_TYPE_CODE = 4; + public static final int GEOSHAPE_MULTI_POINT_TYPE_CODE = 5; + public static final int GEOSHAPE_MULTI_LINE_TYPE_CODE = 6; + public static final int GEOSHAPE_MULTI_POLYGON_TYPE_CODE = 7; + public static final int GEOSHAPE_GEOMETRY_COLLECTION_TYPE_CODE = 8; +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializer.java index 3bba02bdd9..3b5d02a2a8 100644 --- a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializer.java +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializer.java @@ -1,4 +1,4 @@ -// Copyright 2020 JanusGraph Authors +// Copyright 2021 JanusGraph Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -14,57 +14,82 @@ package org.janusgraph.graphdb.tinkerpop.io.binary; +import org.apache.tinkerpop.gremlin.driver.ser.SerializationException; import org.apache.tinkerpop.gremlin.structure.io.Buffer; import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.BoxSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.CircleSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.GeometryCollectionSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.LineSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.MultiLineSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.MultiPointSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.MultiPolygonSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.PointSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.PolygonSerializer; +import org.janusgraph.graphdb.tinkerpop.io.binary.geoshape.GeoshapeTypeSerializer; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; +import java.util.HashMap; +import java.util.Map; public class GeoshapeGraphBinarySerializer extends JanusGraphTypeSerializer { + + private final Map serializerByGeoshapeTypeCode = new HashMap<>(); + private final Map serializerByGeoshapeType = new HashMap<>(); + public GeoshapeGraphBinarySerializer() { super(GraphBinaryType.Geoshape); - } - private static class BufferInputStream extends InputStream { + serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_POINT_TYPE_CODE, new PointSerializer()); + serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_CIRCLE_TYPE_CODE, new CircleSerializer()); + 
serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_BOX_TYPE_CODE, new BoxSerializer()); + serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_LINE_TYPE_CODE, new LineSerializer()); + serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_POLYGON_TYPE_CODE, new PolygonSerializer()); + serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_MULTI_POINT_TYPE_CODE, new MultiPointSerializer()); + serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_MULTI_LINE_TYPE_CODE, new MultiLineSerializer()); + serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_MULTI_POLYGON_TYPE_CODE, new MultiPolygonSerializer()); + serializerByGeoshapeTypeCode.put(GeoshapeGraphBinaryConstants.GEOSHAPE_GEOMETRY_COLLECTION_TYPE_CODE, new GeometryCollectionSerializer()); - private Buffer buffer; - public BufferInputStream(Buffer buffer) { - this.buffer = buffer; - } - - @Override - public int read() { - return buffer.readInt(); - } + serializerByGeoshapeType.put(Geoshape.Type.POINT, new PointSerializer()); + serializerByGeoshapeType.put(Geoshape.Type.CIRCLE, new CircleSerializer()); + serializerByGeoshapeType.put(Geoshape.Type.BOX, new BoxSerializer()); + serializerByGeoshapeType.put(Geoshape.Type.LINE, new LineSerializer()); + serializerByGeoshapeType.put(Geoshape.Type.POLYGON, new PolygonSerializer()); + serializerByGeoshapeType.put(Geoshape.Type.MULTIPOINT, new MultiPointSerializer()); + serializerByGeoshapeType.put(Geoshape.Type.MULTILINESTRING, new MultiLineSerializer()); + serializerByGeoshapeType.put(Geoshape.Type.MULTIPOLYGON, new MultiPolygonSerializer()); + serializerByGeoshapeType.put(Geoshape.Type.GEOMETRYCOLLECTION, new GeometryCollectionSerializer()); } @Override - public Geoshape readNonNullableValue(Buffer buffer, GraphBinaryReader context) throws IOException { - BufferInputStream bufferOutputStream = new BufferInputStream(buffer); - return 
Geoshape.GeoshapeBinarySerializer.read(bufferOutputStream); - } - - private static class BufferOutputStream extends OutputStream { - - private Buffer buffer; - - public BufferOutputStream(Buffer buffer) { - this.buffer = buffer; + public Geoshape readNonNullableValue(final Buffer buffer, final GraphBinaryReader context) throws IOException { + final byte formatVersion = buffer.readByte(); + if (formatVersion != GeoshapeGraphBinaryConstants.GEOSHAPE_FORMAT_VERSION) { + throw new SerializationException("Geoshape format " + formatVersion + " not supported"); } - @Override - public void write(int i) { - buffer.writeInt(i); + final int geoshapeTypeCode = buffer.readInt(); + final GeoshapeTypeSerializer serializer = serializerByGeoshapeTypeCode.get(geoshapeTypeCode); + if (serializer == null) { + throw new SerializationException("Geoshape type code " + geoshapeTypeCode + " not supported"); } + return serializer.readNonNullableGeoshapeValue(buffer, context); + } @Override - public void writeNonNullableValue(Geoshape geoshape, Buffer buffer, GraphBinaryWriter context) throws IOException { - BufferOutputStream bufferOutputStream = new BufferOutputStream(buffer); - Geoshape.GeoshapeBinarySerializer.write(bufferOutputStream, geoshape); + public void writeNonNullableValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) throws IOException { + final Geoshape.Type type = geoshape.getType(); + + final GeoshapeTypeSerializer serializer = serializerByGeoshapeType.get(type); + if (serializer == null) { + throw new SerializationException("Geoshape type " + type + " not supported"); + } + buffer.writeByte(GeoshapeGraphBinaryConstants.GEOSHAPE_FORMAT_VERSION); + + serializer.writeNonNullableValue(geoshape, buffer, context); } } diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/BoxSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/BoxSerializer.java new file 
mode 100644 index 0000000000..78dc54ef51 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/BoxSerializer.java @@ -0,0 +1,47 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.Shape; + +public class BoxSerializer extends GeoshapeTypeSerializer { + + public BoxSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_BOX_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) { + final double minY = buffer.readDouble(); + final double minX = buffer.readDouble(); + final double maxY = buffer.readDouble(); + final double maxX = buffer.readDouble(); + return Geoshape.box(minY, minX, maxY, maxX); + } + + @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) { + final Shape shape = geoshape.getShape(); + final Rectangle rect = 
(Rectangle) shape; + buffer.writeDouble(rect.getMinY()); + buffer.writeDouble(rect.getMinX()); + buffer.writeDouble(rect.getMaxY()); + buffer.writeDouble(rect.getMaxX()); + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/CircleSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/CircleSerializer.java new file mode 100644 index 0000000000..855952fe28 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/CircleSerializer.java @@ -0,0 +1,42 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; + +public class CircleSerializer extends GeoshapeTypeSerializer { + + public CircleSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_CIRCLE_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) { + final double latitude = buffer.readDouble(); + final double longitude = buffer.readDouble(); + final double radius = buffer.readDouble(); + return Geoshape.circle(latitude, longitude, radius); + } + + @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) { + final Geoshape.Point center = geoshape.getPoint(); + buffer.writeDouble(center.getLatitude()); + buffer.writeDouble(center.getLongitude()); + buffer.writeDouble(geoshape.getRadius()); + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/GeometryCollectionSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/GeometryCollectionSerializer.java new file mode 100644 index 0000000000..c26c92f7fd --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/GeometryCollectionSerializer.java @@ -0,0 +1,54 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.ShapeCollection; +import org.locationtech.spatial4j.shape.ShapeFactory; + +import java.io.IOException; +import java.util.List; + +public class GeometryCollectionSerializer extends GeoshapeTypeSerializer { + + public GeometryCollectionSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_GEOMETRY_COLLECTION_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) throws IOException { + final int nrShapes = buffer.readInt(); + final ShapeFactory.MultiShapeBuilder geometryCollectionBuilder = Geoshape.getGeometryCollectionBuilder(); + for (int i = 0; i < nrShapes; i++) { + final Geoshape shape = context.readValue(buffer, Geoshape.class, true); + geometryCollectionBuilder.add(shape.getShape()); + } + return Geoshape.geoshape(geometryCollectionBuilder.build()); + } + + @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) throws IOException { + final ShapeCollection shapeCollection = (ShapeCollection) 
geoshape.getShape(); + final List shapes = shapeCollection.getShapes(); + buffer.writeInt(shapes.size()); + for (Shape shape : shapes) { + final Geoshape geoshapeMember = Geoshape.geoshape(shape); + context.writeValue(geoshapeMember, buffer, true); + } + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/GeoshapeTypeSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/GeoshapeTypeSerializer.java new file mode 100644 index 0000000000..2e973a9bd4 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/GeoshapeTypeSerializer.java @@ -0,0 +1,40 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; + +import java.io.IOException; + +public abstract class GeoshapeTypeSerializer { + + private final int geoshapeTypeCode; + + protected GeoshapeTypeSerializer(final int geoshapeTypeCode) { + this.geoshapeTypeCode = geoshapeTypeCode; + } + + public abstract Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) throws IOException; + + public void writeNonNullableValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) throws IOException { + buffer.writeInt(geoshapeTypeCode); + writeNonNullableGeoshapeValue(geoshape, buffer, context); + } + + public abstract void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) throws IOException; +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/LineSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/LineSerializer.java new file mode 100644 index 0000000000..025a98c310 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/LineSerializer.java @@ -0,0 +1,40 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; + +import java.util.List; + +public class LineSerializer extends GeoshapeTypeSerializer { + private static final PointCollectionSerializer pointCollectionSerializer = new PointCollectionSerializer(); + + public LineSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_LINE_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) { + final List points = pointCollectionSerializer.readPoints(buffer); + return Geoshape.line(points); + } + + @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) { + pointCollectionSerializer.writePointCollectionGeoshape(geoshape, buffer); + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiLineSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiLineSerializer.java new file mode 100644 index 0000000000..d0d4050531 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiLineSerializer.java @@ -0,0 +1,62 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.ShapeFactory; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + +import java.util.List; + +public class MultiLineSerializer extends GeoshapeTypeSerializer { + private static final PointCollectionSerializer pointCollectionSerializer = new PointCollectionSerializer(); + + + public MultiLineSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_MULTI_LINE_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) { + final int nrLines = buffer.readInt(); + final ShapeFactory.MultiLineStringBuilder multiLineStringBuilder = Geoshape.getShapeFactory().multiLineString(); + for (int i = 0; i < nrLines; i++) { + final List linePoints = pointCollectionSerializer.readPoints(buffer); + final ShapeFactory.LineStringBuilder line = Geoshape.getShapeFactory().lineString(); + for (double[] point : linePoints) { + line.pointXY(point[0], point[1]); + } + multiLineStringBuilder.add(line); + } + return Geoshape.geoshape(multiLineStringBuilder.build()); + } + 
+ @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) { + final Shape shape = geoshape.getShape(); + final Geometry geom = ((JtsGeometry) shape).getGeom(); + + final int nrLines = geom.getNumGeometries(); + buffer.writeInt(nrLines); + for (int i = 0; i < nrLines; i++) { + final Geometry line = geom.getGeometryN(i); + pointCollectionSerializer.writePoints(line.getCoordinates(), buffer); + } + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiPointSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiPointSerializer.java new file mode 100644 index 0000000000..b4f3367a6c --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiPointSerializer.java @@ -0,0 +1,45 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; +import org.locationtech.spatial4j.shape.ShapeFactory; + +import java.util.List; + +public class MultiPointSerializer extends GeoshapeTypeSerializer { + private static final PointCollectionSerializer pointCollectionSerializer = new PointCollectionSerializer(); + + public MultiPointSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_MULTI_POINT_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) { + final List points = pointCollectionSerializer.readPoints(buffer); + final ShapeFactory.MultiPointBuilder multiPointBuilder = Geoshape.getShapeFactory().multiPoint(); + for (double[] xy : points) { + multiPointBuilder.pointXY(xy[0], xy[1]); + } + return Geoshape.geoshape(multiPointBuilder.build()); + } + + @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) { + pointCollectionSerializer.writePointCollectionGeoshape(geoshape, buffer); + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiPolygonSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiPolygonSerializer.java new file mode 100644 index 0000000000..941cc5be26 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/MultiPolygonSerializer.java @@ -0,0 +1,61 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.ShapeFactory; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + +import java.util.List; + +public class MultiPolygonSerializer extends GeoshapeTypeSerializer { + private static final PointCollectionSerializer pointCollectionSerializer = new PointCollectionSerializer(); + + public MultiPolygonSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_MULTI_POLYGON_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) { + final int nrPolygons = buffer.readInt(); + final ShapeFactory.MultiPolygonBuilder multiPolygonBuilder = Geoshape.getShapeFactory().multiPolygon(); + for (int i = 0; i < nrPolygons; i++) { + final List polygonPoints = pointCollectionSerializer.readPoints(buffer); + final ShapeFactory.PolygonBuilder polygon = Geoshape.getShapeFactory().polygon(); + for (double[] point : polygonPoints) { + polygon.pointXY(point[0], point[1]); + } + multiPolygonBuilder.add(polygon); + } + return 
Geoshape.geoshape(multiPolygonBuilder.build()); + } + + @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) { + final Shape shape = geoshape.getShape(); + final Geometry geom = ((JtsGeometry) shape).getGeom(); + + final int nrPolygons = geom.getNumGeometries(); + buffer.writeInt(nrPolygons); + for (int i = 0; i < nrPolygons; i++) { + final Geometry polygon = geom.getGeometryN(i); + pointCollectionSerializer.writePoints(polygon.getCoordinates(), buffer); + } + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PointCollectionSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PointCollectionSerializer.java new file mode 100644 index 0000000000..0c83dce767 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PointCollectionSerializer.java @@ -0,0 +1,53 @@ +// Copyright 2020 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.janusgraph.core.attribute.Geoshape; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + +import java.util.ArrayList; +import java.util.List; + +class PointCollectionSerializer { + + public List readPoints(final Buffer buffer) { + final int length = buffer.readInt(); + final ArrayList points = new ArrayList(length); + for (int i = 0; i < length; i++) { + final double y = buffer.readDouble(); + final double x = buffer.readDouble(); + points.add(new double[] {x, y}); + } + return points; + } + + public void writePointCollectionGeoshape(final Geoshape geoshape, final Buffer buffer) { + final Shape shape = geoshape.getShape(); + final Geometry geom = ((JtsGeometry) shape).getGeom(); + writePoints(geom.getCoordinates(), buffer); + } + + public void writePoints(final Coordinate[] points, final Buffer buffer) { + buffer.writeInt(points.length); + for (Coordinate coordinate : points) { + buffer.writeDouble(coordinate.getY()); + buffer.writeDouble(coordinate.getX()); + } + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PointSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PointSerializer.java new file mode 100644 index 0000000000..dd38e36566 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PointSerializer.java @@ -0,0 +1,40 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; + +public class PointSerializer extends GeoshapeTypeSerializer { + + public PointSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_POINT_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) { + final double latitude = buffer.readDouble(); + final double longitude = buffer.readDouble(); + return Geoshape.point(latitude, longitude); + } + + @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) { + final Geoshape.Point point = geoshape.getPoint(); + buffer.writeDouble(point.getLatitude()); + buffer.writeDouble(point.getLongitude()); + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PolygonSerializer.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PolygonSerializer.java new file mode 100644 index 0000000000..c704501508 --- /dev/null +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/binary/geoshape/PolygonSerializer.java @@ -0,0 +1,40 @@ +// Copyright 2021 JanusGraph Authors 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.graphdb.tinkerpop.io.binary.geoshape; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.janusgraph.core.attribute.Geoshape; +import org.janusgraph.graphdb.tinkerpop.io.binary.GeoshapeGraphBinaryConstants; + +import java.util.List; + +public class PolygonSerializer extends GeoshapeTypeSerializer { + private static final PointCollectionSerializer pointCollectionSerializer = new PointCollectionSerializer(); + + public PolygonSerializer() { super(GeoshapeGraphBinaryConstants.GEOSHAPE_POLYGON_TYPE_CODE); } + + @Override + public Geoshape readNonNullableGeoshapeValue(final Buffer buffer, final GraphBinaryReader context) { + final List points = pointCollectionSerializer.readPoints(buffer); + return Geoshape.polygon(points); + } + + @Override + public void writeNonNullableGeoshapeValue(final Geoshape geoshape, final Buffer buffer, final GraphBinaryWriter context) { + pointCollectionSerializer.writePointCollectionGeoshape(geoshape, buffer); + } +} diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModule.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModule.java index f2939309f9..50db8168be 100644 --- 
a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModule.java +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModule.java @@ -14,7 +14,6 @@ package org.janusgraph.graphdb.tinkerpop.io.graphson; -import org.apache.tinkerpop.gremlin.process.traversal.P; import org.apache.tinkerpop.gremlin.structure.io.graphson.AbstractObjectDeserializer; import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONTokens; import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONUtil; @@ -30,7 +29,6 @@ import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdSerializer; import org.janusgraph.core.attribute.Geoshape; import org.janusgraph.graphdb.relations.RelationIdentifier; -import org.janusgraph.graphdb.tinkerpop.DeprecatedJanusGraphPSerializer; import org.janusgraph.graphdb.tinkerpop.JanusGraphPSerializer; import org.janusgraph.graphdb.tinkerpop.io.JanusGraphP; @@ -195,38 +193,4 @@ public boolean isCachable() { } } - @Deprecated - public static class DeprecatedJanusGraphPDeserializerV2d0 extends StdDeserializer

{ - - public DeprecatedJanusGraphPDeserializerV2d0() { - super(P.class); - } - - @Override - public P deserialize(final JsonParser jsonParser, final DeserializationContext deserializationContext) throws IOException { - String predicate = null; - Object value = null; - - while (jsonParser.nextToken() != JsonToken.END_OBJECT) { - if (jsonParser.getCurrentName().equals(GraphSONTokens.PREDICATE)) { - jsonParser.nextToken(); - predicate = jsonParser.getText(); - } else if (jsonParser.getCurrentName().equals(GraphSONTokens.VALUE)) { - jsonParser.nextToken(); - value = deserializationContext.readValue(jsonParser, Object.class); - } - } - - try { - return DeprecatedJanusGraphPSerializer.createPredicateWithValue(predicate, value); - } catch (final Exception e) { - throw new IllegalStateException(e.getMessage(), e); - } - } - - @Override - public boolean isCachable() { - return true; - } - } } diff --git a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModuleV2d0.java b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModuleV2d0.java index e3741464a4..6104bcd7e7 100644 --- a/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModuleV2d0.java +++ b/janusgraph-driver/src/main/java/org/janusgraph/graphdb/tinkerpop/io/graphson/JanusGraphSONModuleV2d0.java @@ -14,7 +14,6 @@ package org.janusgraph.graphdb.tinkerpop.io.graphson; -import org.apache.tinkerpop.gremlin.process.traversal.P; import org.janusgraph.core.attribute.Geoshape; import org.janusgraph.graphdb.relations.RelationIdentifier; import org.janusgraph.graphdb.tinkerpop.io.JanusGraphP; @@ -33,8 +32,6 @@ private JanusGraphSONModuleV2d0() { addDeserializer(RelationIdentifier.class, new RelationIdentifierDeserializerV2d0()); addDeserializer(Geoshape.class, new Geoshape.GeoshapeGsonDeserializerV2d0()); addDeserializer(JanusGraphP.class, new JanusGraphPDeserializerV2d0()); - //fallback for older janusgraph 
drivers - addDeserializer(P.class, new DeprecatedJanusGraphPDeserializerV2d0()); } private static final JanusGraphSONModuleV2d0 INSTANCE = new JanusGraphSONModuleV2d0(); diff --git a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryTest.java b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryTest.java index d8c4aea7ff..6f68034c87 100644 --- a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryTest.java +++ b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphIoRegistryTest.java @@ -28,11 +28,6 @@ import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; import org.apache.tinkerpop.gremlin.structure.Graph; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONMapper; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONReader; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONVersion; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONWriter; -import org.apache.tinkerpop.gremlin.structure.io.graphson.TypeInfo; import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoMapper; import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoMapper.Builder; import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; @@ -100,7 +95,6 @@ public void testTinkerPopPredicatesAsGryo() throws SerializationException { @Test public void testJanusGraphPredicatesAsGryo() throws SerializationException { - Graph graph = EmptyGraph.instance(); GraphTraversalSource g = graph.traversal(); @@ -185,54 +179,4 @@ public void testTokenIoRegistyInConfig() throws SerializationException { Bytecode result = (Bytecode)requestMessage1.getArgs().get(Tokens.ARGS_GREMLIN); assertEquals(expectedBytecode, result); } - - @Test - public void testOldFormatJanusGraphPredicatesAsGryo() throws 
SerializationException { - Graph graph = EmptyGraph.instance(); - GraphTraversalSource g = graph.traversal(); - - Bytecode oldBytecode = serializeByteCodeAfterDeserializeAsGryo(g.V().has("name", new P<>(Text.CONTAINS, "test"))); - Bytecode newBytecode = serializeByteCodeAfterDeserializeAsGryo(g.V().has("name", Text.textContains("test"))); - - assertEquals(newBytecode, oldBytecode); - } - - private Bytecode serializeByteCodeAfterDeserializeAsGryo(GraphTraversal traversal) throws SerializationException { - Builder mapper = GryoMapper.build().addRegistry(JanusGraphIoRegistry.instance()); - MessageSerializer binarySerializer = new GryoMessageSerializerV1d0(mapper); - Bytecode expectedBytecode = traversal.asAdmin().getBytecode(); - RequestMessage requestMessage = RequestMessage.build(Tokens.OPS_BYTECODE).processor("traversal") - .addArg(Tokens.ARGS_GREMLIN, expectedBytecode).create(); - - ByteBuf bb = binarySerializer.serializeRequestAsBinary(requestMessage, allocator); - final int mimeLen = bb.readByte(); - bb.readBytes(new byte[mimeLen]); - RequestMessage deser = binarySerializer.deserializeRequest(bb); - return (Bytecode) deser.getArgs().get(Tokens.ARGS_GREMLIN); - } - - @Test - public void testOldFormatJanusGraphPredicatesAsGraphSON() throws Exception { - Graph graph = EmptyGraph.instance(); - GraphTraversalSource g = graph.traversal(); - - Bytecode oldBytecode = serializeByteCodeAfterDeserializeAsGraphSON(g.V().has("name", new P<>(Text.CONTAINS, "test"))); - Bytecode newBytecode = serializeByteCodeAfterDeserializeAsGraphSON(g.V().has("name", Text.textContains("test"))); - - assertEquals(newBytecode, oldBytecode); - } - - private Bytecode serializeByteCodeAfterDeserializeAsGraphSON(GraphTraversal traversal) throws Exception { - final GraphSONMapper mapper = GraphSONMapper.build().version(GraphSONVersion.V3_0) - .typeInfo(TypeInfo.PARTIAL_TYPES).addRegistry(JanusGraphIoRegistry.instance()).create(); - final GraphSONWriter writer = 
GraphSONWriter.build().mapper(mapper).create(); - final GraphSONReader reader = GraphSONReader.build().mapper(mapper).create(); - Bytecode expectedBytecode = traversal.asAdmin().getBytecode(); - ByteArrayOutputStream serializationStream = new ByteArrayOutputStream(); - writer.writeObject(serializationStream, expectedBytecode); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(serializationStream.toByteArray()); - - return reader.readObject(inputStream, Bytecode.class); - } } diff --git a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphSerializerBaseIT.java b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphSerializerBaseIT.java index a7b774e585..cc0f2eb481 100644 --- a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphSerializerBaseIT.java +++ b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/JanusGraphSerializerBaseIT.java @@ -21,7 +21,6 @@ import org.janusgraph.core.attribute.Geoshape; import org.janusgraph.core.attribute.Text; import org.janusgraph.graphdb.relations.RelationIdentifier; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.testcontainers.junit.jupiter.Container; @@ -92,7 +91,6 @@ public void testRelationIdentifier(TestInfo testInfo) { } @Test - @Disabled("JanusGraphPredicate serialization won't work any older version than 0.6.0.") public void testJanusGraphTextPredicates() { GraphTraversalSource g = traversal(); g.addV("predicateTestLabel").property("name", "neptune").iterate(); diff --git a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializerTest.java b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializerTest.java index 609923efd0..1b2d1d3853 100644 --- a/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializerTest.java 
+++ b/janusgraph-driver/src/test/java/org/janusgraph/graphdb/tinkerpop/io/binary/GeoshapeGraphBinarySerializerTest.java @@ -29,6 +29,7 @@ import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -46,19 +47,30 @@ private static Stream geoshapeProvider() { return Stream.of( Geoshape.point(37.97, 23.72), Geoshape.circle(37.97, 23.72, 10.0), - Geoshape.box(37.97, 23.72, 38.97, 24.72) + Geoshape.box(37.97, 23.72, 38.97, 24.72), + Geoshape.line(Arrays.asList(new double[] {37.97, 23.72}, new double[] {38.97, 24.72})), + Geoshape.polygon(Arrays.asList(new double[] {119.0, 59.0}, new double[] {121.0, 59.0}, new double[] {121.0, 61.0}, new double[] {119.0, 61.0}, new double[] {119.0, 59.0})), + //MultiPoint + Geoshape.geoshape(Geoshape.getShapeFactory().multiPoint().pointXY(60.0, 60.0).pointXY(120.0, 60.0).build()), + //MultiLine + Geoshape.geoshape(Geoshape.getShapeFactory().multiLineString() + .add(Geoshape.getShapeFactory().lineString().pointXY(59.0, 60.0).pointXY(61.0, 60.0)) + .add(Geoshape.getShapeFactory().lineString().pointXY(119.0, 60.0).pointXY(121.0, 60.0)).build()), + //MultiPolygon + Geoshape.geoshape(Geoshape.getShapeFactory().multiPolygon() + .add(Geoshape.getShapeFactory().polygon().pointXY(59.0, 59.0).pointXY(61.0, 59.0) + .pointXY(61.0, 61.0).pointXY(59.0, 61.0).pointXY(59.0, 59.0)) + .add(Geoshape.getShapeFactory().polygon().pointXY(119.0, 59.0).pointXY(121.0, 59.0) + .pointXY(121.0, 61.0).pointXY(119.0, 61.0).pointXY(119.0, 59.0)).build()), + //GeometryCollection + Geoshape.geoshape(Geoshape.getGeometryCollectionBuilder() + .add(Geoshape.getShapeFactory().pointXY(60.0, 60.0)) + .add(Geoshape.getShapeFactory().lineString().pointXY(119.0, 60.0).pointXY(121.0, 60.0).build()) + .add(Geoshape.getShapeFactory().polygon().pointXY(119.0, 59.0).pointXY(121.0, 59.0) + .pointXY(121.0, 61.0).pointXY(119.0, 61.0).pointXY(119.0, 
59.0).build()).build()) ); } - @ParameterizedTest - @MethodSource("geoshapeProvider") - public void shouldCustomSerialization(Geoshape geoshape) throws IOException { - final GraphBinaryMessageSerializerV1 serializer = new GraphBinaryMessageSerializerV1( - TypeSerializerRegistry.build().addCustomType(Geoshape.class, new GeoshapeGraphBinarySerializer()).create()); - - assertGeoshape(serializer, geoshape); - } - @ParameterizedTest @MethodSource("geoshapeProvider") public void shouldSerializeViaIoRegistry(Geoshape geoshape) throws IOException { @@ -67,7 +79,7 @@ public void shouldSerializeViaIoRegistry(Geoshape geoshape) throws IOException { config.put(TOKEN_IO_REGISTRIES, Collections.singletonList(JanusGraphIoRegistry.class.getName())); serializer.configure(config, Collections.emptyMap()); - assertGeoshape(serializer, geoshape); + assertSymmetricGeoshapeSerializationInResponseMessage(serializer, geoshape); } @ParameterizedTest @@ -88,7 +100,7 @@ public void readValueAndWriteValueShouldBeSymmetric(Geoshape geoshape) throws IO } } - private void assertGeoshape(final GraphBinaryMessageSerializerV1 serializer, final Geoshape geoshape) throws IOException { + private void assertSymmetricGeoshapeSerializationInResponseMessage(final GraphBinaryMessageSerializerV1 serializer, final Geoshape geoshape) throws IOException { final ByteBuf serialized = serializer.serializeResponseAsBinary( ResponseMessage.build(UUID.randomUUID()).result(geoshape).create(), allocator); diff --git a/janusgraph-driver/src/test/resources/log4j.properties b/janusgraph-driver/src/test/resources/log4j.properties deleted file mode 100644 index d5467091fe..0000000000 --- a/janusgraph-driver/src/test/resources/log4j.properties +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. -#log4j.appender.A1=org.apache.log4j.ConsoleAppender -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log - -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -# Set root logger level to the designated level and its only appender to A1. -log4j.rootLogger=DEBUG, A1 diff --git a/janusgraph-driver/src/test/resources/log4j2-test.xml b/janusgraph-driver/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-driver/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-es/pom.xml b/janusgraph-es/pom.xml index 4b304139fb..b451574a0a 100644 --- a/janusgraph-es/pom.xml +++ b/janusgraph-es/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-es @@ -70,13 +70,16 @@ - org.slf4j - slf4j-log4j12 + org.apache.logging.log4j + log4j-slf4j-impl + runtime true ch.qos.logback logback-classic + runtime + true diff --git a/janusgraph-es/src/test/resources/log4j.properties b/janusgraph-es/src/test/resources/log4j.properties deleted file mode 100644 index b8f0e78f14..0000000000 --- a/janusgraph-es/src/test/resources/log4j.properties +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# A2 is a ConsoleAppender. -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -# A2 uses PatternLayout. -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# Set both appenders (A1 and A2) on the root logger. 
-#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - diff --git a/janusgraph-es/src/test/resources/log4j2-test.xml b/janusgraph-es/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-es/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-examples/example-berkeleyje/pom.xml b/janusgraph-examples/example-berkeleyje/pom.xml index a8f26b4c98..daad784cf9 100644 --- a/janusgraph-examples/example-berkeleyje/pom.xml +++ b/janusgraph-examples/example-berkeleyje/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph-examples - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml example-berkeleyje diff --git a/janusgraph-examples/example-common/pom.xml b/janusgraph-examples/example-common/pom.xml index 9cd0ce5413..a6d7f3fa93 100644 --- a/janusgraph-examples/example-common/pom.xml +++ b/janusgraph-examples/example-common/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph-examples - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml example-common diff --git a/janusgraph-examples/example-common/src/test/resources/log4j.properties b/janusgraph-examples/example-common/src/test/resources/log4j.properties deleted file mode 100644 index 5778efa98a..0000000000 --- a/janusgraph-examples/example-common/src/test/resources/log4j.properties +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender - -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -# Set root logger level to the designated level and its only appender to A1. -log4j.rootLogger=INFO, A1 diff --git a/janusgraph-examples/example-common/src/test/resources/log4j2.xml b/janusgraph-examples/example-common/src/test/resources/log4j2.xml new file mode 100644 index 0000000000..384ab8ded5 --- /dev/null +++ b/janusgraph-examples/example-common/src/test/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-examples/example-cql/README.md b/janusgraph-examples/example-cql/README.md index 4efa87678a..d2805e2ec6 100644 --- a/janusgraph-examples/example-cql/README.md +++ b/janusgraph-examples/example-cql/README.md @@ -23,12 +23,6 @@ graphs on the same Cassandra and Elasticsearch servers. Refer to the JanusGraph [configuration reference](https://docs.janusgraph.org/basics/configuration-reference/) for additional properties. -* [`logback.xml`](conf/logback.xml) configures logging with [Logback](https://logback.qos.ch/), -which is the logger used by Cassandra. The example configuration logs to the -console and adjusts the logging level for some noisier packages. Refer to -the Logback [manual](https://logback.qos.ch/manual/index.html) for additional -details. 
- ### Cassandra configuration The JanusGraph properties file assumes that Cassandra is installed on localhost diff --git a/janusgraph-examples/example-cql/pom.xml b/janusgraph-examples/example-cql/pom.xml index 863dff7bc3..94c9c17c45 100644 --- a/janusgraph-examples/example-cql/pom.xml +++ b/janusgraph-examples/example-cql/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph-examples - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml example-cql @@ -36,7 +36,6 @@ org.janusgraph.example.JanusGraphApp ${project.basedir}/conf/jgex-cql.properties - ${project.basedir}/conf/logback.xml diff --git a/janusgraph-examples/example-hbase/README.md b/janusgraph-examples/example-hbase/README.md index 9228b13a23..da7f8de740 100644 --- a/janusgraph-examples/example-hbase/README.md +++ b/janusgraph-examples/example-hbase/README.md @@ -30,11 +30,6 @@ you can store multiple graphs on the same HBase and Solr servers. Refer to the JanusGraph [configuration reference](https://docs.janusgraph.org/basics/configuration-reference/) for additional properties. -* [`logback.xml`](conf/logback.xml) configures logging with [Logback](https://logback.qos.ch/). -The example configuration logs to the console and adjusts the logging level -for some noisier packages. Refer to the Logback [manual](https://logback.qos.ch/manual/index.html) -for additional details. 
- ### HBase configuration The JanusGraph properties file assumes that HBase is installed on localhost @@ -63,12 +58,6 @@ The required Maven dependencies for HBase: ${janusgraph.version} runtime - - org.apache.hbase - hbase-shaded-client - ${hbase1.version} - runtime - ``` The required Maven dependency for Solr: diff --git a/janusgraph-examples/example-hbase/pom.xml b/janusgraph-examples/example-hbase/pom.xml index 090de61f90..0e741c3630 100644 --- a/janusgraph-examples/example-hbase/pom.xml +++ b/janusgraph-examples/example-hbase/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph-examples - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml example-hbase @@ -25,12 +25,6 @@ ${project.version} runtime - - org.apache.hbase - hbase-shaded-client - ${hbase1.version} - runtime - org.janusgraph janusgraph-solr @@ -42,7 +36,6 @@ org.janusgraph.example.JanusGraphApp ${project.basedir}/conf/jgex-hbase-solr-cloud.properties - ${project.basedir}/conf/logback.xml diff --git a/janusgraph-examples/example-remotegraph/pom.xml b/janusgraph-examples/example-remotegraph/pom.xml index 48d62c6e9f..7e7d880fb0 100644 --- a/janusgraph-examples/example-remotegraph/pom.xml +++ b/janusgraph-examples/example-remotegraph/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph-examples - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml example-remotegraph diff --git a/janusgraph-examples/example-tinkergraph/pom.xml b/janusgraph-examples/example-tinkergraph/pom.xml index b0aeca518c..c13c85d239 100644 --- a/janusgraph-examples/example-tinkergraph/pom.xml +++ b/janusgraph-examples/example-tinkergraph/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph-examples - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml example-tinkergraph diff --git a/janusgraph-examples/example-tinkergraph/src/test/resources/log4j.properties b/janusgraph-examples/example-tinkergraph/src/test/resources/log4j.properties deleted file mode 100644 index 5778efa98a..0000000000 --- 
a/janusgraph-examples/example-tinkergraph/src/test/resources/log4j.properties +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender - -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -# Set root logger level to the designated level and its only appender to A1. 
-log4j.rootLogger=INFO, A1 diff --git a/janusgraph-examples/example-tinkergraph/src/test/resources/log4j2.xml b/janusgraph-examples/example-tinkergraph/src/test/resources/log4j2.xml new file mode 100644 index 0000000000..384ab8ded5 --- /dev/null +++ b/janusgraph-examples/example-tinkergraph/src/test/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-examples/pom.xml b/janusgraph-examples/pom.xml index 8962273df8..b1af0850e9 100644 --- a/janusgraph-examples/pom.xml +++ b/janusgraph-examples/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-examples @@ -49,12 +49,6 @@ ${example.config} ${cmd} - - - logback.configurationFile - ${logback.config} - - diff --git a/janusgraph-grpc/pom.xml b/janusgraph-grpc/pom.xml index 15b3dde432..aed1707c35 100644 --- a/janusgraph-grpc/pom.xml +++ b/janusgraph-grpc/pom.xml @@ -4,7 +4,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT janusgraph-grpc JanusGraph-gRPC: gRPC Components for JanusGraph diff --git a/janusgraph-grpc/src/main/proto/janusgraph/types/v1/schema_types.proto b/janusgraph-grpc/src/main/proto/janusgraph/types/v1/schema_types.proto index 5e33911aca..45b282def8 100644 --- a/janusgraph-grpc/src/main/proto/janusgraph/types/v1/schema_types.proto +++ b/janusgraph-grpc/src/main/proto/janusgraph/types/v1/schema_types.proto @@ -21,6 +21,56 @@ import "google/protobuf/wrappers.proto"; option java_multiple_files = true; option java_package = "org.janusgraph.graphdb.grpc.types"; +enum PropertyDataType { + PROPERTY_DATA_TYPE_UNSPECIFIED = 0; + // java class: String.class + PROPERTY_DATA_TYPE_STRING = 1; + // java class: Character.class + PROPERTY_DATA_TYPE_CHARACTER = 2; + // java class: Boolean.class + PROPERTY_DATA_TYPE_BOOLEAN = 3; + // java class: Byte.class + PROPERTY_DATA_TYPE_INT8 = 4; + // java class: Short.class + PROPERTY_DATA_TYPE_INT16 = 5; + // java class: Integer.class + 
PROPERTY_DATA_TYPE_INT32 = 6; + // java class: Long.class + PROPERTY_DATA_TYPE_INT64 = 7; + // java class: Float.class + PROPERTY_DATA_TYPE_FLOAT32 = 8; + // java class: Double.class + PROPERTY_DATA_TYPE_FLOAT64 = 9; + // java class: Date.class + PROPERTY_DATA_TYPE_DATE = 10; + // java class: Geoshape.class + PROPERTY_DATA_TYPE_GEO_SHAPE = 11; + // java class: UUID.class + PROPERTY_DATA_TYPE_UUID = 12; + // java class: Object.class + PROPERTY_DATA_TYPE_JAVA_OBJECT = 13; +} + +message VertexProperty { + // The Internal id. + google.protobuf.Int64Value id = 1; + // The VertexProperty Name. + string name = 2; + // The Data type. + PropertyDataType data_type = 3; + enum Cardinality { + CARDINALITY_UNSPECIFIED = 0; + // Vertex can have the property just once. + CARDINALITY_SINGLE = 1; + // Vertex can have multiple properties of the same name. + CARDINALITY_LIST = 2; + // Vertex can have multiple properties of the same with uniqueness constraint for the value. + CARDINALITY_SET = 3; + } + // Defines cardinality. + Cardinality cardinality = 4; +} + message VertexLabel { // The Internal id. google.protobuf.Int64Value id = 1; @@ -30,6 +80,8 @@ message VertexLabel { bool read_only = 3; // Marking as partitioned, default not partitioned. bool partitioned = 4; + // Contains all constrained properties at the moment. + repeated VertexProperty properties = 5; } message EdgeLabel { @@ -38,27 +90,29 @@ message EdgeLabel { // The EdgeLabel Name. string name = 2; enum Direction { - // Edge can be queried from both vertices. - BOTH = 0; + DIRECTION_UNSPECIFIED = 0; + // Edge can be queried from both vertices (default). + DIRECTION_BOTH = 1; // Edge can be only queried from the outgoing vertex. // Direction OUT should be only used by experts, see https://docs.janusgraph.org/schema/advschema/#unidirected-edges. - OUT = 1; + DIRECTION_OUT = 2; } // Defines queryable direction. 
Direction direction = 3; enum Multiplicity { + MULTIPLICITY_UNSPECIFIED = 0; // The given edge label specifies a multi-graph, meaning that the multiplicity is not constrained and that // there may be multiple edges of this label between any given pair of vertices. - MULTI = 0; + MULTIPLICITY_MULTI = 1; // The given edge label specifies a simple graph, meaning that the multiplicity is not constrained but that there // can only be at most a single edge of this label between a given pair of vertices. - SIMPLE = 1; + MULTIPLICITY_SIMPLE = 2; // There can only be a single in-edge of this label for a given vertex but multiple out-edges (i.e. in-unique). - ONE2MANY = 2; + MULTIPLICITY_ONE2MANY = 3; // There can only be a single out-edge of this label for a given vertex but multiple in-edges (i.e. out-unique). - MANY2ONE = 3; + MULTIPLICITY_MANY2ONE = 4; // There can be only a single in and out-edge of this label for a given vertex (i.e. unique in both directions). - ONE2ONE = 4; + MULTIPLICITY_ONE2ONE = 5; } // Defines Multiplicity of a edge. Multiplicity multiplicity = 4; diff --git a/janusgraph-hadoop/pom.xml b/janusgraph-hadoop/pom.xml index e7731039d2..10cbe53cfc 100644 --- a/janusgraph-hadoop/pom.xml +++ b/janusgraph-hadoop/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-hadoop diff --git a/janusgraph-hadoop/src/assembly/shared-resources/log4j.properties b/janusgraph-hadoop/src/assembly/shared-resources/log4j.properties deleted file mode 100644 index 11af5ec56f..0000000000 --- a/janusgraph-hadoop/src/assembly/shared-resources/log4j.properties +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is a FileAppender. -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# A2 is a ConsoleAppender. -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -# A2 uses PatternLayout. -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# Set both appenders (A1 and A2) on the root logger. -#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - -# Restrict some of JanusGraph's dependencies to INFO and scarier. -# These restrictions are useful when reducing the severity threshold -# setting on one of the appenders below INFO. 
-log4j.logger.org.apache.cassandra=INFO -log4j.logger.org.apache.hadoop=INFO -log4j.logger.org.apache.zookeeper=INFO -log4j.logger.org.apache.cassandra.db.Memtable=OFF -log4j.logger.org.janusgraph.hadoop.JanusGraphIndexRepairMapper=DEBUG -log4j.logger.org.janusgraph.DaemonRunner=DEBUG -log4j.logger.org.janusgraph.diskstorage.es.ElasticsearchRunner=DEBUG -log4j.logger.org.janusgraph.diskstorage.es.ElasticsearchStatus=DEBUG -log4j.logger.org.janusgraph.hadoop.formats.util.JanusGraphVertexDeserializer=DEBUG diff --git a/janusgraph-hadoop/src/main/java/org/janusgraph/hadoop/ImmutableConfiguration.java b/janusgraph-hadoop/src/main/java/org/janusgraph/hadoop/ImmutableConfiguration.java deleted file mode 100644 index 4b7eeea938..0000000000 --- a/janusgraph-hadoop/src/main/java/org/janusgraph/hadoop/ImmutableConfiguration.java +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.hadoop; - -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.Reader; -import java.io.Writer; -import java.net.InetSocketAddress; -import java.net.URL; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; - -@Deprecated -public class ImmutableConfiguration extends Configuration { - - private final Configuration encapsulated; - - public ImmutableConfiguration(Configuration encapsulated) { - this.encapsulated = encapsulated; - } - - @Deprecated - public static void addDeprecation(String key, String[] newKeys, String customMessage) { - Configuration.addDeprecation(key, newKeys, customMessage); - } - - public static void addDeprecation(String key, String newKey, String customMessage) { - Configuration.addDeprecation(key, newKey, customMessage); - } - - @Deprecated - public static void addDeprecation(String key, String[] newKeys) { - Configuration.addDeprecation(key, newKeys); - } - - public static void addDeprecation(String key, String newKey) { - Configuration.addDeprecation(key, newKey); - } - - public static boolean isDeprecated(String key) { - return Configuration.isDeprecated(key); - } - - public static void addDefaultResource(String name) { - Configuration.addDefaultResource(name); - } - - @Override - public void addResource(String name) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void addResource(URL url) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void addResource(Path file) { - throw new 
UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void addResource(InputStream in) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void addResource(InputStream in, String name) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void reloadConfiguration() { - //throw new UnsupportedOperationException("This configuration instance is immutable"); - encapsulated.reloadConfiguration(); // allowed to simplify testing - } - - @Override - public String get(String name) { - return encapsulated.get(name); - } - - @Override - public String getTrimmed(String name) { - return encapsulated.getTrimmed(name); - } - - @Override - public String getTrimmed(String name, String defaultValue) { - return encapsulated.getTrimmed(name, defaultValue); - } - - @Override - public String getRaw(String name) { - return encapsulated.getRaw(name); - } - - @Override - public void set(String name, String value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void set(String name, String value, String source) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void unset(String name) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void setIfUnset(String name, String value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public String get(String name, String defaultValue) { - return encapsulated.get(name, defaultValue); - } - - @Override - public int getInt(String name, int defaultValue) { - return encapsulated.getInt(name, defaultValue); - } - - @Override - public int[] getInts(String name) { - return encapsulated.getInts(name); - } - - @Override - public void 
setInt(String name, int value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public long getLong(String name, long defaultValue) { - return encapsulated.getLong(name, defaultValue); - } - - @Override - public long getLongBytes(String name, long defaultValue) { - return encapsulated.getLongBytes(name, defaultValue); - } - - @Override - public void setLong(String name, long value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public float getFloat(String name, float defaultValue) { - return encapsulated.getFloat(name, defaultValue); - } - - @Override - public void setFloat(String name, float value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public double getDouble(String name, double defaultValue) { - return encapsulated.getDouble(name, defaultValue); - } - - @Override - public void setDouble(String name, double value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public boolean getBoolean(String name, boolean defaultValue) { - return encapsulated.getBoolean(name, defaultValue); - } - - @Override - public void setBoolean(String name, boolean value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public void setBooleanIfUnset(String name, boolean value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public > void setEnum(String name, T value) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public > T getEnum(String name, T defaultValue) { - return encapsulated.getEnum(name, defaultValue); - } - - @Override - public void setTimeDuration(String name, long value, TimeUnit unit) { - throw new UnsupportedOperationException("This configuration 
instance is immutable"); - } - - @Override - public long getTimeDuration(String name, long defaultValue, TimeUnit unit) { - return encapsulated.getTimeDuration(name, defaultValue, unit); - } - - @Override - public Pattern getPattern(String name, Pattern defaultValue) { - return encapsulated.getPattern(name, defaultValue); - } - - @Override - public void setPattern(String name, Pattern pattern) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - @InterfaceStability.Unstable - public String[] getPropertySources(String name) { - return encapsulated.getPropertySources(name); - } - - @Override - public IntegerRanges getRange(String name, String defaultValue) { - return encapsulated.getRange(name, defaultValue); - } - - @Override - public Collection getStringCollection(String name) { - return encapsulated.getStringCollection(name); - } - - @Override - public String[] getStrings(String name) { - return encapsulated.getStrings(name); - } - - @Override - public String[] getStrings(String name, String... defaultValue) { - return encapsulated.getStrings(name, defaultValue); - } - - @Override - public Collection getTrimmedStringCollection(String name) { - return encapsulated.getTrimmedStringCollection(name); - } - - @Override - public String[] getTrimmedStrings(String name) { - return encapsulated.getTrimmedStrings(name); - } - - @Override - public String[] getTrimmedStrings(String name, String... defaultValue) { - return encapsulated.getTrimmedStrings(name, defaultValue); - } - - @Override - public void setStrings(String name, String... 
values) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public InetSocketAddress getSocketAddr(String name, String defaultAddress, int defaultPort) { - return encapsulated.getSocketAddr(name, defaultAddress, defaultPort); - } - - @Override - public void setSocketAddr(String name, InetSocketAddress address) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public InetSocketAddress updateConnectAddr(String name, InetSocketAddress address) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public Class getClassByName(String name) throws ClassNotFoundException { - return encapsulated.getClassByName(name); - } - - @Override - public Class getClassByNameOrNull(String name) { - return encapsulated.getClassByNameOrNull(name); - } - - @Override - public Class[] getClasses(String name, Class... defaultValue) { - return encapsulated.getClasses(name, defaultValue); - } - - @Override - public Class getClass(String name, Class defaultValue) { - return encapsulated.getClass(name, defaultValue); - } - - @Override - public Class getClass(String name, Class defaultValue, Class xface) { - return encapsulated.getClass(name, defaultValue, xface); - } - - @Override - public List getInstances(String name, Class xface) { - return encapsulated.getInstances(name, xface); - } - - @Override - public void setClass(String name, Class theClass, Class xface) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public Path getLocalPath(String dirsProp, String path) throws IOException { - return encapsulated.getLocalPath(dirsProp, path); - } - - @Override - public File getFile(String dirsProp, String path) throws IOException { - return encapsulated.getFile(dirsProp, path); - } - - @Override - public URL getResource(String name) { - return 
encapsulated.getResource(name); - } - - @Override - public InputStream getConfResourceAsInputStream(String name) { - return encapsulated.getConfResourceAsInputStream(name); - } - - @Override - public Reader getConfResourceAsReader(String name) { - return encapsulated.getConfResourceAsReader(name); - } - - @Override - public int size() { - return encapsulated.size(); - } - - @Override - public void clear() { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - /* - * The encapsulated method impl creates a new HashMap for each invocation and returns an - * iterator on that newly-created HashMap. So, remove is allowed but doesn't mutate the - * state of the Configuration. This is not documented and might change. It also might - * be safer to throw an exception on remove since it won't have the effect the client code - * probably intends. - */ - @Override - public Iterator> iterator() { - return encapsulated.iterator(); - } - - @Override - public void writeXml(OutputStream out) throws IOException { - encapsulated.writeXml(out); - } - - @Override - public void writeXml(Writer out) throws IOException { - encapsulated.writeXml(out); - } - - public static void dumpConfiguration(Configuration config, Writer out) throws IOException { - Configuration.dumpConfiguration(config, out); - } - - @Override - public ClassLoader getClassLoader() { - return encapsulated.getClassLoader(); - } - - @Override - public void setClassLoader(ClassLoader classLoader) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - @Override - public String toString() { - return encapsulated.toString(); - } - - @Override - public void setQuietMode(boolean quietMode) { - throw new UnsupportedOperationException("This configuration instance is immutable"); - } - - public static void main(String[] args) throws Exception { - Configuration.main(args); - } - - @Override - public void readFields(DataInput in) throws IOException { 
- encapsulated.readFields(in); - } - - @Override - public void write(DataOutput out) throws IOException { - encapsulated.write(out); - } - - @Override - public Map getValByRegex(String regex) { - return encapsulated.getValByRegex(regex); - } - - public static void dumpDeprecatedKeys() { - Configuration.dumpDeprecatedKeys(); - } -} diff --git a/janusgraph-hadoop/src/main/java/org/janusgraph/hadoop/MapReduceIndexManagement.java b/janusgraph-hadoop/src/main/java/org/janusgraph/hadoop/MapReduceIndexManagement.java index eb4ac62d28..92d1c0ae75 100644 --- a/janusgraph-hadoop/src/main/java/org/janusgraph/hadoop/MapReduceIndexManagement.java +++ b/janusgraph-hadoop/src/main/java/org/janusgraph/hadoop/MapReduceIndexManagement.java @@ -25,13 +25,14 @@ import org.janusgraph.core.RelationType; import org.janusgraph.core.schema.Index; import org.janusgraph.core.schema.JanusGraphIndex; -import org.janusgraph.core.schema.JanusGraphManagement; import org.janusgraph.core.schema.RelationTypeIndex; import org.janusgraph.core.schema.SchemaAction; import org.janusgraph.diskstorage.Backend; import org.janusgraph.diskstorage.BackendException; import org.janusgraph.diskstorage.configuration.ConfigElement; -import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics; +import org.janusgraph.diskstorage.keycolumnvalue.scan.CompletedJobFuture; +import org.janusgraph.diskstorage.keycolumnvalue.scan.FailedJobFuture; +import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanJobFuture; import org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration; import org.janusgraph.graphdb.database.StandardJanusGraph; import org.janusgraph.graphdb.olap.job.IndexRemoveJob; @@ -48,9 +49,6 @@ import java.util.Collection; import java.util.EnumSet; import java.util.Iterator; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; public class MapReduceIndexManagement { @@ -68,7 +66,7 @@ public 
MapReduceIndexManagement(JanusGraph g) { this.graph = (StandardJanusGraph)g; } - public JanusGraphManagement.IndexJobFuture updateIndex(Index index, SchemaAction updateAction) throws BackendException { + public ScanJobFuture updateIndex(Index index, SchemaAction updateAction) throws BackendException { return updateIndex(index, updateAction, new Configuration()); } @@ -83,7 +81,7 @@ public JanusGraphManagement.IndexJobFuture updateIndex(Index index, SchemaAction * this method blocks until the Hadoop MapReduce job completes */ // TODO make this future actually async and update javadoc @return accordingly - public JanusGraphManagement.IndexJobFuture updateIndex(Index index, SchemaAction updateAction, Configuration hadoopConf) + public ScanJobFuture updateIndex(Index index, SchemaAction updateAction, Configuration hadoopConf) throws BackendException { Preconditions.checkNotNull(index, "Index parameter must not be null", index); @@ -108,12 +106,10 @@ public JanusGraphManagement.IndexJobFuture updateIndex(Index index, SchemaAction if (updateAction.equals(SchemaAction.REINDEX)) { indexJobClass = IndexRepairJob.class; mapperClass = HadoopVertexScanMapper.class; - } else if (updateAction.equals(SchemaAction.REMOVE_INDEX)) { + } else { + assert updateAction.equals(SchemaAction.REMOVE_INDEX); indexJobClass = IndexRemoveJob.class; mapperClass = HadoopScanMapper.class; - } else { - // Shouldn't get here -- if this exception is ever thrown, update SUPPORTED_ACTIONS - throw new IllegalStateException("Unrecognized " + SchemaAction.class.getSimpleName() + ": " + updateAction); } // The column family that serves as input to the IndexUpdateJob @@ -200,82 +196,4 @@ private static void copyIndexJobKeys(org.apache.hadoop.conf.Configuration hadoop ConfigElement.getPath(GraphDatabaseConfiguration.JOB_START_TIME), String.valueOf(System.currentTimeMillis())); } - - private static class CompletedJobFuture implements JanusGraphManagement.IndexJobFuture { - - private final ScanMetrics 
completedJobMetrics; - - private CompletedJobFuture(ScanMetrics completedJobMetrics) { - this.completedJobMetrics = completedJobMetrics; - } - - @Override - public ScanMetrics getIntermediateResult() { - return completedJobMetrics; - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - return false; - } - - @Override - public boolean isCancelled() { - return false; - } - - @Override - public boolean isDone() { - return true; - } - - @Override - public ScanMetrics get() throws InterruptedException, ExecutionException { - return completedJobMetrics; - } - - @Override - public ScanMetrics get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - return completedJobMetrics; - } - } - - private static class FailedJobFuture implements JanusGraphManagement.IndexJobFuture { - - private final Throwable cause; - - public FailedJobFuture(Throwable cause) { - this.cause = cause; - } - - @Override - public ScanMetrics getIntermediateResult() throws ExecutionException { - throw new ExecutionException(cause); - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - return false; - } - - @Override - public boolean isCancelled() { - return false; - } - - @Override - public boolean isDone() { - return true; - } - - @Override - public ScanMetrics get() throws InterruptedException, ExecutionException { - throw new ExecutionException(cause); - } - - @Override - public ScanMetrics get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - throw new ExecutionException(cause); - } - } } diff --git a/janusgraph-hadoop/src/test/java/org/janusgraph/hadoop/AbstractIndexManagementIT.java b/janusgraph-hadoop/src/test/java/org/janusgraph/hadoop/AbstractIndexManagementIT.java index 8a28db27b8..e7debc0d19 100644 --- a/janusgraph-hadoop/src/test/java/org/janusgraph/hadoop/AbstractIndexManagementIT.java +++ 
b/janusgraph-hadoop/src/test/java/org/janusgraph/hadoop/AbstractIndexManagementIT.java @@ -89,7 +89,7 @@ public void testRemoveGraphIndexWithToolRunner() throws Exception { assertThrows(FileNotFoundException.class, () -> ToolRunner.run(app, new String[] {"-files", "invalid-file.txt"})); // submit the MapReduce job together with a dummy file - ToolRunner.run(app, new String[] {"-files", getClass().getClassLoader().getResource("log4j.properties").getPath()}); + ToolRunner.run(app, new String[] {"-files", getClass().getClassLoader().getResource("log4j2-test.xml").getPath()}); assertEquals(12, app.getMetrics().getCustom(IndexRemoveJob.DELETED_RECORDS_COUNT)); } diff --git a/janusgraph-hadoop/src/test/resources/log4j.properties b/janusgraph-hadoop/src/test/resources/log4j.properties deleted file mode 100644 index 11af5ec56f..0000000000 --- a/janusgraph-hadoop/src/test/resources/log4j.properties +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is a FileAppender. -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# A2 is a ConsoleAppender. -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -# A2 uses PatternLayout. 
-log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# Set both appenders (A1 and A2) on the root logger. -#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - -# Restrict some of JanusGraph's dependencies to INFO and scarier. -# These restrictions are useful when reducing the severity threshold -# setting on one of the appenders below INFO. -log4j.logger.org.apache.cassandra=INFO -log4j.logger.org.apache.hadoop=INFO -log4j.logger.org.apache.zookeeper=INFO -log4j.logger.org.apache.cassandra.db.Memtable=OFF -log4j.logger.org.janusgraph.hadoop.JanusGraphIndexRepairMapper=DEBUG -log4j.logger.org.janusgraph.DaemonRunner=DEBUG -log4j.logger.org.janusgraph.diskstorage.es.ElasticsearchRunner=DEBUG -log4j.logger.org.janusgraph.diskstorage.es.ElasticsearchStatus=DEBUG -log4j.logger.org.janusgraph.hadoop.formats.util.JanusGraphVertexDeserializer=DEBUG diff --git a/janusgraph-hadoop/src/test/resources/log4j2-test.xml b/janusgraph-hadoop/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-hadoop/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-custom-cerberus-load.groovy b/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-custom-cerberus-load.groovy index 8c9a39240b..e153f2d73d 100644 --- a/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-custom-cerberus-load.groovy +++ b/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-custom-cerberus-load.groovy @@ -12,7 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+ +import org.janusgraph.core.JanusGraphVertex import org.janusgraph.core.JanusGraphVertexProperty +import org.apache.tinkerpop.gremlin.structure.Vertex; JanusGraphVertex getOrCreateVertex(faunusVertex, graph, context, log) { String uniqueKey = "name" @@ -21,7 +24,7 @@ JanusGraphVertex getOrCreateVertex(faunusVertex, graph, context, log) { if (null == uniqueValue) { throw new RuntimeException("The provided Faunus vertex does not have a property for the unique key: " + faunusVertex) } - Iterator itty = graph.query().has(uniqueKey, uniqueValue).vertices().iterator() + Iterator itty = graph.query().has(uniqueKey, uniqueValue).vertexStream().iterator() if (itty.hasNext()) { janusgraphVertex = itty.next() if (itty.hasNext()) { diff --git a/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-load.groovy b/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-load.groovy index a5bd55ab29..0bea431676 100644 --- a/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-load.groovy +++ b/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-load.groovy @@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+ +import org.apache.tinkerpop.gremlin.structure.Vertex +import org.janusgraph.core.JanusGraph +import org.janusgraph.core.JanusGraphVertex + JanusGraphVertex getOrCreateVertex(FaunusVertex faunusVertex, JanusGraph graph, TaskInputOutputContext context, Logger log) { String uniqueKey = "name" Object uniqueValue = faunusVertex.value(uniqueKey) @@ -19,7 +24,7 @@ JanusGraphVertex getOrCreateVertex(FaunusVertex faunusVertex, JanusGraph graph, if (null == uniqueValue) { throw new RuntimeException("The provided Faunus vertex does not have a property for the unique key: " + faunusVertex) } - Iterator itty = graph.query().has(uniqueKey, uniqueValue).vertices().iterator() + Iterator itty = graph.query().has(uniqueKey, uniqueValue).vertexStream().iterator() if (itty.hasNext()) { janusgraphVertex = itty.next() if (itty.hasNext()) { diff --git a/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-naive-cerberus-load.groovy b/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-naive-cerberus-load.groovy index ec0b9b24d4..202a8e4602 100644 --- a/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-naive-cerberus-load.groovy +++ b/janusgraph-hadoop/src/test/resources/org/janusgraph/hadoop/formats/graphson/incremental-naive-cerberus-load.groovy @@ -19,7 +19,7 @@ JanusGraphVertex getOrCreateVertex(faunusVertex, graph, context, log) { if (null == uniqueValue) { throw new RuntimeException("The provided Faunus vertex does not have a property for the unique key: " + faunusVertex) } - Iterator itty = graph.query().has(uniqueKey, uniqueValue).vertices().iterator() + Iterator itty = graph.query().has(uniqueKey, uniqueValue).vertexStream().iterator() if (itty.hasNext()) { janusgraphVertex = itty.next() if (itty.hasNext()) { diff --git a/janusgraph-hbase/pom.xml b/janusgraph-hbase/pom.xml index 72d90b859c..aa61f7feca 100644 --- a/janusgraph-hbase/pom.xml +++ b/janusgraph-hbase/pom.xml 
@@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT janusgraph-hbase JanusGraph-HBase: Distributed Graph Database @@ -21,13 +21,16 @@ - org.slf4j - slf4j-log4j12 + org.apache.logging.log4j + log4j-slf4j-impl + runtime true ch.qos.logback logback-classic + runtime + true @@ -38,15 +41,59 @@ org.janusgraph - janusgraph-backend-testutils + janusgraph-hadoop ${project.version} - test + true + + + org.apache.hbase + hbase-shaded-client + ${hbase2.version} + + + junit + junit + + + + + org.apache.hbase + hbase-shaded-mapreduce + ${hbase2.version} + + + junit + junit + + + org.apache.hadoop + hadoop-annotations + + + javax.enterprise + cdi-api + + + org.apache.htrace + htrace-core4 + + + + + javax.enterprise + cdi-api + 1.0-SP4 + + + org.apache.htrace + htrace-core4 + 4.2.0-incubating org.janusgraph - janusgraph-hadoop + janusgraph-backend-testutils ${project.version} - true + test org.janusgraph @@ -86,7 +133,6 @@ maven-surefire-plugin - file:${project.build.testOutputDirectory}/log4j.properties ${hbase.docker.version} ${hbase.docker.uid} ${hbase.docker.gid} @@ -122,74 +168,4 @@ - - - - hbase1 - - ${hbase1.version} - - - - org.apache.hbase - hbase-shaded-client - ${hbase1.version} - - - junit - junit - - - true - - - org.apache.hbase - hbase-shaded-server - ${hbase1.version} - true - - - - - hbase2 - - ${hbase2.version} - - - - !hbase.profile - - - - - org.apache.hbase - hbase-shaded-client - ${hbase2.version} - - - junit - junit - - - true - - - org.apache.hbase - hbase-shaded-mapreduce - ${hbase2.version} - - - junit - junit - - - org.apache.hadoop - hadoop-annotations - - - true - - - - diff --git a/janusgraph-hbase/src/main/java/com/google/common/base/Stopwatch.java b/janusgraph-hbase/src/main/java/com/google/common/base/Stopwatch.java deleted file mode 100644 index 7e83d4bd50..0000000000 --- a/janusgraph-hbase/src/main/java/com/google/common/base/Stopwatch.java +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright (C) 2008 The Guava Authors - * - 
* Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.common.base; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.GwtCompatible; -import com.google.common.annotations.GwtIncompatible; - -import java.util.concurrent.TimeUnit; - -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static java.util.concurrent.TimeUnit.DAYS; -import static java.util.concurrent.TimeUnit.HOURS; -import static java.util.concurrent.TimeUnit.MICROSECONDS; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.MINUTES; -import static java.util.concurrent.TimeUnit.NANOSECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; - -/** - * An object that measures elapsed time in nanoseconds. It is useful to measure - * elapsed time using this class instead of direct calls to {@link - * System#nanoTime} for a few reasons: - * - *

    - *
  • An alternate time source can be substituted, for testing or performance - * reasons. - *
  • As documented by {@code nanoTime}, the value returned has no absolute - * meaning, and can only be interpreted as relative to another timestamp - * returned by {@code nanoTime} at a different time. {@code Stopwatch} is a - * more effective abstraction because it exposes only these relative values, - * not the absolute ones. - *
- * - *

Basic usage: - *

- *   Stopwatch stopwatch = Stopwatch.{@link #createStarted createStarted}();
- *   doSomething();
- *   stopwatch.{@link #stop stop}(); // optional
- *
- *   long millis = stopwatch.elapsed(MILLISECONDS);
- *
- *   log.info("time: " + stopwatch); // formatted string like "12.3 ms"
- * - *

Stopwatch methods are not idempotent; it is an error to start or stop a - * stopwatch that is already in the desired state. - * - *

When testing code that uses this class, use - * {@link #createUnstarted(Ticker)} or {@link #createStarted(Ticker)} to - * supply a fake or mock ticker. - * This allows you to - * simulate any valid behavior of the stopwatch. - * - *

Note: This class is not thread-safe. - * - * @author Kevin Bourrillion - * @since 10.0 - */ -@Beta -@GwtCompatible(emulated = true) -public final class Stopwatch { - private final Ticker ticker; - private boolean isRunning; - private long elapsedNanos; - private long startTick; - - /** - * Creates (but does not start) a new stopwatch using {@link System#nanoTime} - * as its time source. - * - * @return - * @since 15.0 - */ - public static Stopwatch createUnstarted() { - return new Stopwatch(); - } - - /** - * Creates (but does not start) a new stopwatch, using the specified time - * source. - * - * @return - * @since 15.0 - */ - public static Stopwatch createUnstarted(Ticker ticker) { - return new Stopwatch(ticker); - } - - /** - * Creates (and starts) a new stopwatch using {@link System#nanoTime} - * as its time source. - * - * @return - * @since 15.0 - */ - public static Stopwatch createStarted() { - return new Stopwatch().start(); - } - - /** - * Creates (and starts) a new stopwatch, using the specified time - * source. - * - * @return - * @since 15.0 - */ - public static Stopwatch createStarted(Ticker ticker) { - return new Stopwatch(ticker).start(); - } - - /** - * Creates (but does not start) a new stopwatch using {@link System#nanoTime} - * as its time source. - * - * @deprecated Use {@link Stopwatch#createUnstarted()} instead. - */ - @Deprecated - public Stopwatch() { - this(Ticker.systemTicker()); - } - - /** - * Creates (but does not start) a new stopwatch, using the specified time - * source. - * - * @param ticker - * @deprecated Use {@link Stopwatch#createUnstarted(Ticker)} instead. - */ - @Deprecated - public Stopwatch(Ticker ticker) { - this.ticker = checkNotNull(ticker, "ticker"); - } - - /** - * Returns {@code true} if {@link #start()} has been called on this stopwatch, - * and {@link #stop()} has not been called since the last call to {@code - * start()}. 
- * @return - */ - public boolean isRunning() { - return isRunning; - } - - /** - * Starts the stopwatch. - * - * @return this {@code Stopwatch} instance - * @throws IllegalStateException if the stopwatch is already running. - */ - public Stopwatch start() { - checkState(!isRunning, "This stopwatch is already running."); - isRunning = true; - startTick = ticker.read(); - return this; - } - - /** - * Stops the stopwatch. Future reads will return the fixed duration that had - * elapsed up to this point. - * - * @return this {@code Stopwatch} instance - * @throws IllegalStateException if the stopwatch is already stopped. - */ - public Stopwatch stop() { - long tick = ticker.read(); - checkState(isRunning, "This stopwatch is already stopped."); - isRunning = false; - elapsedNanos += tick - startTick; - return this; - } - - /** - * Sets the elapsed time for this stopwatch to zero, - * and places it in a stopped state. - * - * @return this {@code Stopwatch} instance - */ - public Stopwatch reset() { - elapsedNanos = 0; - isRunning = false; - return this; - } - - private long elapsedNanos() { - return isRunning ? ticker.read() - startTick + elapsedNanos : elapsedNanos; - } - - /** - * Returns the current elapsed time shown on this stopwatch, expressed - * in the desired time unit, with any fraction rounded down. - * - *

Note that the overhead of measurement can be more than a microsecond, so - * it is generally not useful to specify {@link TimeUnit#NANOSECONDS} - * precision here. - * - * @return - * @since 14.0 (since 10.0 as {@code elapsedTime()}) - */ - public long elapsed(TimeUnit desiredUnit) { - return desiredUnit.convert(elapsedNanos(), NANOSECONDS); - } - - // Guava is an outstanding library, but Stopwatch has caused an absurd compat headache relative to the problem it - // solves. Remember the createStarted() change before this? This particular class isn't even close to being worth - // the shading/debugging/compat-problem-solving time it has consumed due to these little stylistic ABI changes. - @Deprecated - public long elapsedMillis() - { - return TimeUnit.MILLISECONDS.convert(elapsedNanos(), NANOSECONDS); - } - - /** - * Returns a string representation of the current elapsed time. - * @return - */ - @GwtIncompatible("String.format()") - @Override public String toString() { - long nanos = elapsedNanos(); - - TimeUnit unit = chooseUnit(nanos); - double value = (double) nanos / NANOSECONDS.convert(1, unit); - - // Too bad this functionality is not exposed as a regular method call - return String.format("%.4g %s", value, abbreviate(unit)); - } - - private static TimeUnit chooseUnit(long nanos) { - if (DAYS.convert(nanos, NANOSECONDS) > 0) { - return DAYS; - } - if (HOURS.convert(nanos, NANOSECONDS) > 0) { - return HOURS; - } - if (MINUTES.convert(nanos, NANOSECONDS) > 0) { - return MINUTES; - } - if (SECONDS.convert(nanos, NANOSECONDS) > 0) { - return SECONDS; - } - if (MILLISECONDS.convert(nanos, NANOSECONDS) > 0) { - return MILLISECONDS; - } - if (MICROSECONDS.convert(nanos, NANOSECONDS) > 0) { - return MICROSECONDS; - } - return NANOSECONDS; - } - - private static String abbreviate(TimeUnit unit) { - switch (unit) { - case NANOSECONDS: - return "ns"; - case MICROSECONDS: - return "\u03bcs"; // μs - case MILLISECONDS: - return "ms"; - case SECONDS: - return "s"; - case 
MINUTES: - return "min"; - case HOURS: - return "h"; - case DAYS: - return "d"; - default: - throw new AssertionError(); - } - } -} diff --git a/janusgraph-hbase/src/main/java/com/google/common/io/Closeables.java b/janusgraph-hbase/src/main/java/com/google/common/io/Closeables.java deleted file mode 100644 index 99ed43c3f3..0000000000 --- a/janusgraph-hbase/src/main/java/com/google/common/io/Closeables.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.common.io; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.VisibleForTesting; - -import java.io.Closeable; -import java.io.IOException; -import java.io.InputStream; -import java.io.Reader; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -/** - * Utility methods for working with {@link Closeable} objects. - * - * @author Michael Lancaster - * @since 1.0 - */ -@Beta -public final class Closeables { - @VisibleForTesting static final Logger logger - = Logger.getLogger(Closeables.class.getName()); - - private Closeables() {} - - /** - * Closes a {@link Closeable}, with control over whether an {@code IOException} may be thrown. - * This is primarily useful in a finally block, where a thrown exception needs to be logged but - * not propagated (otherwise the original exception will be lost). - * - *

If {@code swallowIOException} is true then we never throw {@code IOException} but merely log - * it. - * - *

Example:

   {@code
-     *
-     *   public void useStreamNicely() throws IOException {
-     *     SomeStream stream = new SomeStream("foo");
-     *     boolean threw = true;
-     *     try {
-     *       // ... code which does something with the stream ...
-     *       threw = false;
-     *     } finally {
-     *       // If an exception occurs, rethrow it only if threw==false:
-     *       Closeables.close(stream, threw);
-     *     }
-     *   }}
- * - * @param closeable the {@code Closeable} object to be closed, or null, in which case this method - * does nothing - * @param swallowIOException if true, don't propagate IO exceptions thrown by the {@code close} - * methods - * @throws IOException if {@code swallowIOException} is false and {@code close} throws an - * {@code IOException}. - */ - public static void close(@Nullable Closeable closeable, - boolean swallowIOException) throws IOException { - if (closeable == null) { - return; - } - try { - closeable.close(); - } catch (IOException e) { - if (swallowIOException) { - logger.log(Level.WARNING, - "IOException thrown while closing Closeable.", e); - } else { - throw e; - } - } - } - - public static void closeQuietly(@Nullable Closeable closeable) { - try { - close(closeable, true); - } catch (IOException ignored) {} - } - - /** - * Closes the given {@link InputStream}, logging any {@code IOException} that's thrown rather - * than propagating it. - * - *

While it's not safe in the general case to ignore exceptions that are thrown when closing - * an I/O resource, it should generally be safe in the case of a resource that's being used only - * for reading, such as an {@code InputStream}. Unlike with writable resources, there's no - * chance that a failure that occurs when closing the stream indicates a meaningful problem such - * as a failure to flush all bytes to the underlying resource. - * - * @param inputStream the input stream to be closed, or {@code null} in which case this method - * does nothing - * @since 17.0 - */ - public static void closeQuietly(@Nullable InputStream inputStream) { - try { - close(inputStream, true); - } catch (IOException impossible) { - throw new AssertionError(impossible); - } - } - - /** - * Closes the given {@link Reader}, logging any {@code IOException} that's thrown rather than - * propagating it. - * - *

While it's not safe in the general case to ignore exceptions that are thrown when closing - * an I/O resource, it should generally be safe in the case of a resource that's being used only - * for reading, such as a {@code Reader}. Unlike with writable resources, there's no chance that - * a failure that occurs when closing the reader indicates a meaningful problem such as a failure - * to flush all bytes to the underlying resource. - * - * @param reader the reader to be closed, or {@code null} in which case this method does nothing - * @since 17.0 - */ - public static void closeQuietly(@Nullable Reader reader) { - try { - close(reader, true); - } catch (IOException impossible) { - throw new AssertionError(impossible); - } - } -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/AdminMask.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/AdminMask.java deleted file mode 100644 index 68b7136b75..0000000000 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/AdminMask.java +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * Copyright DataStax, Inc. - *

- * Please see the included license file for details. - */ -package org.janusgraph.diskstorage.hbase; - -import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.client.HBaseAdmin; - -import java.io.Closeable; -import java.io.IOException; - -/** - * This interface hides ABI/API breaking changes that HBase has made to its Admin/HBaseAdmin over the course - * of development from 0.94 to 1.0 and beyond. - */ -public interface AdminMask extends Closeable -{ - - void clearTable(String tableName, long timestamp) throws IOException; - - /** - * Drop given table. Table can be either enabled or disabled. - * @param tableName Name of the table to delete - * @throws IOException - */ - void dropTable(String tableName) throws IOException; - - HTableDescriptor getTableDescriptor(String tableName) throws IOException; - - boolean tableExists(String tableName) throws IOException; - - void createTable(HTableDescriptor desc) throws IOException; - - void createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException; - - /** - * Estimate the number of regionservers in the HBase cluster. - * - * This is usually implemented by calling - * {@link HBaseAdmin#getClusterStatus()} and then - * {@link ClusterStatus#getServers()} and finally {@code size()} on the - * returned server list. 
- * - * @return the number of servers in the cluster or -1 if it could not be determined - */ - int getEstimatedRegionServerCount(); - - void disableTable(String tableName) throws IOException; - - void enableTable(String tableName) throws IOException; - - boolean isTableDisabled(String tableName) throws IOException; - - void addColumn(String tableName, HColumnDescriptor columnDescriptor) throws IOException; - - void snapshot(String snapshotName, String table) throws IllegalArgumentException, IOException; - - void deleteSnapshot(String snapshotName) throws IOException; -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/ConnectionMask.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/ConnectionMask.java deleted file mode 100644 index f24e2eadcd..0000000000 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/ConnectionMask.java +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * Copyright DataStax, Inc. - *

- * Please see the included license file for details. - */ -package org.janusgraph.diskstorage.hbase; - -import org.apache.hadoop.hbase.HRegionLocation; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * This interface hides ABI/API breaking changes that HBase has made to its (H)Connection class over the course - * of development from 0.94 to 1.0 and beyond. - */ -public interface ConnectionMask extends Closeable -{ - - /** - * Retrieve the TableMask compatibility layer object for the supplied table name. - * @param name - * @return The TableMask for the specified table. - * @throws IOException in the case of backend exceptions. - */ - TableMask getTable(String name) throws IOException; - - /** - * Retrieve the AdminMask compatibility layer object for this Connection. - * @return The AdminMask for this Connection - * @throws IOException in the case of backend exceptions. - */ - AdminMask getAdmin() throws IOException; - - /** - * Retrieve the RegionLocations for the supplied table name. - * @param tableName - * @return A map of HRegionInfo to ServerName that describes the storage regions for the named table. - * @throws IOException in the case of backend exceptions. - */ - List getRegionLocations(String tableName) throws IOException; -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseAdmin1_0.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseAdmin1_0.java deleted file mode 100644 index f82c2a36c4..0000000000 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseAdmin1_0.java +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.diskstorage.hbase; - -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -public class HBaseAdmin1_0 implements AdminMask -{ - - private static final Logger log = LoggerFactory.getLogger(HBaseAdmin1_0.class); - - private final Admin adm; - - public HBaseAdmin1_0(Admin adm) - { - this.adm = adm; - } - - /** - * Delete all rows from the given table. This method is intended only for development and testing use. - * @param tableString - * @param timestamp - * @throws IOException - */ - @Override - public void clearTable(String tableString, long timestamp) throws IOException - { - TableName tableName = TableName.valueOf(tableString); - - if (!adm.tableExists(tableName)) { - log.debug("Attempted to clear table {} before it exists (noop)", tableString); - return; - } - - // Unfortunately, linear scanning and deleting rows is faster in HBase when running integration tests than - // disabling and deleting/truncating tables. 
- final Scan scan = new Scan(); - scan.setCacheBlocks(false); - scan.setCaching(2000); - scan.setTimeRange(0, Long.MAX_VALUE); - scan.setMaxVersions(1); - - try (final Table table = adm.getConnection().getTable(tableName); - final ResultScanner scanner = table.getScanner(scan)) { - final Iterator iterator = scanner.iterator(); - final int batchSize = 1000; - final List deleteList = new ArrayList<>(); - while (iterator.hasNext()) { - deleteList.add(new Delete(iterator.next().getRow(), timestamp)); - if (!iterator.hasNext() || deleteList.size() == batchSize) { - table.delete(deleteList); - deleteList.clear(); - } - } - } - } - - @Override - public void dropTable(String tableString) throws IOException { - final TableName tableName = TableName.valueOf(tableString); - - if (!adm.tableExists(tableName)) { - log.debug("Attempted to drop table {} before it exists (noop)", tableString); - return; - } - - if (adm.isTableEnabled(tableName)) { - adm.disableTable(tableName); - } - adm.deleteTable(tableName); - } - - @Override - public HTableDescriptor getTableDescriptor(String tableString) throws IOException - { - return adm.getTableDescriptor(TableName.valueOf(tableString)); - } - - @Override - public boolean tableExists(String tableString) throws IOException - { - return adm.tableExists(TableName.valueOf(tableString)); - } - - @Override - public void createTable(HTableDescriptor desc) throws IOException - { - adm.createTable(desc); - } - - @Override - public void createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException - { - adm.createTable(desc, startKey, endKey, numRegions); - } - - @Override - public int getEstimatedRegionServerCount() - { - int serverCount = -1; - try { - serverCount = adm.getClusterStatus().getServers().size(); - log.debug("Read {} servers from HBase ClusterStatus", serverCount); - } catch (IOException e) { - log.debug("Unable to retrieve HBase cluster status", e); - } - return serverCount; - } - - @Override - 
public void disableTable(String tableString) throws IOException - { - adm.disableTable(TableName.valueOf(tableString)); - } - - @Override - public void enableTable(String tableString) throws IOException - { - adm.enableTable(TableName.valueOf(tableString)); - } - - @Override - public boolean isTableDisabled(String tableString) throws IOException - { - return adm.isTableDisabled(TableName.valueOf(tableString)); - } - - @Override - public void addColumn(String tableString, HColumnDescriptor columnDescriptor) throws IOException - { - adm.addColumn(TableName.valueOf(tableString), columnDescriptor); - } - - @Override - public void close() throws IOException - { - adm.close(); - } - - @Override - public void snapshot(String snapshotName, String table) throws IllegalArgumentException, IOException { - adm.snapshot(snapshotName, TableName.valueOf(table)); - } - - @Override - public void deleteSnapshot(String snapshotName) throws IOException { - adm.deleteSnapshot(snapshotName); - } -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompat.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompat.java deleted file mode 100644 index e73155d530..0000000000 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompat.java +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.diskstorage.hbase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.client.Delete; - -import java.io.IOException; - -public interface HBaseCompat { - - /** - * Configure the compression scheme {@code algorithm} on a column family - * descriptor {@code cd}. The {@code algorithm} parameter is a string value - * corresponding to one of the values of HBase's Compression enum. The - * Compression enum has moved between packages as HBase has evolved, which - * is why this method has a String argument in the signature instead of the - * enum itself. - * - * @param cd - * column family to configure - * @param algorithm - * compression type to use - */ - void setCompression(HColumnDescriptor cd, String algorithm); - - /** - * Create and return a HTableDescriptor instance with the given name. The - * constructors on this method have remained stable over HBase development - * so far, but the old HTableDescriptor(String) constructor & byte[] friends - * are now marked deprecated and may eventually be removed in favor of the - * HTableDescriptor(TableName) constructor. That constructor (and the - * TableName type) only exists in newer HBase versions. Hence this method. 
- * - * @param tableName - * HBase table name - * @return a new table descriptor instance - */ - HTableDescriptor newTableDescriptor(String tableName); - - ConnectionMask createConnection(Configuration conf) throws IOException; - - void addColumnFamilyToTableDescriptor(HTableDescriptor tableDescriptor, HColumnDescriptor columnDescriptor); - - void setTimestamp(Delete d, long timestamp); -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompat1_0.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompat1_0.java deleted file mode 100644 index 25b7600e9d..0000000000 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompat1_0.java +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.diskstorage.hbase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.io.compress.Compression; - -import java.io.IOException; - -public class HBaseCompat1_0 implements HBaseCompat { - - @Override - public void setCompression(HColumnDescriptor cd, String algorithm) { - cd.setCompressionType(Compression.Algorithm.valueOf(algorithm)); - } - - @Override - public HTableDescriptor newTableDescriptor(String tableName) { - TableName tn = TableName.valueOf(tableName); - return new HTableDescriptor(tn); - } - - @Override - public ConnectionMask createConnection(Configuration conf) throws IOException - { - return new HConnection1_0(ConnectionFactory.createConnection(conf)); - } - - @Override - public void addColumnFamilyToTableDescriptor(HTableDescriptor tableDescriptor, HColumnDescriptor columnDescriptor) - { - tableDescriptor.addFamily(columnDescriptor); - } - - @Override - public void setTimestamp(Delete d, long timestamp) - { - d.setTimestamp(timestamp); - } - -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompatLoader.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompatLoader.java deleted file mode 100644 index 36ee451bd2..0000000000 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseCompatLoader.java +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package org.janusgraph.diskstorage.hbase; - -import org.apache.hadoop.hbase.util.VersionInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class HBaseCompatLoader { - - private static final Logger log = LoggerFactory.getLogger(HBaseCompatLoader.class); - - private static final String DEFAULT_HBASE_COMPAT_VERSION = "1.x"; - - private static final String DEFAULT_HBASE_COMPAT_CLASS_NAME = - "org.janusgraph.diskstorage.hbase.HBaseCompat1_0"; - - private static final String[] HBASE_SUPPORTED_VERSIONS = - new String[] { "1.2", "1.3", "1.4", "1.6", "2.0", "2.1", "2.2" }; - - private static HBaseCompat cachedCompat; - - public static synchronized HBaseCompat getCompat(String classOverride) { - - if (null != cachedCompat) { - log.debug("Returning cached HBase compatibility layer: {}", cachedCompat); - return cachedCompat; - } - - HBaseCompat compat; - String className = null; - String classNameSource = null; - - if (null != classOverride) { - className = classOverride; - classNameSource = "from explicit configuration"; - } else { - String hbaseVersion = VersionInfo.getVersion(); - for (String supportedVersion : HBASE_SUPPORTED_VERSIONS) { - if (hbaseVersion.startsWith(supportedVersion + ".")) { - // All HBase 1.x and 2.x maps to HBaseCompat1_0 for now. - className = DEFAULT_HBASE_COMPAT_CLASS_NAME; - classNameSource = "supporting runtime HBase version " + hbaseVersion; - break; - } - } - if (null == className) { - log.info("The HBase version {} is not explicitly supported by JanusGraph. 
" + - "Loading JanusGraph's compatibility layer for its most recent supported HBase version ({})", - hbaseVersion, DEFAULT_HBASE_COMPAT_VERSION); - className = DEFAULT_HBASE_COMPAT_CLASS_NAME; - classNameSource = " by default"; - } - } - - final String errTemplate = " when instantiating HBase compatibility class " + className; - - try { - compat = (HBaseCompat)Class.forName(className).newInstance(); - log.info("Instantiated HBase compatibility layer {}: {}", classNameSource, compat.getClass().getCanonicalName()); - } catch (IllegalAccessException | InstantiationException | ClassNotFoundException e) { - throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e); - } - - return cachedCompat = compat; - } -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseKeyColumnValueStore.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseKeyColumnValueStore.java index 46fcff3c94..b3d827f57f 100644 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseKeyColumnValueStore.java +++ b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseKeyColumnValueStore.java @@ -18,10 +18,13 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Iterators; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.ColumnPaginationFilter; import org.apache.hadoop.hbase.filter.ColumnRangeFilter; import org.apache.hadoop.hbase.filter.Filter; @@ -83,24 +86,19 @@ public class HBaseKeyColumnValueStore implements KeyColumnValueStore { private static final Logger logger = LoggerFactory.getLogger(HBaseKeyColumnValueStore.class); 
- private final String tableName; + private final TableName tableName; private final HBaseStoreManager storeManager; - // When using shortened CF names, columnFamily is the shortname and storeName is the longname - // When not using shortened CF names, they are the same - //private final String columnFamily; private final String storeName; - // This is columnFamily.getBytes() private final byte[] columnFamilyBytes; private final HBaseGetter entryGetter; - private final ConnectionMask cnx; + private final Connection cnx; - HBaseKeyColumnValueStore(HBaseStoreManager storeManager, ConnectionMask cnx, String tableName, String columnFamily, String storeName) { + HBaseKeyColumnValueStore(HBaseStoreManager storeManager, Connection cnx, TableName tableName, String columnFamily, String storeName) { this.storeManager = storeManager; this.cnx = cnx; this.tableName = tableName; - //this.columnFamily = columnFamily; this.storeName = storeName; this.columnFamilyBytes = Bytes.toBytes(columnFamily); this.entryGetter = new HBaseGetter(storeManager.getMetaDataSchema(storeName)); @@ -131,7 +129,7 @@ public void mutate(StaticBuffer key, List additions, List d public void acquireLock(StaticBuffer key, StaticBuffer column, StaticBuffer expectedValue, - StoreTransaction txh) throws BackendException { + StoreTransaction txh) { throw new UnsupportedOperationException(); } @@ -154,7 +152,7 @@ public KeyIterator getKeys(SliceQuery query, StoreTransaction txh) throws Backen } @Override - public KeySlicesIterator getKeys(MultiSlicesQuery queries, StoreTransaction txh) throws BackendException { + public KeySlicesIterator getKeys(MultiSlicesQuery queries, StoreTransaction txh) { throw new UnsupportedOperationException(); } @@ -192,7 +190,7 @@ private Map getHelper(List keys, Filter ge final Map resultMap = new HashMap<>(keys.size()); try { - TableMask table = null; + Table table = null; final Result[] results; try { @@ -254,16 +252,16 @@ private KeyIterator executeKeySliceQuery(@Nullable byte[] 
startKey, } if (startKey != null) - scan.setStartRow(startKey); + scan.withStartRow(startKey); if (endKey != null) - scan.setStopRow(endKey); + scan.withStopRow(endKey); if (columnSlice != null) { filters.addFilter(getFilter(columnSlice)); } - TableMask table = null; + Table table = null; try { table = cnx.getTable(tableName); @@ -381,12 +379,10 @@ public EntryMetaData[] getMetaSchema(Map.Entry> element, EntryMetaData meta) { - switch(meta) { - case TIMESTAMP: - return element.getValue().lastEntry().getKey(); - default: - throw new UnsupportedOperationException("Unsupported meta data: " + meta); + if (meta == EntryMetaData.TIMESTAMP) { + return element.getValue().lastEntry().getKey(); } + throw new UnsupportedOperationException("Unsupported meta data: " + meta); } } } diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseStoreManager.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseStoreManager.java index baabc4ffa3..51801ab285 100644 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseStoreManager.java +++ b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HBaseStoreManager.java @@ -22,23 +22,31 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Row; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.VersionInfo; import org.janusgraph.core.JanusGraphException; import org.janusgraph.diskstorage.BackendException; import org.janusgraph.diskstorage.BaseTransactionConfig; @@ -76,6 +84,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -202,42 +211,6 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol "The number of regions per regionserver to set when creating JanusGraph's HBase table", ConfigOption.Type.MASKABLE, Integer.class); - /** - * If this key is present in either the JVM system properties or the process - * environment (checked in the listed order, first hit wins), then its value - * must be the full package and class name of an implementation of - * {@link HBaseCompat} that has a no-arg public constructor. - *

- * When this is not set, JanusGraph attempts to automatically detect the - * HBase runtime version by calling {@link VersionInfo#getVersion()}. JanusGraph - * then checks the returned version string against a hard-coded list of - * supported version prefixes and instantiates the associated compat layer - * if a match is found. - *

- * When this is set, JanusGraph will not call - * {@code VersionInfo.getVersion()} or read its hard-coded list of supported - * version prefixes. JanusGraph will instead attempt to instantiate the class - * specified (via the no-arg constructor which must exist) and then attempt - * to cast it to HBaseCompat and use it as such. JanusGraph will assume the - * supplied implementation is compatible with the runtime HBase version and - * make no attempt to verify that assumption. - *

- * Setting this key incorrectly could cause runtime exceptions at best or - * silent data corruption at worst. This setting is intended for users - * running exotic HBase implementations that don't support VersionInfo or - * implementations which return values from {@code VersionInfo.getVersion()} - * that are inconsistent with Apache's versioning convention. It may also be - * useful to users who want to run against a new release of HBase that JanusGraph - * doesn't yet officially support. - * - */ - public static final ConfigOption COMPAT_CLASS = - new ConfigOption<>(HBASE_NS, "compat-class", - "The package and class name of the HBaseCompat implementation. HBaseCompat masks version-specific HBase API differences. " + - "When this option is unset, JanusGraph calls HBase's VersionInfo.getVersion() and loads the matching compat class " + - "at runtime. Setting this option forces JanusGraph to instead reflectively load and instantiate the specified class.", - ConfigOption.Type.MASKABLE, String.class); - public static final int PORT_DEFAULT = 2181; // Not used. Just for the parent constructor. public static final TimestampProviders PREFERRED_TIMESTAMPS = TimestampProviders.MILLI; @@ -249,14 +222,13 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol // Immutable instance fields private final BiMap shortCfNameMap; - private final String tableName; + private final TableName tableName; private final String compression; private final int regionCount; private final int regionsPerServer; - private final ConnectionMask cnx; + private final Connection cnx; private final boolean shortCfNames; private final boolean skipSchemaCheck; - private final HBaseCompat compat; // Cached return value of getDeployment() as requesting it can be expensive. private Deployment deployment = null; @@ -281,8 +253,6 @@ public HBaseStoreManager(org.janusgraph.diskstorage.configuration.Configuration this.regionCount = config.has(REGION_COUNT) ? 
config.get(REGION_COUNT) : -1; this.regionsPerServer = config.has(REGIONS_PER_SERVER) ? config.get(REGIONS_PER_SERVER) : -1; this.skipSchemaCheck = config.get(SKIP_SCHEMA_CHECK); - final String compatClass = config.has(COMPAT_CLASS) ? config.get(COMPAT_CLASS) : null; - this.compat = HBaseCompatLoader.getCompat(compatClass); /* * Specifying both region count options is permitted but may be @@ -331,8 +301,7 @@ public HBaseStoreManager(org.janusgraph.diskstorage.configuration.Configuration this.shortCfNames = config.get(SHORT_CF_NAMES); try { - //this.cnx = HConnectionManager.createConnection(hconf); - this.cnx = compat.createConnection(hconf); + this.cnx = ConnectionFactory.createConnection(hconf); } catch (IOException e) { throw new PermanentBackendException(e); } @@ -451,7 +420,7 @@ public void mutateMany(Map> mutations, St } try { - TableMask table = null; + Table table = null; try { table = cnx.getTable(tableName); @@ -500,13 +469,13 @@ public KeyColumnValueStore openDatabase(String longName, StoreMetaData.Container } @Override - public StoreTransaction beginTransaction(final BaseTransactionConfig config) throws BackendException { + public StoreTransaction beginTransaction(final BaseTransactionConfig config) { return new HBaseTransaction(config); } @Override public String getName() { - return tableName; + return tableName.getNameAsString(); } /** @@ -515,11 +484,11 @@ public String getName() { */ @Override public void clearStorage() throws BackendException { - try (AdminMask adm = getAdminInterface()) { + try (Admin adm = getAdminInterface()) { if (this.storageConfig.get(DROP_ON_CLEAR)) { - adm.dropTable(tableName); + dropTable(adm); } else { - adm.clearTable(tableName, times.getTime(times.getTime())); + clearTable(adm, times.getTime(times.getTime())); } } catch (IOException e) { @@ -527,9 +496,51 @@ public void clearStorage() throws BackendException { } } + private void clearTable(Admin adm, long timestamp) throws IOException + { + if 
(!adm.tableExists(tableName)) { + logger.debug("Attempted to clear table {} before it exists (noop)", tableName.getNameAsString()); + return; + } + + // Unfortunately, linear scanning and deleting rows is faster in HBase when running integration tests than + // disabling and deleting/truncating tables. + final Scan scan = new Scan(); + scan.setCacheBlocks(false); + scan.setCaching(2000); + scan.setTimeRange(0, Long.MAX_VALUE); + scan.readVersions(1); + + try (final Table table = adm.getConnection().getTable(tableName); + final ResultScanner scanner = table.getScanner(scan)) { + final Iterator iterator = scanner.iterator(); + final int batchSize = 1000; + final List deleteList = new ArrayList<>(); + while (iterator.hasNext()) { + deleteList.add(new Delete(iterator.next().getRow(), timestamp)); + if (!iterator.hasNext() || deleteList.size() == batchSize) { + table.delete(deleteList); + deleteList.clear(); + } + } + } + } + + private void dropTable(Admin adm) throws IOException { + if (!adm.tableExists(tableName)) { + logger.debug("Attempted to drop table {} before it exists (noop)", tableName.getNameAsString()); + return; + } + + if (adm.isTableEnabled(tableName)) { + adm.disableTable(tableName); + } + adm.deleteTable(tableName); + } + @Override public boolean exists() throws BackendException { - try (final AdminMask adm = getAdminInterface()) { + try (final Admin adm = getAdminInterface()) { return adm.tableExists(tableName); } catch (IOException e) { throw new TemporaryBackendException(e); @@ -548,7 +559,7 @@ public List getLocalKeyPartition() throws BackendException { ensureTableExists( tableName, getCfNameForStoreName(GraphDatabaseConfiguration.SYSTEM_PROPERTIES_STORE_NAME), 0); } - Map normed = normalizeKeyBounds(cnx.getRegionLocations(tableName)); + Map normed = normalizeKeyBounds(getRegionLocations(tableName)); for (Map.Entry e : normed.entrySet()) { if (NetworkUtil.isLocalConnection(e.getValue().getHostname())) { @@ -568,9 +579,15 @@ public List 
getLocalKeyPartition() throws BackendException { return result; } + private List getRegionLocations(TableName tableName) + throws IOException + { + return cnx.getRegionLocator(tableName).getAllRegionLocations(); + } + /** - * Given a map produced by {@link HTable#getRegionLocations()}, transform - * each key from an {@link HRegionInfo} to a {@link KeyRange} expressing the + * Given a map produced by {@link Connection#getRegionLocator(TableName)}, transform + * each key from an {@link RegionInfo} to a {@link KeyRange} expressing the * region's start and end key bounds using JanusGraph-partitioning-friendly * conventions (start inclusive, end exclusive, zero bytes appended where * necessary to make all keys at least 4 bytes long). @@ -620,7 +637,7 @@ private Map normalizeKeyBounds(List locat ImmutableMap.Builder b = ImmutableMap.builder(); for (HRegionLocation location : locations) { - HRegionInfo regionInfo = location.getRegionInfo(); + RegionInfo regionInfo = location.getRegion(); ServerName serverName = location.getServerName(); byte[] startKey = regionInfo.getStartKey(); byte[] endKey = regionInfo.getEndKey(); @@ -722,10 +739,10 @@ public static String shortenCfName(BiMap shortCfNameMap, String return s; } - private HTableDescriptor ensureTableExists(String tableName, String initialCFName, int ttlInSeconds) throws BackendException { - AdminMask adm = null; + private TableDescriptor ensureTableExists(TableName tableName, String initialCFName, int ttlInSeconds) throws BackendException { + Admin adm = null; - HTableDescriptor desc; + TableDescriptor desc; try { // Create our table, if necessary adm = getAdminInterface(); @@ -735,11 +752,11 @@ private HTableDescriptor ensureTableExists(String tableName, String initialCFNam * the table avoids HBase carping. */ if (adm.tableExists(tableName)) { - desc = adm.getTableDescriptor(tableName); + desc = adm.getDescriptor(tableName); // Check and warn if long and short cf names are interchangeably used for the same table. 
if (shortCfNames && initialCFName.equals(shortCfNameMap.get(SYSTEM_PROPERTIES_STORE_NAME))) { String longCFName = shortCfNameMap.inverse().get(initialCFName); - if (desc.getFamily(Bytes.toBytes(longCFName)) != null) { + if (desc.getColumnFamily(Bytes.toBytes(longCFName)) != null) { logger.warn("Configuration {}=true, but the table \"{}\" already has column family with long name \"{}\".", SHORT_CF_NAMES.getName(), tableName, longCFName); logger.warn("Check {} configuration.", SHORT_CF_NAMES.getName()); @@ -747,7 +764,7 @@ private HTableDescriptor ensureTableExists(String tableName, String initialCFNam } else if (!shortCfNames && initialCFName.equals(SYSTEM_PROPERTIES_STORE_NAME)) { String shortCFName = shortCfNameMap.get(initialCFName); - if (desc.getFamily(Bytes.toBytes(shortCFName)) != null) { + if (desc.getColumnFamily(Bytes.toBytes(shortCFName)) != null) { logger.warn("Configuration {}=false, but the table \"{}\" already has column family with short name \"{}\".", SHORT_CF_NAMES.getName(), tableName, shortCFName); logger.warn("Check {} configuration.", SHORT_CF_NAMES.getName()); @@ -765,13 +782,14 @@ else if (!shortCfNames && initialCFName.equals(SYSTEM_PROPERTIES_STORE_NAME)) { return desc; } - private HTableDescriptor createTable(String tableName, String cfName, int ttlInSeconds, AdminMask adm) throws IOException { - HTableDescriptor desc = compat.newTableDescriptor(tableName); + private TableDescriptor createTable(TableName tableName, String cfName, int ttlInSeconds, Admin adm) throws IOException { - HColumnDescriptor columnDescriptor = new HColumnDescriptor(cfName); - setCFOptions(columnDescriptor, ttlInSeconds); + TableDescriptorBuilder desc = TableDescriptorBuilder.newBuilder(tableName); - compat.addColumnFamilyToTableDescriptor(desc, columnDescriptor); + ColumnFamilyDescriptorBuilder columnDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cfName)); + setCFOptions(columnDescriptor, ttlInSeconds); + 
desc.setColumnFamily(columnDescriptor.build()); + TableDescriptor td = desc.build(); int count; // total regions to create String src; @@ -779,7 +797,7 @@ private HTableDescriptor createTable(String tableName, String cfName, int ttlInS if (MIN_REGION_COUNT <= (count = regionCount)) { src = "region count configuration"; } else if (0 < regionsPerServer && - MIN_REGION_COUNT <= (count = regionsPerServer * adm.getEstimatedRegionServerCount())) { + MIN_REGION_COUNT <= (count = regionsPerServer * getEstimatedRegionServerCount(adm))) { src = "ClusterStatus server count"; } else { count = -1; @@ -787,19 +805,31 @@ private HTableDescriptor createTable(String tableName, String cfName, int ttlInS } if (MIN_REGION_COUNT < count) { - adm.createTable(desc, getStartKey(count), getEndKey(count), count); + adm.createTable(td, getStartKey(count), getEndKey(count), count); logger.debug("Created table {} with region count {} from {}", tableName, count, src); } else { - adm.createTable(desc); + adm.createTable(td); logger.debug("Created table {} with default start key, end key, and region count", tableName); } - return desc; + return td; + } + + private int getEstimatedRegionServerCount(Admin adm) + { + int serverCount = -1; + try { + serverCount = adm.getRegionServers().size(); + logger.debug("Read {} servers from HBase ClusterStatus", serverCount); + } catch (IOException e) { + logger.debug("Unable to retrieve HBase cluster status", e); + } + return serverCount; } /** * This method generates the second argument to - * {@link HBaseAdmin#createTable(HTableDescriptor, byte[], byte[], int)}. + * {@link Admin#createTable(TableDescriptor, byte[], byte[], int)} *

* From the {@code createTable} javadoc: * "The start key specified will become the end key of the first region of @@ -825,15 +855,14 @@ private byte[] getEndKey(int regionCount) { return StaticArrayBuffer.of(regionWidth).getBytes(0, 4); } - private void ensureColumnFamilyExists(String tableName, String columnFamily, int ttlInSeconds) throws BackendException { - AdminMask adm = null; + private void ensureColumnFamilyExists(TableName tableName, String columnFamily, int ttlInSeconds) throws BackendException { + Admin adm = null; try { adm = getAdminInterface(); - HTableDescriptor desc = ensureTableExists(tableName, columnFamily, ttlInSeconds); + TableDescriptor desc = ensureTableExists(tableName, columnFamily, ttlInSeconds); Preconditions.checkNotNull(desc); - - HColumnDescriptor cf = desc.getFamily(Bytes.toBytes(columnFamily)); + ColumnFamilyDescriptor cf = desc.getColumnFamily(Bytes.toBytes(columnFamily)); // Create our column family, if necessary if (cf == null) { @@ -848,11 +877,11 @@ private void ensureColumnFamilyExists(String tableName, String columnFamily, int } try { - HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamily); + ColumnFamilyDescriptorBuilder columnDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(columnFamily)); setCFOptions(columnDescriptor, ttlInSeconds); - adm.addColumn(tableName, columnDescriptor); + adm.addColumnFamily(tableName, columnDescriptor.build()); try { logger.debug("Added HBase ColumnFamily {}, waiting for 1 sec. 
to propagate.", columnFamily); @@ -876,9 +905,9 @@ private void ensureColumnFamilyExists(String tableName, String columnFamily, int } } - private void setCFOptions(HColumnDescriptor columnDescriptor, int ttlInSeconds) { + private void setCFOptions(ColumnFamilyDescriptorBuilder columnDescriptor, int ttlInSeconds) { if (null != compression && !compression.equals(COMPRESSION_DEFAULT)) - compat.setCompression(columnDescriptor, compression); + columnDescriptor.setCompressionType(Compression.Algorithm.valueOf(compression)); if (ttlInSeconds > 0) columnDescriptor.setTimeToLive(ttlInSeconds); @@ -925,7 +954,7 @@ Map, Delete>> convertToCommands(Map getRegionLocations(String tableName) - throws IOException - { - return this.cnx.getRegionLocator(TableName.valueOf(tableName)).getAllRegionLocations(); - } -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HTable1_0.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HTable1_0.java deleted file mode 100644 index 6cc2870afd..0000000000 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/HTable1_0.java +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package org.janusgraph.diskstorage.hbase; - -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Row; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; - -import java.io.IOException; -import java.util.List; - -public class HTable1_0 implements TableMask -{ - private final Table table; - - public HTable1_0(Table table) - { - this.table = table; - } - - @Override - public ResultScanner getScanner(Scan filter) throws IOException - { - return table.getScanner(filter); - } - - @Override - public Result[] get(List gets) throws IOException - { - return table.get(gets); - } - - @Override - public void batch(List writes, Object[] results) throws IOException, InterruptedException - { - table.batch(writes, results); - /* table.flushCommits(); not needed anymore */ - } - - @Override - public void close() throws IOException - { - table.close(); - } -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/TableMask.java b/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/TableMask.java deleted file mode 100644 index 0addc96f25..0000000000 --- a/janusgraph-hbase/src/main/java/org/janusgraph/diskstorage/hbase/TableMask.java +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017 JanusGraph Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * Copyright DataStax, Inc. - *

- * Please see the included license file for details. - */ -package org.janusgraph.diskstorage.hbase; - -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Row; -import org.apache.hadoop.hbase.client.Scan; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * This interface hides ABI/API breaking changes that HBase has made to its Table/HTableInterface over the course - * of development from 0.94 to 1.0 and beyond. - */ -public interface TableMask extends Closeable -{ - - ResultScanner getScanner(Scan filter) throws IOException; - - Result[] get(List gets) throws IOException; - - void batch(List writes, Object[] results) throws IOException, InterruptedException; - -} diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/config/HBaseAuthHelper.java b/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/config/HBaseAuthHelper.java index 32c4755d0f..9d59950ada 100644 --- a/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/config/HBaseAuthHelper.java +++ b/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/config/HBaseAuthHelper.java @@ -19,7 +19,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; import java.lang.reflect.Method; /** @@ -54,7 +53,7 @@ public static Configuration wrapConfiguration(Configuration inner) { return inner; } - public static void setHBaseAuthToken(Configuration configuration, Job job) throws IOException { + public static void setHBaseAuthToken(Configuration configuration, Job job) { // Get HBase authentication token (when configured) String hbaseAuthentication = configuration.get("hbase.security.authentication"); diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/formats/hbase/HBaseBinaryInputFormat.java b/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/formats/hbase/HBaseBinaryInputFormat.java index 
cabb41d585..6977bb9703 100644 --- a/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/formats/hbase/HBaseBinaryInputFormat.java +++ b/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/formats/hbase/HBaseBinaryInputFormat.java @@ -52,12 +52,12 @@ public class HBaseBinaryInputFormat extends AbstractBinaryInputFormat { private byte[] edgeStoreFamily; @Override - public List getSplits(final JobContext jobContext) throws IOException, InterruptedException { + public List getSplits(final JobContext jobContext) throws IOException { return this.tableInputFormat.getSplits(jobContext); } @Override - public RecordReader> createRecordReader(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException { + public RecordReader> createRecordReader(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext) throws IOException { tableReader = tableInputFormat.createRecordReader(inputSplit, taskAttemptContext); return new HBaseBinaryRecordReader(tableReader, edgeStoreFamily); } diff --git a/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/formats/hbase/HBaseSnapshotBinaryInputFormat.java b/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/formats/hbase/HBaseSnapshotBinaryInputFormat.java index 08e80b7126..5534b0a79f 100644 --- a/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/formats/hbase/HBaseSnapshotBinaryInputFormat.java +++ b/janusgraph-hbase/src/main/java/org/janusgraph/hadoop/formats/hbase/HBaseSnapshotBinaryInputFormat.java @@ -95,7 +95,7 @@ public List getSplits(final JobContext jobContext) throws IOExceptio } @Override - public RecordReader> createRecordReader(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException { + public RecordReader> createRecordReader(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext) throws IOException { tableReader = tableSnapshotInputFormat.createRecordReader(inputSplit, 
taskAttemptContext); janusgraphRecordReader = new HBaseBinaryRecordReader(tableReader, edgeStoreFamily); return janusgraphRecordReader; @@ -155,9 +155,4 @@ public void setConf(final Configuration config) { throw new RuntimeException(e); } } - - @Override - public Configuration getConf() { - return super.getConf(); - } } diff --git a/janusgraph-hbase/src/test/java/org/janusgraph/HBaseContainer.java b/janusgraph-hbase/src/test/java/org/janusgraph/HBaseContainer.java index 6f97546c0d..44be2da9c2 100644 --- a/janusgraph-hbase/src/test/java/org/janusgraph/HBaseContainer.java +++ b/janusgraph-hbase/src/test/java/org/janusgraph/HBaseContainer.java @@ -17,15 +17,15 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.janusgraph.core.JanusGraphException; import org.janusgraph.diskstorage.BackendException; import org.janusgraph.diskstorage.TemporaryBackendException; import org.janusgraph.diskstorage.configuration.ModifiableConfiguration; import org.janusgraph.diskstorage.configuration.WriteConfiguration; -import org.janusgraph.diskstorage.hbase.AdminMask; -import org.janusgraph.diskstorage.hbase.ConnectionMask; -import org.janusgraph.diskstorage.hbase.HBaseCompat; -import org.janusgraph.diskstorage.hbase.HBaseCompatLoader; import org.janusgraph.diskstorage.hbase.HBaseStoreManager; import org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration; import org.janusgraph.graphdb.database.idassigner.placement.SimpleBulkPlacementStrategy; @@ -45,12 +45,11 @@ public class HBaseContainer extends GenericContainer { private static final Logger logger = LoggerFactory.getLogger(HBaseContainer.class); public static final String HBASE_TARGET_DIR = "test.hbase.targetdir"; + public static 
final String HBASE_DOCKER_PATH = "janusgraph-hbase/docker"; private static final String DEFAULT_VERSION = "2.2.7"; private static final String DEFAULT_UID = "1000"; private static final String DEFAULT_GID = "1000"; - private final HBaseCompat compat; - private static String getVersion() { String property = System.getProperty("hbase.docker.version"); if (StringUtils.isNotEmpty(property)) @@ -58,6 +57,25 @@ private static String getVersion() { return DEFAULT_VERSION; } + private static Path getPath() { + try { + Path path = Paths.get(".").toRealPath(); + if (path.getParent().endsWith("janusgraph")) { + path = Paths.get(path.toString(), "..").toRealPath(); + } + return Paths.get(path.toString(), getRelativePath()); + } catch (IOException ex) { + throw new JanusGraphException(ex); + } + } + + private static String getRelativePath() { + String property = System.getProperty("hbase.docker.path"); + if (StringUtils.isNotEmpty(property)) + return property; + return HBASE_DOCKER_PATH; + } + private static String getUid() { String property = System.getProperty("hbase.docker.uid"); if (StringUtils.isNotEmpty(property)) @@ -85,7 +103,7 @@ public HBaseContainer() { public HBaseContainer(boolean mountRoot) { super(new ImageFromDockerfile() - .withFileFromPath(".", Paths.get("docker")) + .withFileFromPath(".", getPath()) .withBuildArg("HBASE_VERSION", getVersion()) .withBuildArg("HBASE_UID", getUid()) .withBuildArg("HBASE_GID", getGid())); @@ -95,14 +113,14 @@ public HBaseContainer(boolean mountRoot) { addFixedExposedPort(16020, 16020); addFixedExposedPort(16030, 16030); - if (mountRoot){ + if (mountRoot) { try { - Files.createDirectories(getHBaseRootdir()); + Files.createDirectories(getHBaseRootDir()); } catch (IOException e) { logger.warn("failed to create folder", e); throw new JanusGraphException(e); } - addFileSystemBind(getHBaseRootdir().toString(), "/data/hbase", BindMode.READ_WRITE); + addFileSystemBind(getHBaseRootDir().toString(), "/data/hbase", BindMode.READ_WRITE); } 
withCreateContainerCmdModifier(createContainerCmd -> { @@ -110,17 +128,16 @@ public HBaseContainer(boolean mountRoot) { .withHostName("localhost"); }); waitingFor(Wait.forLogMessage(".*Master has completed initialization.*", 1)); - compat = HBaseCompatLoader.getCompat(null); } - public Path getHBaseRootdir() { + public Path getHBaseRootDir() { return Paths.get(getTargetDir(), "hbase-root"); } - private ConnectionMask createConnectionMask() throws IOException { + private Connection createConnection() throws IOException { Configuration entries = HBaseConfiguration.create(); entries.set("hbase.zookeeper.quorum", "localhost"); - return compat.createConnection(entries); + return ConnectionFactory.createConnection(entries); } /** @@ -132,9 +149,8 @@ private ConnectionMask createConnectionMask() throws IOException { */ public synchronized void createSnapshot(String snapshotName, String table) throws BackendException { - try (ConnectionMask hc = createConnectionMask(); - AdminMask admin = hc.getAdmin()) { - admin.snapshot(snapshotName, table); + try (Connection hc = createConnection(); Admin admin = hc.getAdmin()) { + admin.snapshot(snapshotName, TableName.valueOf(table)); } catch (Exception e) { logger.warn("Create HBase snapshot failed", e); throw new TemporaryBackendException("Create HBase snapshot failed", e); @@ -148,8 +164,7 @@ public synchronized void createSnapshot(String snapshotName, String table) * @throws IOException */ public synchronized void deleteSnapshot(String snapshotName) throws IOException { - try (ConnectionMask hc = createConnectionMask(); - AdminMask admin = hc.getAdmin()) { + try (Connection hc = createConnection(); Admin admin = hc.getAdmin()) { admin.deleteSnapshot(snapshotName); } } diff --git a/janusgraph-hbase/src/test/java/org/janusgraph/diskstorage/hbase/HBaseStoreManagerConfigTest.java b/janusgraph-hbase/src/test/java/org/janusgraph/diskstorage/hbase/HBaseStoreManagerConfigTest.java index aabff82945..c35cb1cc3e 100644 --- 
a/janusgraph-hbase/src/test/java/org/janusgraph/diskstorage/hbase/HBaseStoreManagerConfigTest.java +++ b/janusgraph-hbase/src/test/java/org/janusgraph/diskstorage/hbase/HBaseStoreManagerConfigTest.java @@ -14,11 +14,8 @@ package org.janusgraph.diskstorage.hbase; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.WriterAppender; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.janusgraph.HBaseContainer; import org.janusgraph.diskstorage.BackendException; import org.janusgraph.diskstorage.PermanentBackendException; @@ -31,6 +28,10 @@ import org.janusgraph.diskstorage.util.time.TimestampProviders; import org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration; import org.janusgraph.graphdb.configuration.builder.GraphDatabaseConfigurationBuilder; +import org.apache.logging.log4j.core.Appender; +import org.apache.logging.log4j.core.appender.WriterAppender; +import org.apache.logging.log4j.core.filter.LevelMatchFilter; +import org.apache.logging.log4j.core.layout.PatternLayout; import org.junit.jupiter.api.Test; import org.testcontainers.junit.jupiter.Container; import org.testcontainers.junit.jupiter.Testcontainers; @@ -48,11 +49,10 @@ public class HBaseStoreManagerConfigTest { @Test public void testShortCfNames() throws Exception { - Logger log = Logger.getLogger(HBaseStoreManager.class); - Level savedLevel = log.getLevel(); - log.setLevel(Level.WARN); + org.apache.logging.log4j.core.Logger log = (org.apache.logging.log4j.core.Logger)LogManager.getLogger(HBaseStoreManager.class); StringWriter writer = new StringWriter(); - Appender appender = new WriterAppender(new PatternLayout("%p: %m%n"), writer); + Appender appender = WriterAppender.createAppender(PatternLayout.newBuilder().withPattern("%p: %m%n").build(), LevelMatchFilter.newBuilder().setLevel(Level.WARN).build(), writer, "test", false, false); + 
appender.start(); log.addAppender(appender); // Open the HBaseStoreManager and store with default SHORT_CF_NAMES true. @@ -74,7 +74,6 @@ public void testShortCfNames() throws Exception { // Verify we get WARN. assertTrue(writer.toString().startsWith("WARN: Configuration"), writer.toString()); log.removeAppender(appender); - log.setLevel(savedLevel); store.close(); manager.close(); @@ -82,7 +81,7 @@ public void testShortCfNames() throws Exception { @Test // Test HBase preferred timestamp provider MILLI is set by default - public void testHBaseTimestampProvider() throws BackendException { + public void testHBaseTimestampProvider() { // Get an empty configuration // GraphDatabaseConfiguration.buildGraphConfiguration() only build an empty one. ModifiableConfiguration config = GraphDatabaseConfiguration.buildGraphConfiguration(); @@ -109,11 +108,12 @@ public void testHBaseStoragePort() throws BackendException { @Test // Test HBase skip-schema-check config public void testHBaseSkipSchemaCheck() throws Exception { - Logger log = Logger.getLogger(HBaseStoreManager.class); + org.apache.logging.log4j.core.Logger log = (org.apache.logging.log4j.core.Logger)LogManager.getLogger(HBaseStoreManager.class); Level savedLevel = log.getLevel(); log.setLevel(Level.DEBUG); StringWriter writer = new StringWriter(); - Appender appender = new WriterAppender(new PatternLayout("%p: %m%n"), writer); + Appender appender = WriterAppender.createAppender(PatternLayout.newBuilder().withPattern("%p: %m%n").build(), LevelMatchFilter.newBuilder().setLevel(Level.DEBUG).build(), writer, "test", false, false); + appender.start(); log.addAppender(appender); // Open the HBaseStoreManager with default skip-schema-check false. 
diff --git a/janusgraph-hbase/src/test/java/org/janusgraph/diskstorage/hbase/HBaseStoreManagerMutationTest.java b/janusgraph-hbase/src/test/java/org/janusgraph/diskstorage/hbase/HBaseStoreManagerMutationTest.java index 12a0276b2c..4c754f2b73 100644 --- a/janusgraph-hbase/src/test/java/org/janusgraph/diskstorage/hbase/HBaseStoreManagerMutationTest.java +++ b/janusgraph-hbase/src/test/java/org/janusgraph/diskstorage/hbase/HBaseStoreManagerMutationTest.java @@ -76,14 +76,13 @@ public void testKCVMutationToPuts() throws Exception { if (row == 1) { expectedColumnsWithTTL.add((long) i); } - additions.add(e); } else { // Collect the columns without TTL. Only do this for one row if (row == 1) { expectedColumnsWithoutTTL.add((long) i); } - additions.add(e); } + additions.add(e); } // Add one deletion to the row if (row == 1) { diff --git a/janusgraph-hbase/src/test/java/org/janusgraph/graphdb/hbase/HBaseOperationCountingTest.java b/janusgraph-hbase/src/test/java/org/janusgraph/graphdb/hbase/HBaseOperationCountingTest.java index f135e5f4d3..eec5e35bfb 100644 --- a/janusgraph-hbase/src/test/java/org/janusgraph/graphdb/hbase/HBaseOperationCountingTest.java +++ b/janusgraph-hbase/src/test/java/org/janusgraph/graphdb/hbase/HBaseOperationCountingTest.java @@ -18,6 +18,7 @@ import org.janusgraph.diskstorage.configuration.WriteConfiguration; import org.janusgraph.graphdb.JanusGraphOperationCountingTest; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.testcontainers.junit.jupiter.Container; import org.testcontainers.junit.jupiter.Testcontainers; @@ -36,6 +37,7 @@ public WriteConfiguration getBaseConfiguration() { @Override @Disabled + @Test public void testCacheConcurrency() { //Don't run this test; } diff --git a/janusgraph-hbase/src/test/java/org/janusgraph/hadoop/HBaseSnapshotInputFormatIT.java b/janusgraph-hbase/src/test/java/org/janusgraph/hadoop/HBaseSnapshotInputFormatIT.java index dc0c74163c..7547f2672f 100644 --- 
a/janusgraph-hbase/src/test/java/org/janusgraph/hadoop/HBaseSnapshotInputFormatIT.java +++ b/janusgraph-hbase/src/test/java/org/janusgraph/hadoop/HBaseSnapshotInputFormatIT.java @@ -305,7 +305,7 @@ protected Graph getGraph() throws IOException, ConfigurationException { config.setProperty("gremlin.hadoop.outputLocation", outDir); // Set the hbase.rootdir property. This is needed by HBaseSnapshotInputFormat. config.setProperty("janusgraphmr.ioformat.conf.storage.hbase.ext.hbase.rootdir", - hBaseContainer.getHBaseRootdir().toString()); + hBaseContainer.getHBaseRootDir().toString()); return GraphFactory.open(config); } diff --git a/janusgraph-hbase/src/test/resources/log4j.properties b/janusgraph-hbase/src/test/resources/log4j.properties deleted file mode 100644 index b8f0e78f14..0000000000 --- a/janusgraph-hbase/src/test/resources/log4j.properties +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# A2 is a ConsoleAppender. -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -# A2 uses PatternLayout. 
-log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# Set both appenders (A1 and A2) on the root logger. -#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - diff --git a/janusgraph-hbase/src/test/resources/log4j2-test.xml b/janusgraph-hbase/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..3872de4332 --- /dev/null +++ b/janusgraph-hbase/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-inmemory/pom.xml b/janusgraph-inmemory/pom.xml index 1d52f57c19..5482a627b4 100644 --- a/janusgraph-inmemory/pom.xml +++ b/janusgraph-inmemory/pom.xml @@ -4,7 +4,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-inmemory @@ -21,6 +21,7 @@ janusgraph-core ${project.version} + org.janusgraph janusgraph-backend-testutils diff --git a/janusgraph-inmemory/src/test/resources/log4j.properties b/janusgraph-inmemory/src/test/resources/log4j.properties deleted file mode 100644 index d5467091fe..0000000000 --- a/janusgraph-inmemory/src/test/resources/log4j.properties +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. 
-#log4j.appender.A1=org.apache.log4j.ConsoleAppender -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log - -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -# Set root logger level to the designated level and its only appender to A1. -log4j.rootLogger=DEBUG, A1 diff --git a/janusgraph-inmemory/src/test/resources/log4j2-test.xml b/janusgraph-inmemory/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-inmemory/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-lucene/pom.xml b/janusgraph-lucene/pom.xml index f142cf47e6..77c8a8c2a6 100644 --- a/janusgraph-lucene/pom.xml +++ b/janusgraph-lucene/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-lucene diff --git a/janusgraph-lucene/src/test/resources/log4j.properties b/janusgraph-lucene/src/test/resources/log4j.properties deleted file mode 100644 index e1aa7a5bde..0000000000 --- a/janusgraph-lucene/src/test/resources/log4j.properties +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is a FileAppender. 
-log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# A2 is a ConsoleAppender. -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -# A2 uses PatternLayout. -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# Set both appenders (A1 and A2) on the root logger. -#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - diff --git a/janusgraph-lucene/src/test/resources/log4j2-test.xml b/janusgraph-lucene/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-lucene/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-server/pom.xml b/janusgraph-server/pom.xml index f8e52fa6d9..658a379266 100644 --- a/janusgraph-server/pom.xml +++ b/janusgraph-server/pom.xml @@ -4,7 +4,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT janusgraph-server JanusGraph-Server: Server Components for JanusGraph @@ -81,6 +81,10 @@ junit junit + + io.grpc + grpc-context + diff --git a/janusgraph-server/src/main/java/org/janusgraph/graphdb/grpc/schema/util/GrpcUtils.java b/janusgraph-server/src/main/java/org/janusgraph/graphdb/grpc/schema/util/GrpcUtils.java index c512a95c92..9ca6a4fa43 100644 --- a/janusgraph-server/src/main/java/org/janusgraph/graphdb/grpc/schema/util/GrpcUtils.java +++ b/janusgraph-server/src/main/java/org/janusgraph/graphdb/grpc/schema/util/GrpcUtils.java @@ -15,32 +15,149 @@ package org.janusgraph.graphdb.grpc.schema.util; import com.google.protobuf.Int64Value; +import 
org.janusgraph.core.Cardinality; import org.janusgraph.core.Multiplicity; +import org.janusgraph.core.PropertyKey; +import org.janusgraph.core.attribute.Geoshape; import org.janusgraph.graphdb.grpc.types.EdgeLabel; +import org.janusgraph.graphdb.grpc.types.PropertyDataType; import org.janusgraph.graphdb.grpc.types.VertexLabel; +import org.janusgraph.graphdb.grpc.types.VertexProperty; + +import java.util.Collection; +import java.util.Date; +import java.util.UUID; public class GrpcUtils { public static VertexLabel createVertexLabelProto(org.janusgraph.core.VertexLabel vertexLabel) { - return VertexLabel.newBuilder() + VertexLabel.Builder builder = VertexLabel.newBuilder() .setId(Int64Value.of(vertexLabel.longId()))// TODO: we have to check that id is permanent .setName(vertexLabel.name()) .setPartitioned(vertexLabel.isPartitioned()) - .setReadOnly(vertexLabel.isStatic()) + .setReadOnly(vertexLabel.isStatic()); + Collection propertyKeys = vertexLabel.mappedProperties(); + for (PropertyKey propertyKey : propertyKeys) { + builder.addProperties(createVertexPropertyProto(propertyKey)); + } + return builder.build(); + } + + public static Cardinality convertGrpcCardinality(VertexProperty.Cardinality cardinality) { + switch (cardinality) { + case CARDINALITY_LIST: + return Cardinality.LIST; + case CARDINALITY_SET: + return Cardinality.SET; + case CARDINALITY_SINGLE: + default: + return Cardinality.SINGLE; + } + } + + public static VertexProperty.Cardinality convertToGrpcCardinality(Cardinality cardinality) { + switch (cardinality) { + case LIST: + return VertexProperty.Cardinality.CARDINALITY_LIST; + case SET: + return VertexProperty.Cardinality.CARDINALITY_SET; + case SINGLE: + default: + return VertexProperty.Cardinality.CARDINALITY_SINGLE; + } + } + + public static Class convertGrpcPropertyDataType(PropertyDataType propertyDataType) { + switch (propertyDataType) { + case PROPERTY_DATA_TYPE_CHARACTER: + return Character.class; + case PROPERTY_DATA_TYPE_BOOLEAN: + return 
Boolean.class; + case PROPERTY_DATA_TYPE_INT8: + return Byte.class; + case PROPERTY_DATA_TYPE_INT16: + return Short.class; + case PROPERTY_DATA_TYPE_INT32: + return Integer.class; + case PROPERTY_DATA_TYPE_INT64: + return Long.class; + case PROPERTY_DATA_TYPE_FLOAT32: + return Float.class; + case PROPERTY_DATA_TYPE_FLOAT64: + return Double.class; + case PROPERTY_DATA_TYPE_DATE: + return Date.class; + case PROPERTY_DATA_TYPE_GEO_SHAPE: + return Geoshape.class; + case PROPERTY_DATA_TYPE_STRING: + return String.class; + case PROPERTY_DATA_TYPE_UUID: + return UUID.class; + case PROPERTY_DATA_TYPE_JAVA_OBJECT: + default: + return Object.class; + } + } + + public static PropertyDataType convertToGrpcPropertyDataType(Class propertyDataType) { + if (propertyDataType == Boolean.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_BOOLEAN; + } + if (propertyDataType == Character.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_CHARACTER; + } + if (propertyDataType == Byte.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_INT8; + } + if (propertyDataType == Short.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_INT16; + } + if (propertyDataType == Integer.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_INT32; + } + if (propertyDataType == Long.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_INT64; + } + if (propertyDataType == Float.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_FLOAT32; + } + if (propertyDataType == Double.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_FLOAT64; + } + if (propertyDataType == Date.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_DATE; + } + if (propertyDataType == Geoshape.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_GEO_SHAPE; + } + if (propertyDataType == String.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_STRING; + } + if (propertyDataType == UUID.class) { + return PropertyDataType.PROPERTY_DATA_TYPE_UUID; + } + return PropertyDataType.PROPERTY_DATA_TYPE_JAVA_OBJECT; + } + + 
private static VertexProperty createVertexPropertyProto(PropertyKey propertyKey) { + return VertexProperty.newBuilder() + .setDataType(convertToGrpcPropertyDataType(propertyKey.dataType())) + .setCardinality(convertToGrpcCardinality(propertyKey.cardinality())) + .setName(propertyKey.name()) .build(); } public static Multiplicity convertGrpcEdgeMultiplicity(EdgeLabel.Multiplicity multiplicity) { switch (multiplicity) { - case MANY2ONE: + case MULTIPLICITY_MANY2ONE: return Multiplicity.MANY2ONE; - case ONE2MANY: + case MULTIPLICITY_ONE2MANY: return Multiplicity.ONE2MANY; - case ONE2ONE: + case MULTIPLICITY_ONE2ONE: return Multiplicity.ONE2ONE; - case SIMPLE: + case MULTIPLICITY_SIMPLE: return Multiplicity.SIMPLE; - case MULTI: + case MULTIPLICITY_MULTI: default: return Multiplicity.MULTI; } @@ -49,16 +166,16 @@ public static Multiplicity convertGrpcEdgeMultiplicity(EdgeLabel.Multiplicity mu public static EdgeLabel.Multiplicity convertToGrpcMultiplicity(Multiplicity multiplicity) { switch (multiplicity) { case SIMPLE: - return EdgeLabel.Multiplicity.SIMPLE; + return EdgeLabel.Multiplicity.MULTIPLICITY_SIMPLE; case ONE2MANY: - return EdgeLabel.Multiplicity.ONE2MANY; + return EdgeLabel.Multiplicity.MULTIPLICITY_ONE2MANY; case MANY2ONE: - return EdgeLabel.Multiplicity.MANY2ONE; + return EdgeLabel.Multiplicity.MULTIPLICITY_MANY2ONE; case ONE2ONE: - return EdgeLabel.Multiplicity.ONE2ONE; + return EdgeLabel.Multiplicity.MULTIPLICITY_ONE2ONE; case MULTI: default: - return EdgeLabel.Multiplicity.MULTI; + return EdgeLabel.Multiplicity.MULTIPLICITY_MULTI; } } @@ -67,7 +184,7 @@ public static EdgeLabel createEdgeLabelProto(org.janusgraph.core.EdgeLabel edgeL .setId(Int64Value.of(edgeLabel.longId()))// TODO: we have to check that id is permanent .setName(edgeLabel.name()) .setMultiplicity(convertToGrpcMultiplicity(edgeLabel.multiplicity())) - .setDirection(edgeLabel.isDirected() ? EdgeLabel.Direction.BOTH : EdgeLabel.Direction.OUT) + .setDirection(edgeLabel.isDirected() ? 
EdgeLabel.Direction.DIRECTION_BOTH : EdgeLabel.Direction.DIRECTION_OUT) .build(); } } diff --git a/janusgraph-server/src/test/java/org/janusgraph/graphdb/grpc/JanusGraphGrpcServerBaseTest.java b/janusgraph-server/src/test/java/org/janusgraph/graphdb/grpc/JanusGraphGrpcServerBaseTest.java index e3c7aa4442..42866e3153 100644 --- a/janusgraph-server/src/test/java/org/janusgraph/graphdb/grpc/JanusGraphGrpcServerBaseTest.java +++ b/janusgraph-server/src/test/java/org/janusgraph/graphdb/grpc/JanusGraphGrpcServerBaseTest.java @@ -22,15 +22,18 @@ import org.apache.tinkerpop.gremlin.server.Settings; import org.apache.tinkerpop.gremlin.server.util.DefaultGraphManager; import org.janusgraph.core.JanusGraph; +import org.janusgraph.core.PropertyKey; import org.janusgraph.core.VertexLabel; import org.janusgraph.core.schema.EdgeLabelMaker; import org.janusgraph.core.schema.JanusGraphManagement; +import org.janusgraph.core.schema.PropertyKeyMaker; import org.janusgraph.core.schema.VertexLabelMaker; import org.janusgraph.graphdb.grpc.schema.SchemaManagerImpl; import org.janusgraph.graphdb.grpc.schema.util.GrpcUtils; import org.janusgraph.graphdb.grpc.types.EdgeLabel; import org.janusgraph.graphdb.grpc.types.EdgeLabelOrBuilder; import org.janusgraph.graphdb.grpc.types.VertexLabelOrBuilder; +import org.janusgraph.graphdb.grpc.types.VertexProperty; import org.janusgraph.graphdb.server.TestingServerClosable; import org.javatuples.Pair; import org.junit.jupiter.api.AfterEach; @@ -56,28 +59,36 @@ protected static GraphManager getGraphManager() { return new DefaultGraphManager(settings); } - public long createVertexLabel(String graph, VertexLabelOrBuilder vertexLabel) { + public long createVertexLabel(String graph, VertexLabelOrBuilder builder) { JanusGraphManagement management = ((JanusGraph) graphManager.getGraph(graph)).openManagement(); - VertexLabelMaker vertexLabelMaker = management.makeVertexLabel(vertexLabel.getName()); - if (vertexLabel.getReadOnly()) { + VertexLabelMaker 
vertexLabelMaker = management.makeVertexLabel(builder.getName()); + if (builder.getReadOnly()) { vertexLabelMaker.setStatic(); } - if (vertexLabel.getPartitioned()) { + if (builder.getPartitioned()) { vertexLabelMaker.partition(); } - VertexLabel createdVertexLabel = vertexLabelMaker.make(); + VertexLabel vertexLabel = vertexLabelMaker.make(); + for (VertexProperty vertexProperty : builder.getPropertiesList()) { + PropertyKeyMaker propertyKeyMaker = management.makePropertyKey(vertexProperty.getName()); + PropertyKey propertyKey = propertyKeyMaker + .cardinality(GrpcUtils.convertGrpcCardinality(vertexProperty.getCardinality())) + .dataType(GrpcUtils.convertGrpcPropertyDataType(vertexProperty.getDataType())) + .make(); + management.addProperties(vertexLabel, propertyKey); + } management.commit(); - return createdVertexLabel.longId(); + return vertexLabel.longId(); } public long createEdgeLabel(String graph, EdgeLabelOrBuilder edgeLabel) { JanusGraphManagement management = ((JanusGraph) graphManager.getGraph(graph)).openManagement(); EdgeLabelMaker edgeLabelMaker = management.makeEdgeLabel(edgeLabel.getName()); - if (edgeLabel.getDirection() == EdgeLabel.Direction.BOTH) { - edgeLabelMaker.directed(); - } else { + if (edgeLabel.getDirection() == EdgeLabel.Direction.DIRECTION_OUT) { edgeLabelMaker.unidirected(); + } else { + edgeLabelMaker.directed(); } edgeLabelMaker.multiplicity(GrpcUtils.convertGrpcEdgeMultiplicity(edgeLabel.getMultiplicity())); org.janusgraph.core.EdgeLabel createdEdgeLabel = edgeLabelMaker.make(); diff --git a/janusgraph-server/src/test/java/org/janusgraph/graphdb/grpc/schema/SchemaManagerClientTest.java b/janusgraph-server/src/test/java/org/janusgraph/graphdb/grpc/schema/SchemaManagerClientTest.java index 2d6c6b116c..c92a110fba 100644 --- a/janusgraph-server/src/test/java/org/janusgraph/graphdb/grpc/schema/SchemaManagerClientTest.java +++ b/janusgraph-server/src/test/java/org/janusgraph/graphdb/grpc/schema/SchemaManagerClientTest.java @@ -20,7 
+20,9 @@ import org.janusgraph.graphdb.grpc.JanusGraphManagerClient; import org.janusgraph.graphdb.grpc.types.EdgeLabel; import org.janusgraph.graphdb.grpc.types.JanusGraphContext; +import org.janusgraph.graphdb.grpc.types.PropertyDataType; import org.janusgraph.graphdb.grpc.types.VertexLabel; +import org.janusgraph.graphdb.grpc.types.VertexProperty; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; @@ -31,6 +33,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.params.provider.EnumSource.Mode.EXCLUDE; @@ -145,6 +148,103 @@ public void testGetVertexLabels(int numberOfVertices) { } } + @ParameterizedTest + @ValueSource(strings = {"test", "test2"}) + public void testGetVertexLabelByNameVertexLabelWithVertexProperty(String propertyName) { + final String vertexLabelName = "testVertexProperty"; + SchemaManagerClient schemaManagerClient = new SchemaManagerClient(getDefaultContext(), managedChannel); + + //create property + VertexProperty test = VertexProperty.newBuilder() + .setName(propertyName) + .setDataType(PropertyDataType.PROPERTY_DATA_TYPE_BOOLEAN) + .setCardinality(VertexProperty.Cardinality.CARDINALITY_SINGLE) + .build(); + //create vertex + createVertexLabel(defaultGraphName, VertexLabel.newBuilder() + .setName(vertexLabelName) + .addProperties(test)); + + VertexLabel vertexLabel = schemaManagerClient.getVertexLabelByName(vertexLabelName); + + VertexProperty property = vertexLabel.getProperties(0); + assertEquals(PropertyDataType.PROPERTY_DATA_TYPE_BOOLEAN, property.getDataType()); + assertEquals(VertexProperty.Cardinality.CARDINALITY_SINGLE, property.getCardinality()); + assertEquals(propertyName, property.getName()); + assertNotEquals(0, 
property.getId()); + } + + @ParameterizedTest + @EnumSource(value = PropertyDataType.class, mode = EXCLUDE, names = { "PROPERTY_DATA_TYPE_UNSPECIFIED", "UNRECOGNIZED" }) + public void testGetVertexLabelByNameVertexLabelWithVertexProperty(PropertyDataType propertyDataType) { + final String name = "testPropertyType"; + SchemaManagerClient schemaManagerClient = new SchemaManagerClient(getDefaultContext(), managedChannel); + + //create property + VertexProperty test = VertexProperty.newBuilder() + .setName(name) + .setDataType(propertyDataType) + .build(); + //create vertex + createVertexLabel(defaultGraphName, VertexLabel.newBuilder() + .setName(name) + .addProperties(test)); + + VertexLabel vertexLabel = schemaManagerClient.getVertexLabelByName(name); + + VertexProperty property = vertexLabel.getProperties(0); + assertEquals(propertyDataType, property.getDataType()); + assertEquals(name, property.getName()); + } + + @ParameterizedTest + @EnumSource(value = VertexProperty.Cardinality.class, mode = EXCLUDE, names = { "CARDINALITY_UNSPECIFIED", "UNRECOGNIZED" }) + public void testGetVertexLabelByNameVertexLabelWithVertexProperty(VertexProperty.Cardinality cardinality) { + final String name = "testPropertyCardinality"; + SchemaManagerClient schemaManagerClient = new SchemaManagerClient(getDefaultContext(), managedChannel); + + //create property + VertexProperty test = VertexProperty.newBuilder() + .setName(name) + .setDataType(PropertyDataType.PROPERTY_DATA_TYPE_BOOLEAN) + .setCardinality(cardinality) + .build(); + //create vertex + createVertexLabel(defaultGraphName, VertexLabel.newBuilder() + .setName(name) + .addProperties(test)); + + VertexLabel vertexLabel = schemaManagerClient.getVertexLabelByName(name); + + VertexProperty property = vertexLabel.getProperties(0); + assertEquals(cardinality, property.getCardinality()); + assertEquals(name, property.getName()); + } + + @ParameterizedTest + @ValueSource(ints = {1, 4, 8, 16}) + public void 
testGetVertexLabelByNameVertexLabelWithMultipleVertexProperties(int numberOfProperties) { + final String vertexLabelName = "testMultipleVertexProperties"; + SchemaManagerClient schemaManagerClient = new SchemaManagerClient(getDefaultContext(), managedChannel); + + VertexLabel.Builder builder = VertexLabel.newBuilder() + .setName(vertexLabelName); + + for (int i = 0; i < numberOfProperties; i++) { + VertexProperty test = VertexProperty.newBuilder() + .setName("test"+i) + .setDataType(PropertyDataType.PROPERTY_DATA_TYPE_BOOLEAN) + .setCardinality(VertexProperty.Cardinality.CARDINALITY_SINGLE) + .build(); + builder.addProperties(test); + } + createVertexLabel(defaultGraphName, builder); + + VertexLabel vertexLabel = schemaManagerClient.getVertexLabelByName(vertexLabelName); + + assertEquals(numberOfProperties, vertexLabel.getPropertiesCount()); + } + @Test public void testGetEdgeLabelByNameNotFound() { SchemaManagerClient schemaManagerClient = new SchemaManagerClient(getDefaultContext(), managedChannel); @@ -182,13 +282,13 @@ public void testGetEdgeLabelByNameEdgeLabelExists(String edgeLabelName) { EdgeLabel edgeLabel = schemaManagerClient.getEdgeLabelByName(edgeLabelName); assertEquals(edgeLabelName, edgeLabel.getName()); - assertEquals(EdgeLabel.Direction.BOTH, edgeLabel.getDirection()); - assertEquals(EdgeLabel.Multiplicity.MULTI, edgeLabel.getMultiplicity()); + assertEquals(EdgeLabel.Direction.DIRECTION_BOTH, edgeLabel.getDirection()); + assertEquals(EdgeLabel.Multiplicity.MULTIPLICITY_MULTI, edgeLabel.getMultiplicity()); assertEquals(id, edgeLabel.getId().getValue()); } @ParameterizedTest - @EnumSource(mode = EXCLUDE, names = {"UNRECOGNIZED"}) + @EnumSource(value = EdgeLabel.Multiplicity.class, mode = EXCLUDE, names = {"MULTIPLICITY_UNSPECIFIED", "UNRECOGNIZED"}) public void testGetEdgeLabelByNameWithDefinedMultiplicity(EdgeLabel.Multiplicity multiplicity) { final String edgeLabelName = "testMultiplicity"; SchemaManagerClient schemaManagerClient = new 
SchemaManagerClient(getDefaultContext(), managedChannel); @@ -196,7 +296,7 @@ public void testGetEdgeLabelByNameWithDefinedMultiplicity(EdgeLabel.Multiplicity //create edge createEdgeLabel(defaultGraphName, EdgeLabel.newBuilder() .setName(edgeLabelName) - .setDirection(EdgeLabel.Direction.BOTH) + .setDirection(EdgeLabel.Direction.DIRECTION_BOTH) .setMultiplicity(multiplicity)); EdgeLabel edgeLabel = schemaManagerClient.getEdgeLabelByName(edgeLabelName); @@ -205,7 +305,7 @@ public void testGetEdgeLabelByNameWithDefinedMultiplicity(EdgeLabel.Multiplicity } @ParameterizedTest - @EnumSource(mode = EXCLUDE, names = {"UNRECOGNIZED"}) + @EnumSource(value = EdgeLabel.Direction.class, mode = EXCLUDE, names = {"DIRECTION_UNSPECIFIED", "UNRECOGNIZED"}) public void testGetEdgeLabelByNameWithDefinedDirection(EdgeLabel.Direction direction) { final String edgeLabelName = "testDirection"; SchemaManagerClient schemaManagerClient = new SchemaManagerClient(getDefaultContext(), managedChannel); diff --git a/janusgraph-server/src/test/resources/log4j.properties b/janusgraph-server/src/test/resources/log4j.properties deleted file mode 100644 index b8f0e78f14..0000000000 --- a/janusgraph-server/src/test/resources/log4j.properties +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. 
-log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# A2 is a ConsoleAppender. -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -# A2 uses PatternLayout. -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# Set both appenders (A1 and A2) on the root logger. -#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - diff --git a/janusgraph-server/src/test/resources/log4j2-test.xml b/janusgraph-server/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-server/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-solr/pom.xml b/janusgraph-solr/pom.xml index 568c209f40..15260c296d 100644 --- a/janusgraph-solr/pom.xml +++ b/janusgraph-solr/pom.xml @@ -3,7 +3,7 @@ org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT ../pom.xml janusgraph-solr @@ -11,8 +11,7 @@ https://janusgraph.org ${basedir}/.. 
- 7.7.2 - 8.9.0 + 8.11.1 ${solr8.docker.version} solr ${skipTests} @@ -67,13 +66,16 @@ - org.slf4j - slf4j-log4j12 + org.apache.logging.log4j + log4j-slf4j-impl + runtime true ch.qos.logback logback-classic + runtime + true @@ -100,6 +102,10 @@ ${lucene-solr.version} test + + org.apache.logging.log4j + log4j-api + org.apache.kerby kerb-core @@ -148,6 +154,11 @@ org.ow2.asm asm-commons + + + org.apiguardian + apiguardian-api + @@ -266,12 +277,6 @@ - - solr7 - - ${solr7.docker.version} - - solr8 diff --git a/janusgraph-solr/src/test/java/org/janusgraph/diskstorage/solr/JanusGraphSolrContainer.java b/janusgraph-solr/src/test/java/org/janusgraph/diskstorage/solr/JanusGraphSolrContainer.java index f317f9f174..d16c3e2b3f 100644 --- a/janusgraph-solr/src/test/java/org/janusgraph/diskstorage/solr/JanusGraphSolrContainer.java +++ b/janusgraph-solr/src/test/java/org/janusgraph/diskstorage/solr/JanusGraphSolrContainer.java @@ -28,7 +28,7 @@ import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.INDEX_BACKEND; public class JanusGraphSolrContainer extends SolrContainer { - private static final String DEFAULT_SOLR_VERSION = "8.9.0"; + private static final String DEFAULT_SOLR_VERSION = "8.11.1"; private static final String DEFAULT_SOLR_IMAGE = "solr"; private static final String COLLECTIONS = "store1 store2 vertex edge namev namee composite psearch esearch vsearch mi mixed index1 index2 index3 ecategory vcategory pcategory theIndex vertices edges booleanIndex dateIndex instantIndex uuidIndex randomMixedIndex collectionIndex nameidx oridx otheridx lengthidx"; diff --git a/janusgraph-solr/src/test/resources/log4j.properties b/janusgraph-solr/src/test/resources/log4j.properties deleted file mode 100644 index b8f0e78f14..0000000000 --- a/janusgraph-solr/src/test/resources/log4j.properties +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except 
in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log -log4j.appender.A1.Threshold=ALL -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# A2 is a ConsoleAppender. -log4j.appender.A2=org.apache.log4j.ConsoleAppender -log4j.appender.A2.Threshold=ALL -# A2 uses PatternLayout. -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5p %c{2}: %m%n - -# Set both appenders (A1 and A2) on the root logger. 
-#log4j.rootLogger=INFO, A1, A2 -log4j.rootLogger=ERROR, A1 - diff --git a/janusgraph-solr/src/test/resources/log4j2-test.xml b/janusgraph-solr/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..8f0c992a9f --- /dev/null +++ b/janusgraph-solr/src/test/resources/log4j2-test.xml @@ -0,0 +1,15 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + \ No newline at end of file diff --git a/janusgraph-solr/src/test/resources/solr/core-template/solrconfig.xml b/janusgraph-solr/src/test/resources/solr/core-template/solrconfig.xml index 317f68dea3..6bceb661ca 100644 --- a/janusgraph-solr/src/test/resources/solr/core-template/solrconfig.xml +++ b/janusgraph-solr/src/test/resources/solr/core-template/solrconfig.xml @@ -35,7 +35,7 @@ that you fully re-index after changing this setting as it can affect both how text is indexed and queried. --> - 7.0.0 + 8.0.0 - org.slf4j - slf4j-log4j12 + org.apache.logging.log4j + log4j-slf4j-impl + runtime + true + + + ch.qos.logback + logback-classic + runtime true @@ -69,18 +76,6 @@ org.locationtech.jts jts-core - - org.openjdk.jmh - jmh-core - ${jmh.version} - test - - - org.openjdk.jmh - jmh-generator-annprocess - ${jmh.version} - test - diff --git a/janusgraph-test/src/test/java/org/janusgraph/diskstorage/cache/ExpirationCacheTest.java b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/cache/ExpirationCacheTest.java index 9492c077ca..6cbe9e119b 100644 --- a/janusgraph-test/src/test/java/org/janusgraph/diskstorage/cache/ExpirationCacheTest.java +++ b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/cache/ExpirationCacheTest.java @@ -15,6 +15,7 @@ package org.janusgraph.diskstorage.cache; import com.google.common.collect.Lists; +import io.github.artsok.RepeatedIfExceptionsTest; import org.janusgraph.diskstorage.EntryList; import org.janusgraph.diskstorage.StaticBuffer; import org.janusgraph.diskstorage.keycolumnvalue.KeyColumnValueStore; @@ -52,8 +53,7 @@ private static 
KCVSCache getCache(KeyColumnValueStore store, Duration expiration return new ExpirationKCVSCache(store,METRICS_STRING,expirationTime.toMillis(),graceWait.toMillis(),CACHE_SIZE); } - - @Test + @RepeatedIfExceptionsTest(repeats = 4, minSuccess = 2) public void testExpiration() throws Exception { testExpiration(Duration.ofMillis(200)); testExpiration(Duration.ofSeconds(4)); diff --git a/janusgraph-test/src/test/java/org/janusgraph/diskstorage/common/LocalStoreManagerTest.java b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/common/LocalStoreManagerTest.java index 4f7e326540..407692c885 100644 --- a/janusgraph-test/src/test/java/org/janusgraph/diskstorage/common/LocalStoreManagerTest.java +++ b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/common/LocalStoreManagerTest.java @@ -26,6 +26,7 @@ import org.janusgraph.util.system.ConfigurationUtil; import org.junit.jupiter.api.Test; +import java.io.File; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -98,7 +99,8 @@ public void directoryShouldEqualSuppliedDirectory() throws BackendException { final Map map = getBaseConfigurationMap(); map.put(STORAGE_DIRECTORY, "specific/absolute/directory"); final LocalStoreManager mgr = getStoreManager(map); - assertEquals("specific/absolute/directory", mgr.directory.getPath()); + File expectedDirectory = new File("specific/absolute/directory"); + assertEquals(expectedDirectory.getPath(), mgr.directory.getPath()); } @Test @@ -107,7 +109,8 @@ public void directoryShouldEqualStorageRootPlusGraphName() throws BackendExcepti map.put(STORAGE_ROOT, "temp/root"); map.put(GRAPH_NAME, "randomGraphName"); final LocalStoreManager mgr = getStoreManager(map); - assertEquals("temp/root/randomGraphName", mgr.directory.getPath()); + File expectedFile = new File("temp/root/randomGraphName"); + assertEquals(expectedFile.getPath(), mgr.directory.getPath()); } @Test diff --git 
a/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/CompletedJobFutureTest.java b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/CompletedJobFutureTest.java new file mode 100644 index 0000000000..afd469d99c --- /dev/null +++ b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/CompletedJobFutureTest.java @@ -0,0 +1,60 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package org.janusgraph.diskstorage.keycolumnvalue.scan; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ExecutionException; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class CompletedJobFutureTest { + private StandardScanMetrics metrics; + private CompletedJobFuture future; + + @BeforeEach + public void setUp() { + metrics = new StandardScanMetrics(); + future = new CompletedJobFuture(metrics); + } + + @Test + public void testGetIntermediateResult() { + assertEquals(metrics, future.getIntermediateResult()); + } + + @Test + public void testGet() throws ExecutionException, InterruptedException { + assertEquals(metrics, future.get()); + } + + @Test + public void testCancel() { + assertFalse(future.cancel(true)); + } + + @Test + public void testIsCancelled() { + assertFalse(future.isCancelled()); + } + + @Test + public void testIsDone() { + assertTrue(future.isDone()); + } +} diff --git a/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/EmptyScanJobFutureTest.java b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/EmptyScanJobFutureTest.java new file mode 100644 index 0000000000..d333c2a451 --- /dev/null +++ b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/EmptyScanJobFutureTest.java @@ -0,0 +1,58 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.diskstorage.keycolumnvalue.scan; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ExecutionException; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class EmptyScanJobFutureTest { + private EmptyScanJobFuture future; + + @BeforeEach + public void setUp() { + future = new EmptyScanJobFuture(); + } + + @Test + public void testGetIntermediateResult() { + assertNull(future.getIntermediateResult()); + } + + @Test + public void testGet() throws ExecutionException, InterruptedException { + assertNull(future.get()); + } + + @Test + public void testCancel() { + assertFalse(future.cancel(true)); + } + + @Test + public void testIsCancelled() { + assertFalse(future.isCancelled()); + } + + @Test + public void testIsDone() { + assertTrue(future.isDone()); + } +} diff --git a/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/FailedJobFutureTest.java b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/FailedJobFutureTest.java new file mode 100644 index 0000000000..bea66880fa --- /dev/null +++ b/janusgraph-test/src/test/java/org/janusgraph/diskstorage/keycolumnvalue/scan/FailedJobFutureTest.java @@ -0,0 +1,63 @@ +// Copyright 2021 JanusGraph Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package org.janusgraph.diskstorage.keycolumnvalue.scan; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ExecutionException; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class FailedJobFutureTest { + private FailedJobFuture future; + private Throwable cause; + + @BeforeEach + public void setUp() { + cause = new IllegalArgumentException(); + future = new FailedJobFuture(cause); + } + + @Test + public void testGetIntermediateResult() { + Throwable ex = assertThrows(ExecutionException.class, () -> future.getIntermediateResult()); + assertEquals(cause, ex.getCause()); + } + + @Test + public void testGet() throws ExecutionException, InterruptedException { + Throwable ex = assertThrows(ExecutionException.class, () -> future.getIntermediateResult()); + assertEquals(cause, ex.getCause()); + } + + @Test + public void testCancel() { + assertFalse(future.cancel(true)); + } + + @Test + public void testIsCancelled() { + assertFalse(future.isCancelled()); + } + + @Test + public void testIsDone() { + assertTrue(future.isDone()); + } +} diff --git a/janusgraph-test/src/test/java/org/janusgraph/graphdb/serializer/SerializerSpeedTest.java b/janusgraph-test/src/test/java/org/janusgraph/graphdb/serializer/SerializerSpeedTest.java index 08d5845bff..85e36ace22 100644 --- 
a/janusgraph-test/src/test/java/org/janusgraph/graphdb/serializer/SerializerSpeedTest.java +++ b/janusgraph-test/src/test/java/org/janusgraph/graphdb/serializer/SerializerSpeedTest.java @@ -21,19 +21,13 @@ import org.janusgraph.graphdb.serializer.attributes.TClass2Serializer; import org.janusgraph.graphdb.serializer.attributes.TEnum; import org.janusgraph.graphdb.serializer.attributes.TEnumSerializer; -import org.janusgraph.testutil.JUnitBenchmarkProvider; -import org.junit.Rule; +import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.junit.rules.TestRule; @Tag(TestCategory.PERFORMANCE_TESTS) public class SerializerSpeedTest extends SerializerTestCommon { - @Rule - public TestRule benchmark = JUnitBenchmarkProvider.get(); - - @Test + @RepeatedTest(10) public void performanceTestStringSerialization() { int runs = 100000; for (int i = 0; i < runs; i++) { @@ -41,7 +35,7 @@ public void performanceTestStringSerialization() { } } - @Test + @RepeatedTest(10) public void performanceTestObjectSerialization() { serialize.registerClass(2,TClass1.class, new TClass1Serializer()); serialize.registerClass(80342,TClass2.class, new TClass2Serializer()); diff --git a/janusgraph-test/src/test/resources/log4j.properties b/janusgraph-test/src/test/resources/log4j.properties deleted file mode 100644 index 2e409de089..0000000000 --- a/janusgraph-test/src/test/resources/log4j.properties +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2019 JanusGraph Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# A1 is set to be a FileAppender. -#log4j.appender.A1=org.apache.log4j.ConsoleAppender -log4j.appender.A1=org.apache.log4j.FileAppender -log4j.appender.A1.File=target/test.log - -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n - -# Set root logger level to the designated level and its only appender to A1. -log4j.rootLogger=DEBUG, A1 - -log4j.logger.org.apache.cassandra=INFO -log4j.logger.org.apache.hadoop=INFO -log4j.logger.org.apache.zookeeper=INFO - -log4j.logger.org.testcontainers=INFO -log4j.logger.com.github.dockerjava=WARN diff --git a/janusgraph-test/src/test/resources/log4j2-test.xml b/janusgraph-test/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000000..fd3dda8c23 --- /dev/null +++ b/janusgraph-test/src/test/resources/log4j2-test.xml @@ -0,0 +1,30 @@ + + + + + + %d{HH:mm:ss} %-5level %class.%method{36} - %msg%n + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 596c80b383..6d56a9558d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -165,8 +165,8 @@ nav: - Changelog: changelog.md extra: - latest_version: 0.6.1 - snapshot_version: 0.6.2-SNAPSHOT + latest_version: 1.0.0 + snapshot_version: 1.0.0-SNAPSHOT tinkerpop_version: 3.5.1 hadoop2_version: 2.8.5 jamm_version: 0.3.0 diff --git a/pom.xml b/pom.xml index f2a9469ba8..f2b49a30e1 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ 4.0.0 org.janusgraph janusgraph - 0.6.2-SNAPSHOT + 1.0.0-SNAPSHOT pom 3.0.0 @@ -46,49 +46,49 @@ - ossrh - https://oss.sonatype.org/content/repositories/snapshots + github + GitHub Packages + https://maven.pkg.github.com/mapped/janusgraph - ossrh - https://oss.sonatype.org/service/local/staging/deploy/maven2/ + github + GitHub Packages + https://maven.pkg.github.com/mapped/janusgraph 
1.0.0,1.1.0-SNAPSHOT 3.5.1 - 1.7.2 - 5.7.2 - 3.8.0 + 1.8.1 + 5.8.1 + 3.12.4 0.3.0 4.1.18 - 1.7.30 + 1.7.35 4.5.13 - 4.4.14 + 4.4.15 2.8.5 - 1.6.0 2.2.7 4.2.0-incubating - 1.21.0 + 1.24.0 1.7.1 1.9.13 - 2.10.0 - 8.9.0 + 2.13.1 + 8.11.1 7.14.0 1.9.4 3.2.2 3.6.2 - 4.1.61.Final - 5.7.0 - 1.12.1 + 4.1.73.Final + 5.10.0 UTF-8 UTF-8 ${project.build.directory}/janusgraph-test false - 1.6 + 3.0.1 -Xms256m -Xmx768m -XX:+HeapDumpOnOutOfMemoryError -ea ${test.extra.jvm.opts} -Xms256m -Xmx256m -ea -XX:+HeapDumpOnOutOfMemoryError ${test.extra.jvm.opts} @@ -96,7 +96,7 @@ false true ${basedir} - 3.2.0 + 3.3.1 1.8 1.8 MEMORY_TESTS,PERFORMANCE_TESTS,BRITTLE_TESTS @@ -104,12 +104,15 @@ 3.11.10 4.13.0 4.4.0 - 1.16.0 - 4.2 - 3.15.6 - 1.38.1 + 1.16.2 + 4.3 + 3.19.2 + 1.42.1 3.15.3 - 1.21 + 1.33 + + 9.4.44.v20210927 + 2.17.1 janusgraph-grpc @@ -128,6 +131,7 @@ janusgraph-lucene janusgraph-all janusgraph-dist + janusgraph-benchmark janusgraph-doc janusgraph-solr janusgraph-examples @@ -151,7 +155,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 3.0.0-M3 + 3.0.0 enforce-dependency-convergence @@ -240,6 +244,7 @@ **/target/** **/.classpath **/.project + **/.factorypath **/.settings/** **/src/main/resources/META-INF/services/** **/src/test/resources/META-INF/services/** @@ -258,6 +263,7 @@ **/*.id **/*.gz .editorconfig + .github/dependabot.yml CC-BY-4.0.txt **/cqlshrc **/regionservers @@ -265,7 +271,6 @@ **/src/test/resources/excludes **/src/test/resources/longTests1 **/src/test/resources/longTests2 - **/src/pkg/static/debian/** **/src/test/resources/mockito-extensions/** **/src/test/resources/org/janusgraph/hadoop/formats/edgelist/rdf/** @@ -344,7 +349,7 @@ maven-compiler-plugin - 3.8.1 + 3.10.0 ${compiler.source} ${compiler.target} @@ -377,13 +382,6 @@ ${test.excluded.groups} ${test.skip.default} - - - - log4j.configuration - file:${project.build.directory}/test-classes/log4j.properties - - @@ -409,7 +407,6 @@ ${test.skip.tp} ${project.build.directory} - 
file:${project.build.directory}/test-classes/log4j.properties true @@ -419,15 +416,6 @@ maven-failsafe-plugin 2.22.2 - - - - - log4j.configuration - file:${project.build.directory}/test-classes/log4j.properties - - - maven-deploy-plugin @@ -502,7 +490,7 @@ org.jacoco jacoco-maven-plugin - 0.8.6 + 0.8.8 @@ -575,6 +563,10 @@ spark-gremlin ${tinkerpop.version} + + org.slf4j + slf4j-log4j12 + org.eclipse.jetty.orbit javax.servlet @@ -673,7 +665,7 @@ commons-net commons-net - 3.7.2 + 3.8.0 commons-beanutils @@ -703,7 +695,7 @@ org.yaml snakeyaml - 1.28 + 1.29 net.oneandone.reflections8 @@ -723,12 +715,7 @@ org.apache.avro avro - 1.10.1 - - - jboss-logging - org.jboss.logging - 3.4.1.Final + 1.11.0 org.slf4j @@ -736,9 +723,14 @@ ${slf4j.version} - org.slf4j - slf4j-log4j12 - ${slf4j.version} + org.apache.logging.log4j + log4j-core + ${log4j2.version} + + + org.apache.logging.log4j + log4j-slf4j-impl + ${log4j2.version} log4j @@ -748,7 +740,7 @@ ch.qos.logback logback-classic - 1.1.3 + 1.2.7 @@ -870,12 +862,12 @@ --> com.google.guava guava - 29.0-jre + 31.0.1-jre com.google.errorprone error_prone_annotations - 2.7.1 + 2.10.0 commons-codec @@ -887,11 +879,6 @@ commons-cli 1.4 - - org.jboss.netty - netty - 3.2.10.Final - io.netty netty @@ -945,12 +932,12 @@ commons-io commons-io - 2.8.0 + 2.11.0 org.jacoco org.jacoco.ant - 0.8.6 + 0.8.7 io.dropwizard.metrics @@ -973,20 +960,15 @@ ${metrics.version} - com.boundary - high-scale-lib - 1.0.6 + org.jctools + jctools-core + 3.3.0 com.clearspring.analytics stream 2.9.8 - - it.unimi.dsi - fastutil - 8.5.2 - org.apache.cassandra cassandra-all @@ -997,26 +979,10 @@ javassist 3.27.0-GA - - com.carrotsearch - junit-benchmarks - 0.7.2 - com.carrotsearch hppc - 0.8.2 - - - com.carrotsearch.randomizedtesting - randomizedtesting-runner - 2.7.8 - - - junit - junit - - + 0.9.1 org.apache.commons @@ -1138,7 +1104,7 @@ org.xerial.snappy snappy-java - 1.1.7.6 + 1.1.8.4 javax.activation @@ -1148,7 +1114,7 @@ org.slf4j jcl-over-slf4j - 1.7.30 + 
${slf4j.version} com.nimbusds @@ -1160,6 +1126,56 @@ commons-text 1.9 + + junit + junit + 4.13.2 + + + org.projectlombok + lombok + 1.18.18 + + + org.osgi + org.osgi.core + 6.0.0 + + + javax.xml.bind + jaxb-api + 2.3.1 + + + net.java.dev.jna + jna + ${jna.version} + + + org.graalvm.nativeimage + svm + 21.0.0.2 + + + javax.servlet + javax.servlet-api + 4.0.1 + + + org.eclipse.jetty + jetty-servlet + ${jetty.version} + + + org.eclipse.jetty + jetty-server + ${jetty.version} + + + org.eclipse.jetty + jetty-util + ${jetty.version} + @@ -1170,14 +1186,14 @@ slf4j-api - org.slf4j - slf4j-log4j12 + org.apache.logging.log4j + log4j-slf4j-impl runtime true - log4j - log4j + org.apache.logging.log4j + log4j-core runtime true @@ -1281,6 +1297,10 @@ hadoop-client ${hadoop2.version} + + org.slf4j + slf4j-log4j12 + javax.servlet servlet-api @@ -1489,7 +1509,7 @@ org.jacoco org.jacoco.ant - 0.8.6 + 0.8.7 diff --git a/requirements.txt b/requirements.txt index eb6af054a6..d590ec55d5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ -mkdocs==1.1.2 +mkdocs==1.2.3 mkdocs-material==6.2.3 -Pygments==2.6.1 +Pygments==2.7.4 pymdown-extensions==7.1 markdown==3.2.2 mkdocs-markdownextradata-plugin==0.1.7