diff --git a/.github/workflows/go-driver.yml b/.github/workflows/go-driver.yml
index f60ae8029..afba9cbe6 100644
--- a/.github/workflows/go-driver.yml
+++ b/.github/workflows/go-driver.yml
@@ -2,10 +2,10 @@ name: Go Driver Tests
on:
push:
- branches: [ "master", "PG11" ]
+ branches: [ "master", "PG12" ]
pull_request:
- branches: [ "master", "PG11" ]
+ branches: [ "master", "PG12" ]
jobs:
build:
@@ -17,7 +17,7 @@ jobs:
defaults:
run:
working-directory: drivers/golang/age/
-
+
steps:
- uses: actions/checkout@v3
@@ -26,17 +26,17 @@ jobs:
if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then
if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then
echo "TAG=latest" >> $GITHUB_ENV
- elif [[ "$GITHUB_REF" == "refs/heads/PG11" ]]; then
- echo "TAG=PG11_latest" >> $GITHUB_ENV
+ elif [[ "$GITHUB_REF" == "refs/heads/PG12" ]]; then
+ echo "TAG=PG12_latest" >> $GITHUB_ENV
fi
elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
if [[ "$GITHUB_BASE_REF" == "master" ]]; then
echo "TAG=latest" >> $GITHUB_ENV
- elif [[ "$GITHUB_BASE_REF" == "PG11" ]]; then
- echo "TAG=PG11_latest" >> $GITHUB_ENV
+ elif [[ "$GITHUB_BASE_REF" == "PG12" ]]; then
+ echo "TAG=PG12_latest" >> $GITHUB_ENV
fi
fi
-
+
- name: Run apache/age docker image
run: |
export TAG=$TAG
@@ -46,7 +46,7 @@ jobs:
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
-
+
- name: Generate
run: go generate ./../...
@@ -54,4 +54,4 @@ jobs:
run: go build -v ./...
- name: Test
- run: go test . -v
\ No newline at end of file
+ run: go test . -v
diff --git a/.github/workflows/installcheck.yaml b/.github/workflows/installcheck.yaml
index b18921f21..c35979cd5 100644
--- a/.github/workflows/installcheck.yaml
+++ b/.github/workflows/installcheck.yaml
@@ -2,32 +2,32 @@ name: Build / Regression
on:
push:
- branches: [ 'master', 'PG11' ]
+ branches: [ 'master', 'PG12' ]
pull_request:
- branches: [ 'master', 'PG11' ]
+ branches: [ 'master', 'PG12' ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- - name: Get latest commit id of PostgreSQL 11
+ - name: Get latest commit id of PostgreSQL 12
run: |
- echo "PG_COMMIT_HASH=$(git ls-remote git://git.postgresql.org/git/postgresql.git refs/heads/REL_11_STABLE | awk '{print $1}')" >> $GITHUB_ENV
+ echo "PG_COMMIT_HASH=$(git ls-remote git://git.postgresql.org/git/postgresql.git refs/heads/REL_12_STABLE | awk '{print $1}')" >> $GITHUB_ENV
- - name: Cache PostgreSQL 11
+ - name: Cache PostgreSQL 12
uses: actions/cache@v3
- id: pg11cache
+ id: pg12cache
with:
- path: ~/pg11
- key: ${{ runner.os }}-v1-pg11-${{ env.PG_COMMIT_HASH }}
+ path: ~/pg12
+ key: ${{ runner.os }}-v1-pg12-${{ env.PG_COMMIT_HASH }}
- - name: Install PostgreSQL 11
- if: steps.pg11cache.outputs.cache-hit != 'true'
+ - name: Install PostgreSQL 12
+ if: steps.pg12cache.outputs.cache-hit != 'true'
run: |
- git clone --depth 1 --branch REL_11_STABLE git://git.postgresql.org/git/postgresql.git ~/pg11source
- cd ~/pg11source
- ./configure --prefix=$HOME/pg11 CFLAGS="-std=gnu99 -ggdb -O0" --enable-cassert
+ git clone --depth 1 --branch REL_12_STABLE git://git.postgresql.org/git/postgresql.git ~/pg12source
+ cd ~/pg12source
+ ./configure --prefix=$HOME/pg12 CFLAGS="-std=gnu99 -ggdb -O0" --enable-cassert
make install -j$(nproc) > /dev/null
- uses: actions/checkout@v3
@@ -35,14 +35,14 @@ jobs:
- name: Build
id: build
run: |
- make PG_CONFIG=$HOME/pg11/bin/pg_config install -j$(nproc)
+ make PG_CONFIG=$HOME/pg12/bin/pg_config install -j$(nproc)
- name: Regression tests
id: regression_tests
run: |
- make PG_CONFIG=$HOME/pg11/bin/pg_config installcheck
+ make PG_CONFIG=$HOME/pg12/bin/pg_config installcheck
continue-on-error: true
-
+
- name: Dump regression test errors
if: steps.regression_tests.outcome != 'success'
run: |
diff --git a/.github/workflows/jdbc-driver.yaml b/.github/workflows/jdbc-driver.yaml
index c4037382d..d76ce6faa 100644
--- a/.github/workflows/jdbc-driver.yaml
+++ b/.github/workflows/jdbc-driver.yaml
@@ -2,22 +2,21 @@ name: JDBC Driver Tests
on:
push:
- branches: [ "master", "PG11" ]
-
+ branches: [ "master", "PG12" ]
+
pull_request:
- branches: [ "master", "PG11" ]
+ branches: [ "master", "PG12" ]
jobs:
build:
runs-on: ubuntu-latest
-
defaults:
run:
working-directory: drivers/jdbc
steps:
- uses: actions/checkout@v3
-
+
- name: Set up Java
uses: actions/setup-java@v3
with:
@@ -29,18 +28,18 @@ jobs:
if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then
if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then
echo "TAG=latest" >> $GITHUB_ENV
- elif [[ "$GITHUB_REF" == "refs/heads/PG11" ]]; then
- echo "TAG=PG11_latest" >> $GITHUB_ENV
+ elif [[ "$GITHUB_REF" == "refs/heads/PG12" ]]; then
+ echo "TAG=PG12_latest" >> $GITHUB_ENV
fi
elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
if [[ "$GITHUB_BASE_REF" == "master" ]]; then
echo "TAG=latest" >> $GITHUB_ENV
- elif [[ "$GITHUB_BASE_REF" == "PG11" ]]; then
- echo "TAG=PG11_latest" >> $GITHUB_ENV
+ elif [[ "$GITHUB_BASE_REF" == "PG12" ]]; then
+ echo "TAG=PG12_latest" >> $GITHUB_ENV
fi
fi
- name: Build and Test
run: |
export TAG=$TAG
- gradle build
\ No newline at end of file
+ gradle build
diff --git a/.github/workflows/nodejs-driver.yaml b/.github/workflows/nodejs-driver.yaml
index 55dccae58..b2a4e0ea9 100644
--- a/.github/workflows/nodejs-driver.yaml
+++ b/.github/workflows/nodejs-driver.yaml
@@ -2,10 +2,10 @@ name: Nodejs Driver Tests
on:
push:
- branches: [ "master", "PG11" ]
+ branches: [ "master", "PG12" ]
pull_request:
- branches: [ "master", "PG11" ]
+ branches: [ "master", "PG12" ]
jobs:
build:
@@ -14,26 +14,26 @@ jobs:
defaults:
run:
working-directory: drivers/nodejs/
-
+
steps:
- uses: actions/checkout@v3
-
+
- name: Set tag based on branch
run: |
if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then
if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then
echo "TAG=latest" >> $GITHUB_ENV
- elif [[ "$GITHUB_REF" == "refs/heads/PG11" ]]; then
- echo "TAG=PG11_latest" >> $GITHUB_ENV
+ elif [[ "$GITHUB_REF" == "refs/heads/PG12" ]]; then
+ echo "TAG=PG12_latest" >> $GITHUB_ENV
fi
elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
if [[ "$GITHUB_BASE_REF" == "master" ]]; then
echo "TAG=latest" >> $GITHUB_ENV
- elif [[ "$GITHUB_BASE_REF" == "PG11" ]]; then
- echo "TAG=PG11_latest" >> $GITHUB_ENV
+ elif [[ "$GITHUB_BASE_REF" == "PG12" ]]; then
+ echo "TAG=PG12_latest" >> $GITHUB_ENV
fi
fi
-
+
- name: Run apache/age docker image
run: |
export TAG=$TAG
@@ -51,4 +51,4 @@ jobs:
run: npm run build
- name: Test
- run: npm test
\ No newline at end of file
+ run: npm test
diff --git a/.github/workflows/python-driver.yaml b/.github/workflows/python-driver.yaml
index 92e1a48a5..df3ed71be 100644
--- a/.github/workflows/python-driver.yaml
+++ b/.github/workflows/python-driver.yaml
@@ -2,10 +2,10 @@ name: Python Driver Tests
on:
push:
- branches: [ "master", "PG11" ]
-
+ branches: [ "master", "PG12" ]
+
pull_request:
- branches: [ "master", "PG11" ]
+ branches: [ "master", "PG12" ]
jobs:
build:
@@ -14,26 +14,26 @@ jobs:
defaults:
run:
working-directory: drivers/python
-
+
steps:
- uses: actions/checkout@v3
-
+
- name: Set tag based on branch
run: |
if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then
if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then
echo "TAG=latest" >> $GITHUB_ENV
- elif [[ "$GITHUB_REF" == "refs/heads/PG11" ]]; then
- echo "TAG=PG11_latest" >> $GITHUB_ENV
+ elif [[ "$GITHUB_REF" == "refs/heads/PG12" ]]; then
+ echo "TAG=PG12_latest" >> $GITHUB_ENV
fi
elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
if [[ "$GITHUB_BASE_REF" == "master" ]]; then
echo "TAG=latest" >> $GITHUB_ENV
- elif [[ "$GITHUB_BASE_REF" == "PG11" ]]; then
- echo "TAG=PG11_latest" >> $GITHUB_ENV
+ elif [[ "$GITHUB_BASE_REF" == "PG12" ]]; then
+ echo "TAG=PG12_latest" >> $GITHUB_ENV
fi
fi
-
+
- name: Run apache/age docker image
run: |
export TAG=$TAG
diff --git a/.gitignore b/.gitignore
index 660286b03..ac4fd622b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,8 @@
*.o
*.so
-.gitignore
build.sh
.idea
.deps
.DS_Store
*.tokens
-*.interp
\ No newline at end of file
+*.interp
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 69eab0548..d13839179 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -20,7 +20,7 @@ We strongly recommend you to subscribe the mailing lists, join the Apache AGE Di
## Pull Requests
-Changes to AGE source code are proposed, reviewed, and committed via GitHub pull requests (described in Code Convention). Pull requests are a great way to get your ideas into this repository. Anyone can view and comment on active changes here. Reviewing others' changes are a good way to learn how the change process works and gain exposure to activity in various parts of the code. You can help by reviewing the changes, asking questions, or pointing out issues as simple as typos.
+Changes to AGE source code are proposed, reviewed, and committed via GitHub pull requests (described in Code Convention). Pull requests are a great way to get your ideas into this repository. Anyone can view and comment on active changes here. Reviewing others' changes are a good way to learn how the change process works and gain exposure to activity in various parts of the code. You can help by reviewing the changes, asking questions, or pointing out issues as simple as typos.
## Documentation Changes
diff --git a/Dockerfile b/Dockerfile
index 11d882b2e..24e1e2d9c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -16,18 +16,17 @@
# limitations under the License.
#
+FROM postgres:12
-FROM postgres:11-buster
-
-RUN apt-get update
+RUN apt-get update
RUN apt-get install --assume-yes --no-install-recommends --no-install-suggests \
bison \
build-essential \
flex \
- postgresql-server-dev-11
+ postgresql-server-dev-12
-COPY . /age
-RUN cd /age && make install
+COPY . /age
+RUN cd /age && make install
COPY docker-entrypoint-initdb.d/00-create-extension-age.sql /docker-entrypoint-initdb.d/00-create-extension-age.sql
diff --git a/META.json b/META.json
new file mode 100644
index 000000000..96f74022f
--- /dev/null
+++ b/META.json
@@ -0,0 +1,45 @@
+{
+ "name": "ApacheAGE",
+ "abstract": "Apache AGE is a PostgreSQL Extension that provides graph database functionality",
+ "description": "Apache AGE is a PostgreSQL Extension that provides graph database functionality. AGE is an acronym for A Graph Extension, and is inspired by Bitnine's fork of PostgreSQL 10, AgensGraph, which is a multi-model database. The goal of the project is to create single storage that can handle both relational and graph model data so that users can use standard ANSI SQL along with openCypher, the Graph query language. A graph consists of a set of vertices (also called nodes) and edges, where each individual vertex and edge possesses a map of properties. A vertex is the basic object of a graph, that can exist independently of everything else in the graph. An edge creates a directed connection between two vertices. A graph database is simply composed of vertices and edges. This type of database is useful when the meaning is in the relationships between the data. Relational databases can easily handle direct relationships, but indirect relationships are more difficult to deal with in relational databases. A graph database stores relationship information as a first-class entity. Apache AGE gives you the best of both worlds, simultaneously.",
+ "version": "1.3.0",
+ "maintainer": [
+ "users@age.apache.org"
+ ],
+ "license": "apache_2_0",
+ "provides": {
+ "ApacheAGE": {
+ "abstract": "Apache AGE is a PostgreSQL Extension that provides graph database functionality",
+ "file": "age--1.3.0.sql",
+ "docfile": "README.md",
+ "version": "1.3.0"
+ }
+ },
+ "prereqs": {
+ "runtime": {
+ "requires": {
+ "PostgreSQL": "12.0.0"
+ }
+ }
+ },
+ "resources": {
+ "homepage": "https://github.com/apache/age/tree/master",
+ "bugtracker": {
+ "web": "https://github.com/apache/age/issues"
+ },
+ "repository": {
+ "url": "https://github.com/apache/age.git",
+ "web": "https://github.com/apache/age",
+ "type": "git"
+ }
+ },
+ "generated_by": "users@age.apache.org",
+ "meta-spec": {
+ "version": "1.0.0",
+ "url": "http://pgxn.org/meta/spec.txt"
+ },
+ "tags": [
+ "graphdb",
+ "graph-database"
+ ]
+}
diff --git a/Makefile b/Makefile
index e9be9cda8..759078fb2 100644
--- a/Makefile
+++ b/Makefile
@@ -108,15 +108,23 @@ ag_regress_dir = $(srcdir)/regress
REGRESS_OPTS = --load-extension=age --inputdir=$(ag_regress_dir) --outputdir=$(ag_regress_dir) --temp-instance=$(ag_regress_dir)/instance --port=61958 --encoding=UTF-8
ag_regress_out = instance/ log/ results/ regression.*
-EXTRA_CLEAN = $(addprefix $(ag_regress_dir)/, $(ag_regress_out)) src/backend/parser/cypher_gram.c src/include/parser/cypher_gram_def.h
+EXTRA_CLEAN = $(addprefix $(ag_regress_dir)/, $(ag_regress_out)) src/backend/parser/cypher_gram.c src/include/parser/cypher_gram_def.h src/include/parser/cypher_kwlist_d.h
+
+GEN_KEYWORDLIST = $(PERL) -I ./tools/ ./tools/gen_keywordlist.pl
+GEN_KEYWORDLIST_DEPS = ./tools/gen_keywordlist.pl tools/PerfectHash.pm
ag_include_dir = $(srcdir)/src/include
PG_CPPFLAGS = -I$(ag_include_dir) -I$(ag_include_dir)/parser
-PG_CONFIG = pg_config
+PG_CONFIG ?= pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
+src/backend/parser/cypher_keywords.o: src/include/parser/cypher_kwlist_d.h
+
+src/include/parser/cypher_kwlist_d.h: src/include/parser/cypher_kwlist.h $(GEN_KEYWORDLIST_DEPS)
+ $(GEN_KEYWORDLIST) --extern --varname CypherKeyword --output src/include/parser $<
+
src/include/parser/cypher_gram_def.h: src/backend/parser/cypher_gram.c
src/backend/parser/cypher_gram.c: BISONFLAGS += --defines=src/include/parser/cypher_gram_def.h
diff --git a/NOTICE b/NOTICE
index 40ce5ef8a..a93fb0d3b 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
Apache AGE
-Copyright 2022 The Apache Software Foundation.
+Copyright 2023 The Apache Software Foundation.
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
diff --git a/README.md b/README.md
index f376c1bdd..de3a898a9 100644
--- a/README.md
+++ b/README.md
@@ -33,9 +33,9 @@
-
-
-
+
+
+
@@ -56,7 +56,7 @@
-
What is Apache AGE?
+
What is Apache AGE?
[Apache AGE](https://age.apache.org/#) is an extension for PostgreSQL that enables users to leverage a graph database on top of the existing relational databases. AGE is an acronym for A Graph Extension and is inspired by Bitnine's AgensGraph, a multi-model database fork of PostgreSQL. The basic principle of the project is to create a single storage that handles both the relational and graph data model so that the users can use the standard ANSI SQL along with openCypher, one of the most popular graph query languages today.
@@ -158,7 +158,7 @@ You can download the Postgres
Clone the github repository or download the download an official release.
-Run the pg_config utility and check the version of PostgreSQL. Currently, only PostgreSQL versions 11, 12 & 13 are supported. If you have any other version of Postgres, you will need to install PostgreSQL version 11, 12 or 13.
+Run the pg_config utility and check the version of PostgreSQL. Currently, only PostgreSQL versions 11, 12, & 13 are supported. If you have any other version of Postgres, you will need to install PostgreSQL version 11, 12, or 13.
```bash
diff --git a/RELEASE b/RELEASE
index 7707ed1b4..cb6f0de25 100644
--- a/RELEASE
+++ b/RELEASE
@@ -15,33 +15,53 @@
# specific language governing permissions and limitations
# under the License.
-Release Notes for Apache AGE release v1.3.0
+Release Notes for Apache AGE release v1.3.0 for PG 12
Apache AGE 1.3.0 - Release Notes
- Add additional comments for create_graph function. (#582)
- Optimize age_exists function. (#586)
- Implement plus-equal operator in SET clause. (#638)
- Implement CI test for python driver. (#587)
- Move from travis CI to github actions for build. (#673)
- Update all driver CIs to GitHub actions.
- Fix build warnings.
- Fix golang driver workflow (#563)
- Updated Readme for drivers folder. (#642)
- Removed async from function definitions. (#680)
- Barbell graph generation (#648) and Barbell regress tests. (#708)
- Updated Python Driver ANTLR 4.9.3 -> 4.11.1 (#706)
- Modify docker url for JDBC driver tests (#716)
- Fix WITH ignoring WHERE clause. (#646)
- Implement isEmpty() predicate function. (#710)
- Fix cypher function input argument checks. (#718)
- Fix Issue 693 - server termination from return exists(path). (#721)
- Update regression tests for cypher_with. (#725)
- Fix issue 733 - create_complete_graph() terminates the server. (#734)
- Prevent MATCH from following OPTIONAL MATCH. (#740)
- Fix property constraints against resolved variables. (#724) (#751) (#701) (#747)
- Include invalid labels in reused variables. (#751) (#762)
- Remove check for scalar agtypes in unwind. (#736)
- Update PG11 CI workflows.
- Update readme and version for python driver. (#780)
- Update README.md
+NOTE: Due to additions to core tables, there is no upgrade path from the
+ previous version.
+
+Implement CALL ...[YIELD] for cypher functions. (#630)
+Graph names with the empty string '' are no more allowed. (#251)
+Fix typos at multiple locations. (#470)
+Fix Bug with CALL... [YIELD], clause ignores WHERE.
+Fix EXPLAIN to allow for nested cypher commands.
+Fix delete_global_graphs and add regression tests. (#336)
+Invalid labels now return NULL.
+Update CONTRIBUTING.md (#348)
+Fix null pointer on name compare. (#376)
+Fix Travis CI warning messages.
+Additional regression tests added for age_global_graph. (#341)
+Readme Added for AGE-JDBC-Driver. (#383)
+Updated volatility categories for many functions.
+Fix issue 339 - entities in WHERE clause have wrong Expr. (#391)
+Create complete graph function. (#342) (#662)
+Fix issue 317: Graph naming convention. (#349)
+Update SET clause to support assigning a map to a variable. (#468)
+Patch to address PR 203 that appears to be inactive. (#671)
+Add additional comments for create_graph function. (#582)
+Optimize age_exists function. (#586)
+Implement plus-equal operator in SET clause. (#638)
+Implement CI test for python driver. (#587)
+Move from travis CI to github actions for build. (#673)
+Update all driver CIs to GitHub actions.
+Fix build warnings.
+Updated Readme for drivers folder. (#642)
+Removed async from function definitions. (#680)
+Barbell graph generation (#648) and Barbell regress tests. (#708)
+Updated Python Driver ANTLR 4.9.3 -> 4.11.1 (#706)
+Fix WITH ignoring WHERE clause. (#646)
+Implement isEmpty() predicate function. (#710)
+Fix cypher function input argument checks. (#718)
+Fix Issue 693 - server termination from return exists(path). (#721)
+Update regression tests for cypher_with. (#725)
+Fix issue 733 - create_complete_graph() terminates the server. (#734)
+Prevent MATCH from following OPTIONAL MATCH. (#740)
+Fix property constraints against resolved variables. (#724) (#751) (#701) (#747)
+Include invalid labels in reused variables. (#751) (#762)
+Fix update_entity_tuple to use correct CommandId. (#769)
+Remove check for scalar agtypes in unwind. (#736)
+Update PG12 CI workflows. (#776)
+Update readme and version for python driver. (#780)
+Update README.md
diff --git a/age--0.6.0--0.7.0.sql b/age--0.6.0--0.7.0.sql
deleted file mode 100644
index cdbaf4406..000000000
--- a/age--0.6.0--0.7.0.sql
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
--- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "ALTER EXTENSION age UPDATE TO '0.7.0'" to load this file. \quit
-
-CREATE FUNCTION ag_catalog.create_vlabel(graph_name name, label_name name)
- RETURNS void
- LANGUAGE c
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.create_elabel(graph_name name, label_name name)
- RETURNS void
- LANGUAGE c
-AS 'MODULE_PATHNAME';
-
--- binary I/O functions
-CREATE FUNCTION ag_catalog.graphid_send(graphid)
-RETURNS bytea
-LANGUAGE c
-IMMUTABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.graphid_recv(internal)
-RETURNS graphid
-LANGUAGE c
-IMMUTABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-UPDATE pg_type SET
-typsend = 'ag_catalog.graphid_send',
-typreceive = 'ag_catalog.graphid_recv'
-WHERE typname = 'graphid';
-
--- binary I/O functions
-CREATE FUNCTION ag_catalog.agtype_send(agtype)
-RETURNS bytea
-LANGUAGE c
-IMMUTABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.agtype_recv(internal)
-RETURNS agtype
-LANGUAGE c
-IMMUTABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-UPDATE pg_type SET
-typsend = 'ag_catalog.agtype_send',
-typreceive = 'ag_catalog.agtype_recv'
-WHERE typname = 'agtype';
-
--- agtype -> int4[]
-CREATE FUNCTION ag_catalog.agtype_to_int4_array(variadic "any")
- RETURNS int[]
- LANGUAGE c
- STABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE CAST (agtype AS int[])
- WITH FUNCTION ag_catalog.agtype_to_int4_array(variadic "any");
-
-CREATE FUNCTION ag_catalog.age_eq_tilde(agtype, agtype)
-RETURNS agtype
-LANGUAGE c
-STABLE
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE OR REPLACE FUNCTION ag_catalog.age_vle(IN agtype, IN agtype, IN agtype,
- IN agtype, IN agtype, IN agtype,
- IN agtype, OUT edges agtype)
-RETURNS SETOF agtype
-LANGUAGE C
-STABLE
-CALLED ON NULL INPUT
-PARALLEL UNSAFE -- might be safe
-AS 'MODULE_PATHNAME';
-
--- function to build an edge for a VLE match
-CREATE FUNCTION ag_catalog.age_build_vle_match_edge(agtype, agtype)
-RETURNS agtype
-LANGUAGE C
-STABLE
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
--- function to match a terminal vle edge
-CREATE FUNCTION ag_catalog.age_match_vle_terminal_edge(agtype, agtype, agtype)
-RETURNS boolean
-LANGUAGE C
-STABLE
-CALLED ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
--- function to create an AGTV_PATH from a VLE_path_container
-CREATE FUNCTION ag_catalog.age_materialize_vle_path(agtype)
-RETURNS agtype
-LANGUAGE C
-STABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
--- function to create an AGTV_ARRAY of edges from a VLE_path_container
-CREATE FUNCTION ag_catalog.age_materialize_vle_edges(agtype)
-RETURNS agtype
-LANGUAGE C
-STABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.age_match_vle_edge_to_id_qual(agtype, agtype, agtype)
-RETURNS boolean
-LANGUAGE C
-STABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.age_match_two_vle_edges(agtype, agtype)
-RETURNS boolean
-LANGUAGE C
-STABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
--- list functions
-CREATE FUNCTION ag_catalog.age_keys(agtype)
-RETURNS agtype
-LANGUAGE c
-STABLE
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.age_labels(agtype)
-RETURNS agtype
-LANGUAGE c
-STABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.age_nodes(agtype)
-RETURNS agtype
-LANGUAGE c
-STABLE
-RETURNS NULL ON NULL INPUT
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.age_relationships(agtype)
-RETURNS agtype
-LANGUAGE c
-STABLE
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.age_range(variadic "any")
-RETURNS agtype
-LANGUAGE c
-STABLE
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
---
--- End
---
diff --git a/age--0.7.0--1.0.0.sql b/age--0.7.0--1.0.0.sql
deleted file mode 100644
index 59d3bff82..000000000
--- a/age--0.7.0--1.0.0.sql
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
--- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "ALTER EXTENSION age UPDATE TO '1.0.0'" to load this file. \quit
-
-CREATE FUNCTION ag_catalog.load_labels_from_file(graph_name name,
- label_name name,
- file_path text,
- id_field_exists bool default true)
- RETURNS void
- LANGUAGE c
- AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.load_edges_from_file(graph_name name,
- label_name name,
- file_path text)
- RETURNS void
- LANGUAGE c
- AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog._cypher_merge_clause(internal)
-RETURNS void
-LANGUAGE c
-AS 'MODULE_PATHNAME';
-
-CREATE FUNCTION ag_catalog.age_unnest(agtype, block_types boolean = false)
- RETURNS SETOF agtype
- LANGUAGE c
- STABLE
-PARALLEL SAFE
-AS 'MODULE_PATHNAME';
-
---
--- End
---
diff --git a/age--0.5.0--0.6.0.sql b/age--1.1.0--1.1.1.sql
similarity index 58%
rename from age--0.5.0--0.6.0.sql
rename to age--1.1.0--1.1.1.sql
index dbe620f13..e4ec742dd 100644
--- a/age--0.5.0--0.6.0.sql
+++ b/age--1.1.0--1.1.1.sql
@@ -17,18 +17,21 @@
* under the License.
*/
--- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "ALTER EXTENSION age UPDATE TO '0.6.0'" to load this file. \quit
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION age UPDATE TO '1.1.1'" to load this file. \quit
-CREATE OR REPLACE FUNCTION ag_catalog.age_vle(IN agtype, IN agtype, IN agtype,
- IN agtype, IN agtype, IN agtype,
- IN agtype, OUT edges agtype)
-RETURNS SETOF agtype
-LANGUAGE C
-IMMUTABLE
-STRICT
+-- add in new age_prepare_cypher function
+CREATE FUNCTION ag_catalog.age_prepare_cypher(cstring, cstring)
+RETURNS boolean
+LANGUAGE c
+STABLE
+PARALLEL SAFE
AS 'MODULE_PATHNAME';
---
--- End
---
+-- modify the param defaults for cypher function
+CREATE OR REPLACE FUNCTION ag_catalog.cypher(graph_name name = NULL,
+ query_string cstring = NULL,
+ params agtype = NULL)
+RETURNS SETOF record
+LANGUAGE c
+AS 'MODULE_PATHNAME';
diff --git a/age--1.3.0.sql b/age--1.3.0.sql
index 6d8730f9b..b364ad3e7 100644
--- a/age--1.3.0.sql
+++ b/age--1.3.0.sql
@@ -25,15 +25,16 @@
--
CREATE TABLE ag_graph (
+ graphid oid NOT NULL,
name name NOT NULL,
namespace regnamespace NOT NULL
-) WITH (OIDS);
+);
+
+CREATE UNIQUE INDEX ag_graph_graphid_index ON ag_graph USING btree (graphid);
-- include content of the ag_graph table into the pg_dump output
SELECT pg_catalog.pg_extension_config_dump('ag_graph', '');
-CREATE UNIQUE INDEX ag_graph_oid_index ON ag_graph USING btree (oid);
-
CREATE UNIQUE INDEX ag_graph_name_index ON ag_graph USING btree (name);
CREATE UNIQUE INDEX ag_graph_namespace_index
@@ -51,19 +52,20 @@ CREATE TABLE ag_label (
id label_id,
kind label_kind,
relation regclass NOT NULL,
- seq_name name NOT NULL
-) WITH (OIDS);
+ seq_name name NOT NULL,
+ CONSTRAINT fk_graph_oid
+ FOREIGN KEY(graph)
+ REFERENCES ag_graph(graphid)
+);
-- include content of the ag_label table into the pg_dump output
SELECT pg_catalog.pg_extension_config_dump('ag_label', '');
-CREATE UNIQUE INDEX ag_label_oid_index ON ag_label USING btree (oid);
-
CREATE UNIQUE INDEX ag_label_name_graph_index
ON ag_label
USING btree (name, graph);
-CREATE UNIQUE INDEX ag_label_graph_id_index
+CREATE UNIQUE INDEX ag_label_graph_oid_index
ON ag_label
USING btree (graph, id);
diff --git a/drivers/docker-compose.yml b/drivers/docker-compose.yml
index 1716d227b..45ab86a39 100644
--- a/drivers/docker-compose.yml
+++ b/drivers/docker-compose.yml
@@ -7,4 +7,4 @@ services:
- POSTGRES_PASSWORD=agens
- POSTGRES_DB=postgres
ports:
- - 5432:5432
\ No newline at end of file
+ - 5432:5432
diff --git a/drivers/jdbc/lib/build.gradle.kts b/drivers/jdbc/lib/build.gradle.kts
index 15662e7ce..2ba529ec1 100644
--- a/drivers/jdbc/lib/build.gradle.kts
+++ b/drivers/jdbc/lib/build.gradle.kts
@@ -68,4 +68,4 @@ tasks.test {
showCauses = true
showStackTraces = true
}
-}
\ No newline at end of file
+}
diff --git a/drivers/jdbc/lib/src/test/java/org/apache/age/jdbc/AgtypeStatementTest.java b/drivers/jdbc/lib/src/test/java/org/apache/age/jdbc/AgtypeStatementTest.java
index 74be24608..97e424a70 100644
--- a/drivers/jdbc/lib/src/test/java/org/apache/age/jdbc/AgtypeStatementTest.java
+++ b/drivers/jdbc/lib/src/test/java/org/apache/age/jdbc/AgtypeStatementTest.java
@@ -202,4 +202,4 @@ private void runPreparedStatementString(PgConnection conn) throws SQLException,
assertEquals(1, returnedAgtype.getInt());
}
-}
\ No newline at end of file
+}
diff --git a/drivers/python/age/age.py b/drivers/python/age/age.py
index 98b99bb71..40ad0c21e 100644
--- a/drivers/python/age/age.py
+++ b/drivers/python/age/age.py
@@ -58,7 +58,7 @@ def deleteGraph(conn:ext.connection, graphName:str):
with conn.cursor() as cursor:
cursor.execute(sql.SQL("SELECT drop_graph({graphName}, true);").format(graphName=sql.Literal(graphName)))
conn.commit()
-
+
def buildCypher(graphName:str, cypherStmt:str, columns:list) ->str:
if graphName == None:
@@ -91,7 +91,7 @@ def execSql(conn:ext.connection, stmt:str, commit:bool=False, params:tuple=None)
cursor.execute(stmt, params)
if commit:
conn.commit()
-
+
return cursor
except SyntaxError as cause:
conn.rollback()
@@ -105,8 +105,8 @@ def querySql(conn:ext.connection, stmt:str, params:tuple=None) -> ext.cursor :
return execSql(conn, stmt, False, params)
# Execute cypher statement and return cursor.
-# If cypher statement changes data (create, set, remove),
-# You must commit session(ag.commit())
+# If cypher statement changes data (create, set, remove),
+# You must commit session(ag.commit())
# (Otherwise the execution cannot make any effect.)
def execCypher(conn:ext.connection, graphName:str, cypherStmt:str, cols:list=None, params:tuple=None) -> ext.cursor :
if conn == None or conn.closed:
@@ -120,7 +120,7 @@ def execCypher(conn:ext.connection, graphName:str, cypherStmt:str, cols:list=Non
cypher = cypher[2:len(cypher)-1]
preparedStmt = "SELECT * FROM age_prepare_cypher({graphName},{cypherStmt})"
-
+
cursor = conn.cursor()
try:
cursor.execute(sql.SQL(preparedStmt).format(graphName=sql.Literal(graphName),cypherStmt=sql.Literal(cypher)))
@@ -190,10 +190,10 @@ def setGraph(self, graph:str):
def commit(self):
self.connection.commit()
-
+
def rollback(self):
self.connection.rollback()
-
+
def execCypher(self, cypherStmt:str, cols:list=None, params:tuple=None) -> ext.cursor :
return execCypher(self.connection, self.graphName, cypherStmt, cols=cols, params=params)
@@ -202,8 +202,8 @@ def cypher(self, cursor:ext.cursor, cypherStmt:str, cols:list=None, params:tuple
# def execSql(self, stmt:str, commit:bool=False, params:tuple=None) -> ext.cursor :
# return execSql(self.connection, stmt, commit, params)
-
-
+
+
# def execCypher(self, cypherStmt:str, commit:bool=False, params:tuple=None) -> ext.cursor :
# return execCypher(self.connection, self.graphName, cypherStmt, commit, params)
@@ -211,7 +211,4 @@ def cypher(self, cursor:ext.cursor, cypherStmt:str, cols:list=None, params:tuple
# return execCypherWithReturn(self.connection, self.graphName, cypherStmt, columns, params)
# def queryCypher(self, cypherStmt:str, columns:list=None , params:tuple=None) -> ext.cursor :
- # return queryCypher(self.connection, self.graphName, cypherStmt, columns, params)
-
-
-
+ # return queryCypher(self.connection, self.graphName, cypherStmt, columns, params)
diff --git a/drivers/python/age/exceptions.py b/drivers/python/age/exceptions.py
index cee9cc3be..8c46e8e99 100644
--- a/drivers/python/age/exceptions.py
+++ b/drivers/python/age/exceptions.py
@@ -1,70 +1,67 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-from psycopg2.errors import *
-
-class AgeNotSet(Exception):
- def __init__(self, name):
- self.name = name
-
- def __repr__(self) :
- return 'AGE extension is not set.'
-
-class GraphNotFound(Exception):
- def __init__(self, name):
- self.name = name
-
- def __repr__(self) :
- return 'Graph[' + self.name + '] does not exist.'
-
-
-class GraphAlreadyExists(Exception):
- def __init__(self, name):
- self.name = name
-
- def __repr__(self) :
- return 'Graph[' + self.name + '] already exists.'
-
-
-class GraphNotSet(Exception):
- def __repr__(self) :
- return 'Graph name is not set.'
-
-
-class NoConnection(Exception):
- def __repr__(self) :
- return 'No Connection'
-
-class NoCursor(Exception):
- def __repr__(self) :
- return 'No Cursor'
-
-class SqlExecutionError(Exception):
- def __init__(self, msg, cause):
- self.msg = msg
- self.cause = cause
- super().__init__(msg, cause)
-
- def __repr__(self) :
- return 'SqlExecution [' + self.msg + ']'
-
-class AGTypeError(Exception):
- def __init__(self, msg, cause):
- self.msg = msg
- self.cause = cause
- super().__init__(msg, cause)
-
-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from psycopg2.errors import *
+
+class AgeNotSet(Exception):
+ def __init__(self, name):
+ self.name = name
+
+ def __repr__(self) :
+ return 'AGE extension is not set.'
+
+class GraphNotFound(Exception):
+ def __init__(self, name):
+ self.name = name
+
+ def __repr__(self) :
+ return 'Graph[' + self.name + '] does not exist.'
+
+
+class GraphAlreadyExists(Exception):
+ def __init__(self, name):
+ self.name = name
+
+ def __repr__(self) :
+ return 'Graph[' + self.name + '] already exists.'
+
+
+class GraphNotSet(Exception):
+ def __repr__(self) :
+ return 'Graph name is not set.'
+
+
+class NoConnection(Exception):
+ def __repr__(self) :
+ return 'No Connection'
+
+class NoCursor(Exception):
+ def __repr__(self) :
+ return 'No Cursor'
+
+class SqlExecutionError(Exception):
+ def __init__(self, msg, cause):
+ self.msg = msg
+ self.cause = cause
+ super().__init__(msg, cause)
+
+ def __repr__(self) :
+ return 'SqlExecution [' + self.msg + ']'
+
+class AGTypeError(Exception):
+ def __init__(self, msg, cause):
+ self.msg = msg
+ self.cause = cause
+ super().__init__(msg, cause)
diff --git a/drivers/python/test_age_py.py b/drivers/python/test_age_py.py
index 1be2f04b7..99c8d0857 100644
--- a/drivers/python/test_age_py.py
+++ b/drivers/python/test_age_py.py
@@ -365,4 +365,4 @@ def testCollect(self):
suite.addTest(TestAgeBasic('testMultipleEdges'))
suite.addTest(TestAgeBasic('testCollect'))
TestAgeBasic.args = args
- unittest.TextTestRunner().run(suite)
\ No newline at end of file
+ unittest.TextTestRunner().run(suite)
diff --git a/regress/expected/agtype.out b/regress/expected/agtype.out
index 49d227846..65cca0e4c 100644
--- a/regress/expected/agtype.out
+++ b/regress/expected/agtype.out
@@ -23,6 +23,7 @@
-- Load extension and set path
--
LOAD 'age';
+SET extra_float_digits = 0;
SET search_path TO ag_catalog;
--
-- Create a table using the AGTYPE type
@@ -2260,7 +2261,7 @@ SELECT agtype_to_int4(agtype_in('1.444::numeric'));
-- These should all fail
SELECT agtype_to_int4(agtype_in('"string"'));
-ERROR: invalid input syntax for integer: "string"
+ERROR: invalid input syntax for type integer: "string"
SELECT agtype_to_int4(agtype_in('[1, 2, 3]'));
ERROR: cannot cast agtype array to type int
SELECT agtype_to_int4(agtype_in('{"int":1}'));
diff --git a/regress/expected/catalog.out b/regress/expected/catalog.out
index 240ff17b4..bc7b9434a 100644
--- a/regress/expected/catalog.out
+++ b/regress/expected/catalog.out
@@ -28,7 +28,7 @@ NOTICE: graph "graph" has been created
(1 row)
-SELECT * FROM ag_graph WHERE name = 'graph';
+SELECT name, namespace FROM ag_graph WHERE name = 'graph';
name | namespace
-------+-----------
graph | graph
@@ -121,7 +121,7 @@ NOTICE: graph "GraphB" has been created
(1 row)
-- Show GraphA's construction to verify case is preserved.
-SELECT * FROM ag_graph WHERE name = 'GraphA';
+SELECT name, namespace FROM ag_graph WHERE name = 'GraphA';
name | namespace
--------+-----------
GraphA | "GraphA"
@@ -142,7 +142,7 @@ NOTICE: graph "GraphA" renamed to "GraphX"
(1 row)
-- Show GraphX's construction to verify case is preserved.
-SELECT * FROM ag_graph WHERE name = 'GraphX';
+SELECT name, namespace FROM ag_graph WHERE name = 'GraphX';
name | namespace
--------+-----------
GraphX | "GraphX"
@@ -155,14 +155,14 @@ SELECT nspname FROM pg_namespace WHERE nspname = 'GraphX';
(1 row)
-- Verify there isn't a graph GraphA anymore.
-SELECT * FROM ag_graph WHERE name = 'GraphA';
+SELECT name, namespace FROM ag_graph WHERE name = 'GraphA';
name | namespace
------+-----------
(0 rows)
SELECT * FROM pg_namespace WHERE nspname = 'GraphA';
- nspname | nspowner | nspacl
----------+----------+--------
+ oid | nspname | nspowner | nspacl
+-----+---------+----------+--------
(0 rows)
-- Sanity check that graphx does not exist - should return 0.
diff --git a/regress/expected/cypher_call.out b/regress/expected/cypher_call.out
index f7e45a050..8c5c5fca7 100644
--- a/regress/expected/cypher_call.out
+++ b/regress/expected/cypher_call.out
@@ -41,8 +41,8 @@ CREATE FUNCTION call_stmt_test.add_agtype(agtype, agtype) RETURNS agtype
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
-/*
- * CALL (solo)
+/*
+ * CALL (solo)
*/
SELECT * FROM cypher('cypher_call', $$CALL sqrt(64)$$) as (sqrt agtype);
sqrt
diff --git a/regress/expected/cypher_set.out b/regress/expected/cypher_set.out
index dd575d6a3..0310ae015 100644
--- a/regress/expected/cypher_set.out
+++ b/regress/expected/cypher_set.out
@@ -102,11 +102,11 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = rand() RETURN n.i < 1 A
(3 rows)
SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = pi() RETURN n$$) AS (a agtype);
- a
-------------------------------------------------------------------------------------------------------
- {"id": 844424930131969, "label": "v", "properties": {"i": 3.14159265358979}}::vertex
- {"id": 844424930131971, "label": "v", "properties": {"i": 3.14159265358979}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3.14159265358979, "j": 5}}::vertex
+ a
+-------------------------------------------------------------------------------------------------------
+ {"id": 844424930131969, "label": "v", "properties": {"i": 3.141592653589793}}::vertex
+ {"id": 844424930131971, "label": "v", "properties": {"i": 3.141592653589793}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3.141592653589793, "j": 5}}::vertex
(3 rows)
--Handle Inheritance
@@ -182,6 +182,29 @@ $$) AS (a agtype);
{"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 50, "j": 5, "y": 50, "z": 99}}::vertex
(1 row)
+SELECT * FROM cypher('cypher_set', $$
+ MATCH (n {j: 5})
+ SET n.y = 53
+ SET n.y = 50
+ SET n.z = 99
+ SET n.arr = [n.y, n.z]
+ RETURN n
+$$) AS (a agtype);
+ a
+---------------------------------------------------------------------------------------------------------------------------
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 50, "j": 5, "y": 50, "z": 99, "arr": [50, 99]}}::vertex
+(1 row)
+
+SELECT * FROM cypher('cypher_set', $$
+ MATCH (n {j: 5})
+ REMOVE n.arr
+ RETURN n
+$$) AS (a agtype);
+ a
+----------------------------------------------------------------------------------------------------------
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 50, "j": 5, "y": 50, "z": 99}}::vertex
+(1 row)
+
SELECT * FROM cypher('cypher_set', $$
MATCH (n {j: 5})
RETURN n
@@ -238,6 +261,46 @@ $$) AS (a agtype);
{"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 50, "j": 5, "y": 2, "z": 99}}::vertex
(2 rows)
+-- Test that SET works with nodes(path) and relationships(path)
+SELECT * FROM cypher('cypher_set', $$
+ MATCH p=(n)-[e:e {j:34}]->()
+ WITH nodes(p) AS ns
+ WITH ns[0] AS n
+ SET n.k = 99
+ SET n.k = 999
+ RETURN n
+$$) AS (a agtype);
+ a
+-------------------------------------------------------------------------------------------------------------------
+ {"id": 281474976710658, "label": "", "properties": {"k": 999, "y": 1}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 50, "j": 5, "k": 999, "y": 2, "z": 99}}::vertex
+(2 rows)
+
+SELECT * FROM cypher('cypher_set', $$
+ MATCH p=(n)-[e:e {j:34}]->()
+ WITH relationships(p) AS rs
+ WITH rs[0] AS r
+ SET r.l = 99
+ SET r.l = 999
+ RETURN r
+$$) AS (a agtype);
+ a
+--------------------------------------------------------------------------------------------------------------------------------------------------
+ {"id": 1125899906842630, "label": "e", "end_id": 281474976710659, "start_id": 281474976710658, "properties": {"j": 34, "l": 999, "y": 99}}::edge
+ {"id": 1125899906842629, "label": "e", "end_id": 844424930131970, "start_id": 844424930131970, "properties": {"j": 34, "l": 999}}::edge
+(2 rows)
+
+SELECT * FROM cypher('cypher_set', $$
+ MATCH p=(n)-[e:e {j:34}]->()
+ REMOVE n.k, e.l
+ RETURN p
+$$) AS (a agtype);
+ a
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ [{"id": 281474976710658, "label": "", "properties": {"y": 1}}::vertex, {"id": 1125899906842630, "label": "e", "end_id": 281474976710659, "start_id": 281474976710658, "properties": {"j": 34, "y": 99}}::edge, {"id": 281474976710659, "label": "", "properties": {"y": 2}}::vertex]::path
+ [{"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 50, "j": 5, "y": 2, "z": 99}}::vertex, {"id": 1125899906842629, "label": "e", "end_id": 844424930131970, "start_id": 844424930131970, "properties": {"j": 34}}::edge, {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 50, "j": 5, "y": 2, "z": 99}}::vertex]::path
+(2 rows)
+
SELECT * FROM cypher('cypher_set', $$MATCH (n)-[]->(n) SET n.y = 99 RETURN n$$) AS (a agtype);
a
----------------------------------------------------------------------------------------------------------
@@ -263,9 +326,9 @@ EXECUTE p_1;
{"id": 281474976710659, "label": "", "properties": {"i": 3, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": 3, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": 3, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": 3, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": 3, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex
@@ -278,9 +341,9 @@ EXECUTE p_1;
{"id": 281474976710659, "label": "", "properties": {"i": 3, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": 3, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": 3, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": 3, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": 3, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex
@@ -294,9 +357,9 @@ EXECUTE p_2('{"var_name": 4}');
{"id": 281474976710659, "label": "", "properties": {"i": 4, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": 4, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": 4, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 4, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": 4, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": 4, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 4, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex
@@ -309,9 +372,9 @@ EXECUTE p_2('{"var_name": 6}');
{"id": 281474976710659, "label": "", "properties": {"i": 6, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": 6, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": 6, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 6, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": 6, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": 6, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 6, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex
@@ -333,9 +396,9 @@ SELECT set_test();
{"id": 281474976710659, "label": "", "properties": {"i": 7, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": 7, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": 7, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": 7, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": 7, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex
@@ -348,9 +411,9 @@ SELECT set_test();
{"id": 281474976710659, "label": "", "properties": {"i": 7, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": 7, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": 7, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": 7, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": 7, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex
@@ -366,9 +429,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = 3, n.j = 5 RETURN n $$)
{"id": 281474976710659, "label": "", "properties": {"i": 3, "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": 3, "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": 3, "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": 3, "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": 3, "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex
@@ -517,9 +580,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = [3, 'test', [1, 2, 3],
{"id": 281474976710659, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex
@@ -535,9 +598,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype);
{"id": 281474976710659, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex
@@ -567,9 +630,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = [] RETURN n$$) AS (a ag
{"id": 281474976710659, "label": "", "properties": {"i": [], "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": [], "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": [], "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex
@@ -585,9 +648,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype);
{"id": 281474976710659, "label": "", "properties": {"i": [], "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": [], "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": [], "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex
@@ -604,9 +667,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = {prop1: 3, prop2:'test'
{"id": 281474976710659, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex
@@ -622,9 +685,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype);
{"id": 281474976710659, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex
@@ -654,9 +717,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = {} RETURN n$$) AS (a ag
{"id": 281474976710659, "label": "", "properties": {"i": {}, "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": {}, "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": {}, "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex
@@ -672,9 +735,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype);
{"id": 281474976710659, "label": "", "properties": {"i": {}, "j": 5, "y": 2}}::vertex
{"id": 281474976710658, "label": "", "properties": {"i": {}, "j": 5, "t": 150, "y": 1}}::vertex
{"id": 281474976710657, "label": "", "properties": {"i": {}, "j": 5, "t": 150}}::vertex
- {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 844424930131969, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex
{"id": 844424930131971, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex
+ {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex
{"id": 1407374883553281, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex
{"id": 1407374883553282, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex
{"id": 1407374883553283, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex
diff --git a/regress/expected/expr.out b/regress/expected/expr.out
index 1dfcba9db..37c7849b0 100644
--- a/regress/expected/expr.out
+++ b/regress/expected/expr.out
@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
+SET extra_float_digits = 0;
LOAD 'age';
SET search_path TO ag_catalog;
SELECT * FROM create_graph('expr');
@@ -1232,7 +1233,7 @@ $$) AS (i int);
SELECT * FROM cypher('type_coercion', $$
RETURN '1.0'
$$) AS (i bigint);
-ERROR: invalid input syntax for integer: "1.0"
+ERROR: invalid input syntax for type bigint: "1.0"
-- Casting to ints that will cause overflow
SELECT * FROM cypher('type_coercion', $$
RETURN 10000000000000000000
@@ -1417,11 +1418,11 @@ SELECT agtype_in('null::int');
SELECT * FROM cypher('expr', $$
RETURN '0.0'::int
$$) AS r(result agtype);
-ERROR: invalid input syntax for integer: "0.0"
+ERROR: invalid input syntax for type bigint: "0.0"
SELECT * FROM cypher('expr', $$
RETURN '1.5'::int
$$) AS r(result agtype);
-ERROR: invalid input syntax for integer: "1.5"
+ERROR: invalid input syntax for type bigint: "1.5"
SELECT * FROM cypher('graph_name', $$
RETURN "15555555555555555555555555555"::int
$$) AS (string_result agtype);
@@ -1439,11 +1440,11 @@ ERROR: bigint out of range
SELECT * FROM cypher('expr', $$
RETURN ''::int
$$) AS r(result agtype);
-ERROR: invalid input syntax for integer: ""
+ERROR: invalid input syntax for type bigint: ""
SELECT * FROM cypher('expr', $$
RETURN 'false_'::int
$$) AS r(result agtype);
-ERROR: invalid input syntax for integer: "false_"
+ERROR: invalid input syntax for type bigint: "false_"
--
-- Test from an agtype value to agtype int
--
@@ -5580,7 +5581,7 @@ SELECT * FROM cypher('UCSC', $$ MATCH (u) RETURN stDev(u.gpa), stDevP(u.gpa) $$)
AS (stDev agtype, stDevP agtype);
stdev | stdevp
-------------------+-------------------
- 0.549566929066705 | 0.508800109100231
+ 0.549566929066706 | 0.508800109100232
(1 row)
-- should return 0
diff --git a/regress/expected/scan.out b/regress/expected/scan.out
index af82dbf22..d96d80049 100644
--- a/regress/expected/scan.out
+++ b/regress/expected/scan.out
@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
+SET extra_float_digits = 0;
LOAD 'age';
SET search_path TO ag_catalog;
SELECT create_graph('scan');
diff --git a/regress/sql/agtype.sql b/regress/sql/agtype.sql
index e3bef554b..3fb81d01e 100644
--- a/regress/sql/agtype.sql
+++ b/regress/sql/agtype.sql
@@ -25,6 +25,7 @@
-- Load extension and set path
--
LOAD 'age';
+SET extra_float_digits = 0;
SET search_path TO ag_catalog;
--
diff --git a/regress/sql/catalog.sql b/regress/sql/catalog.sql
index 363ac66b9..96c63e47a 100644
--- a/regress/sql/catalog.sql
+++ b/regress/sql/catalog.sql
@@ -25,7 +25,7 @@ SET search_path TO ag_catalog;
--
SELECT create_graph('graph');
-SELECT * FROM ag_graph WHERE name = 'graph';
+SELECT name, namespace FROM ag_graph WHERE name = 'graph';
-- create a label to test drop_label()
SELECT * FROM cypher('graph', $$CREATE (:l)$$) AS r(a agtype);
@@ -62,18 +62,18 @@ SELECT create_graph('GraphA');
SELECT create_graph('GraphB');
-- Show GraphA's construction to verify case is preserved.
-SELECT * FROM ag_graph WHERE name = 'GraphA';
+SELECT name, namespace FROM ag_graph WHERE name = 'GraphA';
SELECT nspname FROM pg_namespace WHERE nspname = 'GraphA';
-- Rename GraphA to GraphX.
SELECT alter_graph('GraphA', 'RENAME', 'GraphX');
-- Show GraphX's construction to verify case is preserved.
-SELECT * FROM ag_graph WHERE name = 'GraphX';
+SELECT name, namespace FROM ag_graph WHERE name = 'GraphX';
SELECT nspname FROM pg_namespace WHERE nspname = 'GraphX';
-- Verify there isn't a graph GraphA anymore.
-SELECT * FROM ag_graph WHERE name = 'GraphA';
+SELECT name, namespace FROM ag_graph WHERE name = 'GraphA';
SELECT * FROM pg_namespace WHERE nspname = 'GraphA';
-- Sanity check that graphx does not exist - should return 0.
diff --git a/regress/sql/cypher_call.sql b/regress/sql/cypher_call.sql
index 992948c19..236b0e7ae 100644
--- a/regress/sql/cypher_call.sql
+++ b/regress/sql/cypher_call.sql
@@ -35,8 +35,8 @@ CREATE FUNCTION call_stmt_test.add_agtype(agtype, agtype) RETURNS agtype
IMMUTABLE
RETURNS NULL ON NULL INPUT;
-/*
- * CALL (solo)
+/*
+ * CALL (solo)
*/
SELECT * FROM cypher('cypher_call', $$CALL sqrt(64)$$) as (sqrt agtype);
@@ -106,4 +106,4 @@ SELECT * FROM cypher('cypher_call', $$ CALL sqrt(64) YIELD sqrt AS sqrt1 CALL sq
SELECT * FROM cypher('cypher_call', $$ CALL sqrt(64) YIELD sqrt CALL agtype_sum(2,2) YIELD agtype_sum AS sqrt RETURN sqrt, sqrt $$) as (a agtype, b agtype);
DROP SCHEMA call_stmt_test CASCADE;
-SELECT drop_graph('cypher_call', true);
\ No newline at end of file
+SELECT drop_graph('cypher_call', true);
diff --git a/regress/sql/cypher_set.sql b/regress/sql/cypher_set.sql
index 69dea2e4b..484cbff7d 100644
--- a/regress/sql/cypher_set.sql
+++ b/regress/sql/cypher_set.sql
@@ -62,6 +62,21 @@ SELECT * FROM cypher('cypher_set', $$
RETURN n
$$) AS (a agtype);
+SELECT * FROM cypher('cypher_set', $$
+ MATCH (n {j: 5})
+ SET n.y = 53
+ SET n.y = 50
+ SET n.z = 99
+ SET n.arr = [n.y, n.z]
+ RETURN n
+$$) AS (a agtype);
+
+SELECT * FROM cypher('cypher_set', $$
+ MATCH (n {j: 5})
+ REMOVE n.arr
+ RETURN n
+$$) AS (a agtype);
+
SELECT * FROM cypher('cypher_set', $$
MATCH (n {j: 5})
RETURN n
@@ -96,6 +111,32 @@ SELECT * FROM cypher('cypher_set', $$
RETURN n
$$) AS (a agtype);
+-- Test that SET works with nodes(path) and relationships(path)
+
+SELECT * FROM cypher('cypher_set', $$
+ MATCH p=(n)-[e:e {j:34}]->()
+ WITH nodes(p) AS ns
+ WITH ns[0] AS n
+ SET n.k = 99
+ SET n.k = 999
+ RETURN n
+$$) AS (a agtype);
+
+SELECT * FROM cypher('cypher_set', $$
+ MATCH p=(n)-[e:e {j:34}]->()
+ WITH relationships(p) AS rs
+ WITH rs[0] AS r
+ SET r.l = 99
+ SET r.l = 999
+ RETURN r
+$$) AS (a agtype);
+
+SELECT * FROM cypher('cypher_set', $$
+ MATCH p=(n)-[e:e {j:34}]->()
+ REMOVE n.k, e.l
+ RETURN p
+$$) AS (a agtype);
+
SELECT * FROM cypher('cypher_set', $$MATCH (n)-[]->(n) SET n.y = 99 RETURN n$$) AS (a agtype);
SELECT * FROM cypher('cypher_set', $$MATCH (n) MATCH (n)-[]->(m) SET n.t = 150 RETURN n$$) AS (a agtype);
diff --git a/regress/sql/expr.sql b/regress/sql/expr.sql
index 186cc6fc6..c9547e332 100644
--- a/regress/sql/expr.sql
+++ b/regress/sql/expr.sql
@@ -17,6 +17,7 @@
* under the License.
*/
+SET extra_float_digits = 0;
LOAD 'age';
SET search_path TO ag_catalog;
diff --git a/regress/sql/scan.sql b/regress/sql/scan.sql
index 97804e5c7..840a822f2 100644
--- a/regress/sql/scan.sql
+++ b/regress/sql/scan.sql
@@ -17,6 +17,7 @@
* under the License.
*/
+SET extra_float_digits = 0;
LOAD 'age';
SET search_path TO ag_catalog;
diff --git a/src/backend/catalog/ag_graph.c b/src/backend/catalog/ag_graph.c
index c1e53d6ab..f4a0d7213 100644
--- a/src/backend/catalog/ag_graph.c
+++ b/src/backend/catalog/ag_graph.c
@@ -26,9 +26,11 @@
#include "access/skey.h"
#include "access/stratnum.h"
#include "catalog/indexing.h"
+#include "catalog/namespace.h"
+#include "nodes/makefuncs.h"
#include "storage/lockdefs.h"
-#include "utils/builtins.h"
#include "utils/fmgroids.h"
+#include "utils/fmgrprotos.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/relcache.h"
@@ -39,36 +41,36 @@
static Oid get_graph_namespace(const char *graph_name);
// INSERT INTO ag_catalog.ag_graph VALUES (graph_name, nsp_id)
-Oid insert_graph(const Name graph_name, const Oid nsp_id)
+void insert_graph(const Name graph_name, const Oid nsp_id)
{
Datum values[Natts_ag_graph];
bool nulls[Natts_ag_graph];
Relation ag_graph;
HeapTuple tuple;
- Oid graph_oid;
+
AssertArg(graph_name);
AssertArg(OidIsValid(nsp_id));
+ ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock);
+ values[Anum_ag_graph_oid - 1] = ObjectIdGetDatum(nsp_id);
+ nulls[Anum_ag_graph_oid - 1] = false;
+
values[Anum_ag_graph_name - 1] = NameGetDatum(graph_name);
nulls[Anum_ag_graph_name - 1] = false;
values[Anum_ag_graph_namespace - 1] = ObjectIdGetDatum(nsp_id);
nulls[Anum_ag_graph_namespace - 1] = false;
- ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock);
-
tuple = heap_form_tuple(RelationGetDescr(ag_graph), values, nulls);
/*
* CatalogTupleInsert() is originally for PostgreSQL's catalog. However,
* it is used at here for convenience.
*/
- graph_oid = CatalogTupleInsert(ag_graph, tuple);
-
- heap_close(ag_graph, RowExclusiveLock);
+ CatalogTupleInsert(ag_graph, tuple);
- return graph_oid;
+ table_close(ag_graph, RowExclusiveLock);
}
// DELETE FROM ag_catalog.ag_graph WHERE name = graph_name
@@ -82,7 +84,7 @@ void delete_graph(const Name graph_name)
ScanKeyInit(&scan_keys[0], Anum_ag_graph_name, BTEqualStrategyNumber,
F_NAMEEQ, NameGetDatum(graph_name));
- ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock);
+ ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock);
scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true,
NULL, 1, scan_keys);
@@ -97,7 +99,7 @@ void delete_graph(const Name graph_name)
CatalogTupleDelete(ag_graph, &tuple->t_self);
systable_endscan(scan_desc);
- heap_close(ag_graph, RowExclusiveLock);
+ table_close(ag_graph, RowExclusiveLock);
}
// Function updates graph name in ag_graph table.
@@ -116,7 +118,7 @@ void update_graph_name(const Name graph_name, const Name new_name)
ScanKeyInit(&scan_keys[0], Anum_ag_graph_name, BTEqualStrategyNumber,
F_NAMEEQ, NameGetDatum(graph_name));
- ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock);
+ ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock);
scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true,
NULL, 1, scan_keys);
@@ -146,7 +148,7 @@ void update_graph_name(const Name graph_name, const Name new_name)
// end scan and close ag_graph
systable_endscan(scan_desc);
- heap_close(ag_graph, RowExclusiveLock);
+ table_close(ag_graph, RowExclusiveLock);
}
Oid get_graph_oid(const char *graph_name)
@@ -155,9 +157,13 @@ Oid get_graph_oid(const char *graph_name)
cache_data = search_graph_name_cache(graph_name);
if (cache_data)
+ {
return cache_data->oid;
+ }
else
+ {
return InvalidOid;
+ }
}
static Oid get_graph_namespace(const char *graph_name)
diff --git a/src/backend/catalog/ag_label.c b/src/backend/catalog/ag_label.c
index 41a11abef..9cd892356 100644
--- a/src/backend/catalog/ag_label.c
+++ b/src/backend/catalog/ag_label.c
@@ -26,6 +26,7 @@
#include "access/skey.h"
#include "access/stratnum.h"
#include "catalog/indexing.h"
+#include "catalog/namespace.h"
#include "fmgr.h"
#include "nodes/execnodes.h"
#include "nodes/makefuncs.h"
@@ -43,10 +44,13 @@
#include "utils/ag_cache.h"
#include "utils/graphid.h"
-// INSERT INTO ag_catalog.ag_label
-// VALUES (label_name, label_graph, label_id, label_kind, label_relation, seq_name)
-Oid insert_label(const char *label_name, Oid label_graph, int32 label_id,
- char label_kind, Oid label_relation, const char *seq_name)
+/*
+ * INSERT INTO ag_catalog.ag_label
+ * VALUES (label_name, label_graph, label_id, label_kind,
+ * label_relation, seq_name)
+ */
+void insert_label(const char *label_name, Oid graph_oid, int32 label_id,
+ char label_kind, Oid label_relation, const char *seq_name)
{
NameData label_name_data;
NameData seq_name_data;
@@ -54,25 +58,25 @@ Oid insert_label(const char *label_name, Oid label_graph, int32 label_id,
bool nulls[Natts_ag_label];
Relation ag_label;
HeapTuple tuple;
- Oid label_oid;
/*
* NOTE: Is it better to make use of label_id and label_kind domain types
* than to use assert to check label_id and label_kind are valid?
*/
AssertArg(label_name);
- AssertArg(OidIsValid(label_graph));
AssertArg(label_id_is_valid(label_id));
AssertArg(label_kind == LABEL_KIND_VERTEX ||
label_kind == LABEL_KIND_EDGE);
AssertArg(OidIsValid(label_relation));
AssertArg(seq_name);
+ ag_label = table_open(ag_label_relation_id(), RowExclusiveLock);
+
namestrcpy(&label_name_data, label_name);
values[Anum_ag_label_name - 1] = NameGetDatum(&label_name_data);
nulls[Anum_ag_label_name - 1] = false;
- values[Anum_ag_label_graph - 1] = ObjectIdGetDatum(label_graph);
+ values[Anum_ag_label_graph - 1] = ObjectIdGetDatum(graph_oid);
nulls[Anum_ag_label_graph - 1] = false;
values[Anum_ag_label_id - 1] = Int32GetDatum(label_id);
@@ -88,19 +92,15 @@ Oid insert_label(const char *label_name, Oid label_graph, int32 label_id,
values[Anum_ag_label_seq_name - 1] = NameGetDatum(&seq_name_data);
nulls[Anum_ag_label_seq_name - 1] = false;
- ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock);
-
tuple = heap_form_tuple(RelationGetDescr(ag_label), values, nulls);
/*
* CatalogTupleInsert() is originally for PostgreSQL's catalog. However,
* it is used at here for convenience.
*/
- label_oid = CatalogTupleInsert(ag_label, tuple);
-
- heap_close(ag_label, RowExclusiveLock);
+ CatalogTupleInsert(ag_label, tuple);
- return label_oid;
+ table_close(ag_label, RowExclusiveLock);
}
// DELETE FROM ag_catalog.ag_label WHERE relation = relation
@@ -114,7 +114,7 @@ void delete_label(Oid relation)
ScanKeyInit(&scan_keys[0], Anum_ag_label_relation, BTEqualStrategyNumber,
F_OIDEQ, ObjectIdGetDatum(relation));
- ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock);
+ ag_label = table_open(ag_label_relation_id(), RowExclusiveLock);
scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(),
true, NULL, 1, scan_keys);
@@ -129,45 +129,34 @@ void delete_label(Oid relation)
CatalogTupleDelete(ag_label, &tuple->t_self);
systable_endscan(scan_desc);
- heap_close(ag_label, RowExclusiveLock);
+ table_close(ag_label, RowExclusiveLock);
}
-Oid get_label_oid(const char *label_name, Oid label_graph)
+int32 get_label_id(const char *label_name, Oid graph_oid)
{
label_cache_data *cache_data;
- cache_data = search_label_name_graph_cache(label_name, label_graph);
- if (cache_data)
- return cache_data->oid;
- else
- return InvalidOid;
-}
-
-int32 get_label_id(const char *label_name, Oid label_graph)
-{
- label_cache_data *cache_data;
-
- cache_data = search_label_name_graph_cache(label_name, label_graph);
+ cache_data = search_label_name_graph_cache(label_name, graph_oid);
if (cache_data)
return cache_data->id;
else
return INVALID_LABEL_ID;
}
-Oid get_label_relation(const char *label_name, Oid label_graph)
+Oid get_label_relation(const char *label_name, Oid graph_oid)
{
label_cache_data *cache_data;
- cache_data = search_label_name_graph_cache(label_name, label_graph);
+ cache_data = search_label_name_graph_cache(label_name, graph_oid);
if (cache_data)
return cache_data->relation;
else
return InvalidOid;
}
-char *get_label_relation_name(const char *label_name, Oid label_graph)
+char *get_label_relation_name(const char *label_name, Oid graph_oid)
{
- return get_rel_name(get_label_relation(label_name, label_graph));
+ return get_rel_name(get_label_relation(label_name, graph_oid));
}
char get_label_kind(const char *label_name, Oid label_graph)
@@ -206,7 +195,7 @@ Datum _label_name(PG_FUNCTION_ARGS)
label_id = (int32)(((uint64)AG_GETARG_GRAPHID(1)) >> ENTRY_ID_BITS);
- label_cache = search_label_graph_id_cache(graph, label_id);
+ label_cache = search_label_graph_oid_cache(graph, label_id);
label_name = NameStr(label_cache->name);
@@ -243,23 +232,23 @@ PG_FUNCTION_INFO_V1(_extract_label_id);
Datum _extract_label_id(PG_FUNCTION_ARGS)
{
- graphid graph_id;
+ graphid graph_oid;
if (PG_ARGISNULL(0))
{
ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("graph_id must not be null")));
+ errmsg("graph_oid must not be null")));
}
- graph_id = AG_GETARG_GRAPHID(0);
+ graph_oid = AG_GETARG_GRAPHID(0);
- PG_RETURN_INT32(get_graphid_label_id(graph_id));
+ PG_RETURN_INT32(get_graphid_label_id(graph_oid));
}
-bool label_id_exists(Oid label_graph, int32 label_id)
+bool label_id_exists(Oid graph_oid, int32 label_id)
{
label_cache_data *cache_data;
- cache_data = search_label_graph_id_cache(label_graph, label_id);
+ cache_data = search_label_graph_oid_cache(graph_oid, label_id);
if (cache_data)
return true;
else
@@ -288,15 +277,16 @@ RangeVar *get_label_range_var(char *graph_name, Oid graph_oid,
* XXX: We may want to use the cache system for this function,
* however the cache system currently requires us to know the
* name of the label we want.
- */
+ */
List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid)
{
List *labels = NIL;
ScanKeyData scan_keys[2];
Relation ag_label;
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
HeapTuple tuple;
TupleTableSlot *slot;
+ ResultRelInfo *resultRelInfo;
// setup scan keys to get all edges for the given graph oid
ScanKeyInit(&scan_keys[1], Anum_ag_label_graph, BTEqualStrategyNumber,
@@ -305,11 +295,15 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid)
F_CHAREQ, CharGetDatum(LABEL_TYPE_EDGE));
// setup the table to be scanned
- ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock);
- scan_desc = heap_beginscan(ag_label, estate->es_snapshot, 2, scan_keys);
+ ag_label = table_open(ag_label_relation_id(), RowExclusiveLock);
+ scan_desc = table_beginscan(ag_label, estate->es_snapshot, 2, scan_keys);
- slot = ExecInitExtraTupleSlot(estate,
- RelationGetDescr(ag_label));
+ resultRelInfo = create_entity_result_rel_info(estate, "ag_catalog",
+ "ag_label");
+
+ slot = ExecInitExtraTupleSlot(
+ estate, RelationGetDescr(resultRelInfo->ri_RelationDesc),
+ &TTSOpsHeapTuple);
// scan through the results and get all the label names.
while(true)
@@ -324,7 +318,7 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid)
if (!HeapTupleIsValid(tuple))
break;
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+ ExecStoreHeapTuple(tuple, slot, false);
datum = slot_getattr(slot, Anum_ag_label_name, &isNull);
label = DatumGetName(datum);
@@ -332,8 +326,10 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid)
labels = lappend(labels, label);
}
- heap_endscan(scan_desc);
- heap_close(ag_label, RowExclusiveLock);
+ table_endscan(scan_desc);
+
+ destroy_entity_result_rel_info(resultRelInfo);
+ table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock);
return labels;
}
diff --git a/src/backend/commands/graph_commands.c b/src/backend/commands/graph_commands.c
index 4216652bc..7df40b948 100644
--- a/src/backend/commands/graph_commands.c
+++ b/src/backend/commands/graph_commands.c
@@ -19,9 +19,9 @@
#include "postgres.h"
-#include "access/xact.h"
#include "access/genam.h"
#include "access/heapam.h"
+#include "access/xact.h"
#include "catalog/dependency.h"
#include "catalog/objectaddress.h"
#include "commands/defrem.h"
@@ -35,9 +35,8 @@
#include "nodes/pg_list.h"
#include "nodes/value.h"
#include "parser/parser.h"
-#include "utils/fmgroids.h"
-#include "utils/relcache.h"
#include "utils/rel.h"
+#include "utils/relcache.h"
#include "catalog/ag_graph.h"
#include "catalog/ag_label.h"
@@ -182,9 +181,8 @@ Datum drop_graph(PG_FUNCTION_ARGS)
graph_name_str = NameStr(*graph_name);
if (!graph_exists(graph_name_str))
{
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("graph \"%s\" does not exist", graph_name_str)));
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA),
+ errmsg("graph \"%s\" does not exist", graph_name_str)));
}
drop_schema_for_graph(graph_name_str, cascade);
@@ -367,11 +365,11 @@ List *get_graphnames(void)
List *graphnames = NIL;
char *str;
- ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock);
+ ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock);
scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true,
NULL, 0, NULL);
- slot = MakeTupleTableSlot(RelationGetDescr(ag_graph));
+ slot = MakeTupleTableSlot(RelationGetDescr(ag_graph), &TTSOpsHeapTuple);
for (;;)
{
@@ -380,17 +378,17 @@ List *get_graphnames(void)
break;
ExecClearTuple(slot);
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+ ExecStoreHeapTuple(tuple, slot, false);
slot_getallattrs(slot);
- str = DatumGetCString(slot->tts_values[0]);
+ str = DatumGetCString(slot->tts_values[Anum_ag_graph_name - 1]);
graphnames = lappend(graphnames, str);
}
ExecDropSingleTupleTableSlot(slot);
systable_endscan(scan_desc);
- heap_close(ag_graph, RowExclusiveLock);
+ table_close(ag_graph, RowExclusiveLock);
return graphnames;
}
diff --git a/src/backend/commands/label_commands.c b/src/backend/commands/label_commands.c
index b696e9637..39e9dce37 100644
--- a/src/backend/commands/label_commands.c
+++ b/src/backend/commands/label_commands.c
@@ -260,8 +260,8 @@ Datum create_elabel(PG_FUNCTION_ARGS)
* new table and sequence. Returns the oid from the new tuple in
* ag_catalog.ag_label.
*/
-Oid create_label(char *graph_name, char *label_name, char label_type,
- List *parents)
+void create_label(char *graph_name, char *label_name, char label_type,
+ List *parents)
{
graph_cache_data *cache_data;
Oid graph_oid;
@@ -272,7 +272,12 @@ Oid create_label(char *graph_name, char *label_name, char label_type,
RangeVar *seq_range_var;
int32 label_id;
Oid relation_id;
- Oid label_oid;
+
+ if (!is_valid_label(label_name, label_type))
+ {
+ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA),
+ errmsg("label name is invalid")));
+ }
if (!is_valid_label(label_name, label_type))
{
@@ -314,12 +319,10 @@ Oid create_label(char *graph_name, char *label_name, char label_type,
// get a new "id" for the new label
label_id = get_new_label_id(graph_oid, nsp_id);
- label_oid = insert_label(label_name, graph_oid, label_id, label_type,
- relation_id,seq_name);
+ insert_label(label_name, graph_oid, label_id, label_type,
+ relation_id, seq_name);
CommandCounterIncrement();
-
- return label_oid;
}
// CREATE TABLE `schema_name`.`rel_name` (
@@ -684,13 +687,15 @@ static int32 get_new_label_id(Oid graph_oid, Oid nsp_id)
for (cnt = LABEL_ID_MIN; cnt <= LABEL_ID_MAX; cnt++)
{
- int64 label_id;
+ int32 label_id;
// the data type of the sequence is integer (int4)
- label_id = nextval_internal(seq_id, true);
+ label_id = (int32) nextval_internal(seq_id, true);
Assert(label_id_is_valid(label_id));
if (!label_id_exists(graph_oid, label_id))
- return (int32)label_id;
+ {
+ return (int32) label_id;
+ }
}
ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
diff --git a/src/backend/executor/cypher_create.c b/src/backend/executor/cypher_create.c
index bf349e8ac..4b4810924 100644
--- a/src/backend/executor/cypher_create.c
+++ b/src/backend/executor/cypher_create.c
@@ -19,6 +19,7 @@
#include "postgres.h"
+#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "executor/tuptable.h"
@@ -26,17 +27,14 @@
#include "nodes/extensible.h"
#include "nodes/nodes.h"
#include "nodes/plannodes.h"
-#include "parser/parse_relation.h"
#include "rewrite/rewriteHandler.h"
#include "utils/rel.h"
-#include "utils/tqual.h"
#include "catalog/ag_label.h"
#include "executor/cypher_executor.h"
#include "executor/cypher_utils.h"
#include "nodes/cypher_nodes.h"
#include "utils/agtype.h"
-#include "utils/ag_cache.h"
#include "utils/graphid.h"
static void begin_cypher_create(CustomScanState *node, EState *estate,
@@ -85,7 +83,8 @@ static void begin_cypher_create(CustomScanState *node, EState *estate,
ExecAssignExprContext(estate, &node->ss.ps);
ExecInitScanTupleSlot(estate, &node->ss,
- ExecGetResultType(node->ss.ps.lefttree));
+ ExecGetResultType(node->ss.ps.lefttree),
+ &TTSOpsHeapTuple);
if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags))
{
@@ -108,7 +107,7 @@ static void begin_cypher_create(CustomScanState *node, EState *estate,
continue;
// Open relation and acquire a row exclusive lock.
- rel = heap_open(cypher_node->relid, RowExclusiveLock);
+ rel = table_open(cypher_node->relid, RowExclusiveLock);
// Initialize resultRelInfo for the vertex
cypher_node->resultRelInfo = makeNode(ResultRelInfo);
@@ -120,9 +119,8 @@ static void begin_cypher_create(CustomScanState *node, EState *estate,
ExecOpenIndices(cypher_node->resultRelInfo, false);
// Setup the relation's tuple slot
- cypher_node->elemTupleSlot = ExecInitExtraTupleSlot(
- estate,
- RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc));
+ cypher_node->elemTupleSlot = table_slot_create(
+ rel, &estate->es_tupleTable);
if (cypher_node->id_expr != NULL)
{
@@ -295,8 +293,8 @@ static void end_cypher_create(CustomScanState *node)
ExecCloseIndices(cypher_node->resultRelInfo);
// close the relation itself
- heap_close(cypher_node->resultRelInfo->ri_RelationDesc,
- RowExclusiveLock);
+ table_close(cypher_node->resultRelInfo->ri_RelationDesc,
+ RowExclusiveLock);
}
}
}
@@ -446,6 +444,7 @@ static void create_edge(cypher_create_custom_scan_state *css,
prev_path = lappend(prev_path, DatumGetPointer(result));
css->path_values = list_concat(prev_path, css->path_values);
}
+
if (CYPHER_TARGET_NODE_IS_VARIABLE(node->flags))
{
scantuple->tts_values[node->tuple_position - 1] = result;
diff --git a/src/backend/executor/cypher_delete.c b/src/backend/executor/cypher_delete.c
index 866432d6b..a83003607 100644
--- a/src/backend/executor/cypher_delete.c
+++ b/src/backend/executor/cypher_delete.c
@@ -19,27 +19,23 @@
#include "postgres.h"
-#include "access/sysattr.h"
+#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/multixact.h"
+#include "access/table.h"
#include "access/xact.h"
-#include "storage/bufmgr.h"
#include "executor/tuptable.h"
#include "nodes/execnodes.h"
#include "nodes/extensible.h"
#include "nodes/nodes.h"
#include "nodes/plannodes.h"
#include "parser/parsetree.h"
-#include "parser/parse_relation.h"
-#include "rewrite/rewriteHandler.h"
+#include "storage/bufmgr.h"
#include "utils/rel.h"
-#include "utils/tqual.h"
#include "catalog/ag_label.h"
-#include "commands/label_commands.h"
#include "executor/cypher_executor.h"
#include "executor/cypher_utils.h"
-#include "parser/cypher_parse_node.h"
#include "nodes/cypher_nodes.h"
#include "utils/agtype.h"
#include "utils/graphid.h"
@@ -99,7 +95,8 @@ static void begin_cypher_delete(CustomScanState *node, EState *estate,
// setup scan tuple slot and projection info
ExecInitScanTupleSlot(estate, &node->ss,
- ExecGetResultType(node->ss.ps.lefttree));
+ ExecGetResultType(node->ss.ps.lefttree),
+ &TTSOpsHeapTuple);
if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags))
{
@@ -284,9 +281,9 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo,
{
ResultRelInfo *saved_resultRelInfo;
LockTupleMode lockmode;
- HeapUpdateFailureData hufd;
- HTSU_Result lock_result;
- HTSU_Result delete_result;
+ TM_FailureData hufd;
+ TM_Result lock_result;
+ TM_Result delete_result;
Buffer buffer;
// Find the physical tuple, this variable is coming from
@@ -303,11 +300,11 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo,
* It is possible the entity may have already been deleted. If the tuple
* can be deleted, the lock result will be HeapTupleMayBeUpdated. If the
* tuple was already deleted by this DELETE clause, the result would be
- * HeapTupleSelfUpdated, if the result was deleted by a previous delete
- * clause, the result will HeapTupleInvisible. Throw an error if any
+ * TM_SelfModified, if the result was deleted by a previous delete
+ * clause, the result will TM_Invisible. Throw an error if any
* other result was returned.
*/
- if (lock_result == HeapTupleMayBeUpdated)
+ if (lock_result == TM_Ok)
{
delete_result = heap_delete(resultRelInfo->ri_RelationDesc,
&tuple->t_self, GetCurrentCommandId(true),
@@ -320,30 +317,32 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo,
*/
switch (delete_result)
{
- case HeapTupleMayBeUpdated:
- break;
- case HeapTupleSelfUpdated:
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("deleting the same entity more than once cannot happen")));
- /* ereport never gets here */
- break;
- case HeapTupleUpdated:
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- /* ereport never gets here */
- break;
- default:
- elog(ERROR, "Entity failed to be update");
- /* elog never gets here */
- break;
+ case TM_Ok:
+ break;
+ case TM_SelfModified:
+ ereport(
+ ERROR,
+ (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg(
+ "deleting the same entity more than once cannot happen")));
+ /* ereport never gets here */
+ break;
+ case TM_Updated:
+ ereport(
+ ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+ /* ereport never gets here */
+ break;
+ default:
+ elog(ERROR, "Entity failed to be update");
+ /* elog never gets here */
+ break;
}
/* increment the command counter */
CommandCounterIncrement();
}
- else if (lock_result != HeapTupleInvisible &&
- lock_result != HeapTupleSelfUpdated)
+ else if (lock_result != TM_Invisible && lock_result != TM_SelfModified)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
@@ -374,7 +373,7 @@ static void process_delete_list(CustomScanState *node)
cypher_delete_item *item;
agtype_value *original_entity_value, *id, *label;
ScanKeyData scan_keys[1];
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
ResultRelInfo *resultRelInfo;
HeapTuple heap_tuple;
char *label_name;
@@ -424,8 +423,8 @@ static void process_delete_list(CustomScanState *node)
/*
* Setup the scan description, with the correct snapshot and scan keys.
*/
- scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc,
- estate->es_snapshot, 1, scan_keys);
+ scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc,
+ estate->es_snapshot, 1, scan_keys);
/* Retrieve the tuple. */
heap_tuple = heap_getnext(scan_desc, ForwardScanDirection);
@@ -437,7 +436,7 @@ static void process_delete_list(CustomScanState *node)
*/
if (!HeapTupleIsValid(heap_tuple))
{
- heap_endscan(scan_desc);
+ table_endscan(scan_desc);
destroy_entity_result_rel_info(resultRelInfo);
continue;
@@ -459,7 +458,7 @@ static void process_delete_list(CustomScanState *node)
delete_entity(estate, resultRelInfo, heap_tuple);
/* Close the scan and the relation. */
- heap_endscan(scan_desc);
+ table_endscan(scan_desc);
destroy_entity_result_rel_info(resultRelInfo);
}
}
@@ -492,18 +491,19 @@ static void find_connected_edges(CustomScanState *node, char *graph_name,
{
char *label_name = lfirst(lc);
ResultRelInfo *resultRelInfo;
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
HeapTuple tuple;
TupleTableSlot *slot;
resultRelInfo = create_entity_result_rel_info(estate,
graph_name, label_name);
- scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc,
- estate->es_snapshot, 0, NULL);
+ scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc,
+ estate->es_snapshot, 0, NULL);
- slot = ExecInitExtraTupleSlot(estate,
- RelationGetDescr(resultRelInfo->ri_RelationDesc));
+ slot = ExecInitExtraTupleSlot(
+ estate, RelationGetDescr(resultRelInfo->ri_RelationDesc),
+ &TTSOpsHeapTuple);
// scan the table
while(true)
@@ -517,7 +517,7 @@ static void find_connected_edges(CustomScanState *node, char *graph_name,
if (!HeapTupleIsValid(tuple))
break;
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+ ExecStoreHeapTuple(tuple, slot, false);
startid = GRAPHID_GET_DATUM(slot_getattr(slot, Anum_ag_label_edge_table_start_id, &isNull));
endid = GRAPHID_GET_DATUM(slot_getattr(slot, Anum_ag_label_edge_table_end_id, &isNull));
@@ -540,7 +540,7 @@ static void find_connected_edges(CustomScanState *node, char *graph_name,
}
}
- heap_endscan(scan_desc);
+ table_endscan(scan_desc);
destroy_entity_result_rel_info(resultRelInfo);
}
diff --git a/src/backend/executor/cypher_merge.c b/src/backend/executor/cypher_merge.c
index 0ac49761d..c386e6524 100644
--- a/src/backend/executor/cypher_merge.c
+++ b/src/backend/executor/cypher_merge.c
@@ -20,23 +20,20 @@
#include "postgres.h"
#include "access/htup_details.h"
+#include "access/table.h"
#include "access/xact.h"
#include "executor/tuptable.h"
#include "nodes/execnodes.h"
#include "nodes/extensible.h"
#include "nodes/nodes.h"
#include "nodes/plannodes.h"
-#include "parser/parse_relation.h"
-#include "rewrite/rewriteHandler.h"
#include "utils/rel.h"
-#include "utils/tqual.h"
#include "catalog/ag_label.h"
#include "executor/cypher_executor.h"
#include "executor/cypher_utils.h"
#include "nodes/cypher_nodes.h"
#include "utils/agtype.h"
-#include "utils/ag_cache.h"
#include "utils/graphid.h"
static void begin_cypher_merge(CustomScanState *node, EState *estate,
@@ -85,7 +82,8 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate,
ExecAssignExprContext(estate, &node->ss.ps);
ExecInitScanTupleSlot(estate, &node->ss,
- ExecGetResultType(node->ss.ps.lefttree));
+ ExecGetResultType(node->ss.ps.lefttree),
+ &TTSOpsVirtual);
/*
* When MERGE is not the last clause in a cypher query. Setup projection
@@ -120,7 +118,7 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate,
}
// Open relation and acquire a row exclusive lock.
- rel = heap_open(cypher_node->relid, RowExclusiveLock);
+ rel = table_open(cypher_node->relid, RowExclusiveLock);
// Initialize resultRelInfo for the vertex
cypher_node->resultRelInfo = makeNode(ResultRelInfo);
@@ -134,7 +132,8 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate,
// Setup the relation's tuple slot
cypher_node->elemTupleSlot = ExecInitExtraTupleSlot(
estate,
- RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc));
+ RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc),
+ &TTSOpsHeapTuple);
if (cypher_node->id_expr != NULL)
{
@@ -277,7 +276,6 @@ static void process_simple_merge(CustomScanState *node)
/* setup the scantuple that the process_path needs */
econtext->ecxt_scantuple = sss->ss.ss_ScanTupleSlot;
- econtext->ecxt_scantuple->tts_isempty = false;
process_path(css);
}
@@ -476,7 +474,6 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node)
*/
ExprContext *econtext = node->ss.ps.ps_ExprContext;
SubqueryScanState *sss = (SubqueryScanState *)node->ss.ps.lefttree;
- HeapTuple heap_tuple = NULL;
/*
* Our child execution node is always a subquery. If not there
@@ -503,8 +500,8 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node)
* it.
*/
ExecInitScanTupleSlot(estate, &sss->ss,
- ExecGetResultType(sss->subplan));
-
+ ExecGetResultType(sss->subplan),
+ &TTSOpsVirtual);
/* setup the scantuple that the process_path needs */
econtext->ecxt_scantuple = sss->ss.ss_ScanTupleSlot;
@@ -521,15 +518,8 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node)
*/
mark_tts_isnull(econtext->ecxt_scantuple);
- // create the physical heap tuple
- heap_tuple = heap_form_tuple(
- econtext->ecxt_scantuple->tts_tupleDescriptor,
- econtext->ecxt_scantuple->tts_values,
- econtext->ecxt_scantuple->tts_isnull);
-
- // store the heap tuple
- ExecStoreTuple(heap_tuple, econtext->ecxt_scantuple, InvalidBuffer,
- false);
+ // store the virtual tuple
+ ExecStoreVirtualTuple(econtext->ecxt_scantuple);
/*
* make the subquery's projection scan slot be the tuple table we
@@ -580,8 +570,8 @@ static void end_cypher_merge(CustomScanState *node)
ExecCloseIndices(cypher_node->resultRelInfo);
// close the relation itself
- heap_close(cypher_node->resultRelInfo->ri_RelationDesc,
- RowExclusiveLock);
+ table_close(cypher_node->resultRelInfo->ri_RelationDesc,
+ RowExclusiveLock);
}
}
diff --git a/src/backend/executor/cypher_set.c b/src/backend/executor/cypher_set.c
index 6afa2c8a3..7a8ec9b30 100644
--- a/src/backend/executor/cypher_set.c
+++ b/src/backend/executor/cypher_set.c
@@ -19,25 +19,20 @@
#include "postgres.h"
-#include "access/sysattr.h"
+#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"
-#include "storage/bufmgr.h"
#include "executor/tuptable.h"
#include "nodes/execnodes.h"
#include "nodes/extensible.h"
#include "nodes/nodes.h"
#include "nodes/plannodes.h"
-#include "parser/parsetree.h"
-#include "parser/parse_relation.h"
#include "rewrite/rewriteHandler.h"
+#include "storage/bufmgr.h"
#include "utils/rel.h"
-#include "catalog/ag_label.h"
-#include "commands/label_commands.h"
#include "executor/cypher_executor.h"
#include "executor/cypher_utils.h"
-#include "parser/cypher_parse_node.h"
#include "nodes/cypher_nodes.h"
#include "utils/agtype.h"
#include "utils/graphid.h"
@@ -82,7 +77,8 @@ static void begin_cypher_set(CustomScanState *node, EState *estate,
ExecAssignExprContext(estate, &node->ss.ps);
ExecInitScanTupleSlot(estate, &node->ss,
- ExecGetResultType(node->ss.ps.lefttree));
+ ExecGetResultType(node->ss.ps.lefttree),
+ &TTSOpsHeapTuple);
if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags))
{
@@ -112,11 +108,12 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo,
{
HeapTuple tuple = NULL;
LockTupleMode lockmode;
- HeapUpdateFailureData hufd;
- HTSU_Result lock_result;
- HTSU_Result update_result;
+ TM_FailureData hufd;
+ TM_Result lock_result;
Buffer buffer;
-
+ bool update_indexes;
+ TM_Result result;
+ CommandId cid = GetCurrentCommandId(true);
ResultRelInfo *saved_resultRelInfo = estate->es_result_relation_info;
estate->es_result_relation_info = resultRelInfo;
@@ -126,10 +123,11 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo,
GetCurrentCommandId(false), lockmode,
LockWaitBlock, false, &buffer, &hufd);
- if (lock_result == HeapTupleMayBeUpdated)
+ if (lock_result == TM_Ok)
{
+ ExecOpenIndices(resultRelInfo, false);
ExecStoreVirtualTuple(elemTupleSlot);
- tuple = ExecMaterializeSlot(elemTupleSlot);
+ tuple = ExecFetchSlotHeapTuple(elemTupleSlot, true, NULL);
tuple->t_self = old_tuple->t_self;
// Check the constraints of the tuple
@@ -139,26 +137,57 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo,
ExecConstraints(resultRelInfo, elemTupleSlot, estate);
}
- // Insert the tuple normally
- update_result = heap_update(resultRelInfo->ri_RelationDesc,
- &(tuple->t_self), tuple,
- GetCurrentCommandId(true),
- estate->es_crosscheck_snapshot, true, &hufd,
- &lockmode);
+ result = table_tuple_update(resultRelInfo->ri_RelationDesc,
+ &tuple->t_self, elemTupleSlot,
+ cid, estate->es_snapshot,
+ estate->es_crosscheck_snapshot,
+ true /* wait for commit */ ,
+ &hufd, &lockmode, &update_indexes);
+
+ if (result == TM_SelfModified)
+ {
+ if (hufd.cmax != cid)
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
+ errmsg("tuple to be updated was already modified")));
+ }
+
+ ExecCloseIndices(resultRelInfo);
+ estate->es_result_relation_info = saved_resultRelInfo;
+
+ return tuple;
+ }
- if (update_result != HeapTupleMayBeUpdated)
+ if (result != TM_Ok)
{
ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Entity failed to be updated: %i", update_result)));
+ errmsg("Entity failed to be updated: %i", result)));
}
// Insert index entries for the tuple
- if (resultRelInfo->ri_NumIndices > 0)
+ if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
{
- ExecInsertIndexTuples(elemTupleSlot, &(tuple->t_self), estate,
- false, NULL, NIL);
+ ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, NIL);
}
+
+ ExecCloseIndices(resultRelInfo);
}
+ else if (lock_result == TM_SelfModified)
+ {
+ if (hufd.cmax != cid)
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
+ errmsg("tuple to be updated was already modified")));
+ }
+ }
+ else
+ {
+ ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("Entity failed to be updated: %i", lock_result)));
+ }
+
ReleaseBuffer(buffer);
estate->es_result_relation_info = saved_resultRelInfo;
@@ -376,13 +405,14 @@ static void process_update_list(CustomScanState *node)
TupleTableSlot *slot;
ResultRelInfo *resultRelInfo;
ScanKeyData scan_keys[1];
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
bool remove_property;
char *label_name;
cypher_update_item *update_item;
Datum new_entity;
HeapTuple heap_tuple;
char *clause_name = css->set_list->clause_name;
+ int cid;
update_item = (cypher_update_item *)lfirst(lc);
@@ -418,8 +448,8 @@ static void process_update_list(CustomScanState *node)
/* get the id and label for later */
id = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "id");
label = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "label");
- label_name = pnstrdup(label->val.string.val, label->val.string.len);
+ label_name = pnstrdup(label->val.string.val, label->val.string.len);
/* get the properties we need to update */
original_properties = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value,
"properties");
@@ -478,11 +508,12 @@ static void process_update_list(CustomScanState *node)
}
}
- resultRelInfo = create_entity_result_rel_info(estate,
- css->set_list->graph_name,
- label_name);
+ resultRelInfo = create_entity_result_rel_info(
+ estate, css->set_list->graph_name, label_name);
- slot = ExecInitExtraTupleSlot(estate, RelationGetDescr(resultRelInfo->ri_RelationDesc));
+ slot = ExecInitExtraTupleSlot(
+ estate, RelationGetDescr(resultRelInfo->ri_RelationDesc),
+ &TTSOpsHeapTuple);
/*
* Now that we have the updated properties, create a either a vertex or
@@ -533,6 +564,9 @@ static void process_update_list(CustomScanState *node)
* If the last update index for the entity is equal to the current loop
* index, then update this tuple.
*/
+ cid = estate->es_snapshot->curcid;
+ estate->es_snapshot->curcid = GetCurrentCommandId(false);
+
if (luindex[update_item->entity_position - 1] == lidx)
{
/*
@@ -545,8 +579,8 @@ static void process_update_list(CustomScanState *node)
* Setup the scan description, with the correct snapshot and scan
* keys.
*/
- scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc,
- estate->es_snapshot, 1, scan_keys);
+ scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc,
+ estate->es_snapshot, 1, scan_keys);
/* Retrieve the tuple. */
heap_tuple = heap_getnext(scan_desc, ForwardScanDirection);
@@ -560,12 +594,13 @@ static void process_update_list(CustomScanState *node)
heap_tuple);
}
/* close the ScanDescription */
- heap_endscan(scan_desc);
+ table_endscan(scan_desc);
}
+ estate->es_snapshot->curcid = cid;
/* close relation */
ExecCloseIndices(resultRelInfo);
- heap_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock);
+ table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock);
/* increment loop index */
lidx++;
diff --git a/src/backend/executor/cypher_utils.c b/src/backend/executor/cypher_utils.c
index 3558fe696..7b92fbc61 100644
--- a/src/backend/executor/cypher_utils.c
+++ b/src/backend/executor/cypher_utils.c
@@ -27,7 +27,9 @@
#include "access/htup_details.h"
#include "access/sysattr.h"
#include "access/xact.h"
+#include "access/heapam.h"
#include "access/multixact.h"
+#include "access/xact.h"
#include "nodes/extensible.h"
#include "nodes/makefuncs.h"
#include "nodes/nodes.h"
@@ -44,6 +46,7 @@
#include "executor/cypher_utils.h"
#include "utils/agtype.h"
#include "utils/ag_cache.h"
+#include "utils/agtype.h"
#include "utils/graphid.h"
/*
@@ -92,7 +95,7 @@ void destroy_entity_result_rel_info(ResultRelInfo *result_rel_info)
ExecCloseIndices(result_rel_info);
// close the rel
- heap_close(result_rel_info->ri_RelationDesc, RowExclusiveLock);
+ table_close(result_rel_info->ri_RelationDesc, RowExclusiveLock);
}
TupleTableSlot *populate_vertex_tts(
@@ -171,7 +174,7 @@ bool entity_exists(EState *estate, Oid graph_oid, graphid id)
{
label_cache_data *label;
ScanKeyData scan_keys[1];
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
HeapTuple tuple;
Relation rel;
bool result = true;
@@ -180,14 +183,14 @@ bool entity_exists(EState *estate, Oid graph_oid, graphid id)
* Extract the label id from the graph id and get the table name
* the entity is part of.
*/
- label = search_label_graph_id_cache(graph_oid, GET_LABEL_ID(id));
+ label = search_label_graph_oid_cache(graph_oid, GET_LABEL_ID(id));
// Setup the scan key to be the graphid
ScanKeyInit(&scan_keys[0], 1, BTEqualStrategyNumber,
F_GRAPHIDEQ, GRAPHID_GET_DATUM(id));
- rel = heap_open(label->relation, RowExclusiveLock);
- scan_desc = heap_beginscan(rel, estate->es_snapshot, 1, scan_keys);
+ rel = table_open(label->relation, RowExclusiveLock);
+ scan_desc = table_beginscan(rel, estate->es_snapshot, 1, scan_keys);
tuple = heap_getnext(scan_desc, ForwardScanDirection);
@@ -200,8 +203,8 @@ bool entity_exists(EState *estate, Oid graph_oid, graphid id)
result = false;
}
- heap_endscan(scan_desc);
- heap_close(rel, RowExclusiveLock);
+ table_endscan(scan_desc);
+ table_close(rel, RowExclusiveLock);
return result;
}
@@ -235,7 +238,7 @@ HeapTuple insert_entity_tuple_cid(ResultRelInfo *resultRelInfo,
HeapTuple tuple = NULL;
ExecStoreVirtualTuple(elemTupleSlot);
- tuple = ExecMaterializeSlot(elemTupleSlot);
+ tuple = ExecFetchSlotHeapTuple(elemTupleSlot, true, NULL);
/* Check the constraints of the tuple */
tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
@@ -244,14 +247,14 @@ HeapTuple insert_entity_tuple_cid(ResultRelInfo *resultRelInfo,
ExecConstraints(resultRelInfo, elemTupleSlot, estate);
}
- /* Insert the tuple using the passed in cid */
- heap_insert(resultRelInfo->ri_RelationDesc, tuple, cid, 0, NULL);
+ // Insert the tuple normally
+ table_tuple_insert(resultRelInfo->ri_RelationDesc, elemTupleSlot,
+ GetCurrentCommandId(true), 0, NULL);
- /* Insert index entries for the tuple */
+ // Insert index entries for the tuple
if (resultRelInfo->ri_NumIndices > 0)
{
- ExecInsertIndexTuples(elemTupleSlot, &(tuple->t_self), estate, false,
- NULL, NIL);
+ ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, NIL);
}
return tuple;
diff --git a/src/backend/nodes/cypher_outfuncs.c b/src/backend/nodes/cypher_outfuncs.c
index d7fc791b0..4053a399c 100644
--- a/src/backend/nodes/cypher_outfuncs.c
+++ b/src/backend/nodes/cypher_outfuncs.c
@@ -316,7 +316,7 @@ void out_cypher_create_target_nodes(StringInfo str, const ExtensibleNode *node)
WRITE_NODE_FIELD(paths);
WRITE_INT32_FIELD(flags);
- WRITE_OID_FIELD(graph_oid);
+ WRITE_INT32_FIELD(graph_oid);
}
// serialization function for the cypher_create_path ExtensibleNode.
@@ -384,7 +384,7 @@ void out_cypher_delete_information(StringInfo str, const ExtensibleNode *node)
WRITE_NODE_FIELD(delete_items);
WRITE_INT32_FIELD(flags);
WRITE_STRING_FIELD(graph_name);
- WRITE_OID_FIELD(graph_oid);
+ WRITE_INT32_FIELD(graph_oid);
WRITE_BOOL_FIELD(detach);
}
@@ -403,7 +403,7 @@ void out_cypher_merge_information(StringInfo str, const ExtensibleNode *node)
DEFINE_AG_NODE(cypher_merge_information);
WRITE_INT32_FIELD(flags);
- WRITE_OID_FIELD(graph_oid);
+ WRITE_INT32_FIELD(graph_oid);
WRITE_INT32_FIELD(merge_function_attr);
WRITE_NODE_FIELD(path);
}
diff --git a/src/backend/nodes/cypher_readfuncs.c b/src/backend/nodes/cypher_readfuncs.c
index 7ae64b899..89cedd577 100644
--- a/src/backend/nodes/cypher_readfuncs.c
+++ b/src/backend/nodes/cypher_readfuncs.c
@@ -29,11 +29,19 @@
*
* Macros for declaring appropriate local variables.
*/
-// Declare the extensible node and local fields for the pg_strtok
+/* A few guys need only local_node */
+#define READ_LOCALS_NO_FIELDS(nodeTypeName) \
+ nodeTypeName *local_node = (nodeTypeName *) node
+
+/* And a few guys need only the pg_strtok support fields */
+#define READ_TEMP_LOCALS() \
+ const char *token; \
+ int length
+
+/* ... but most need both */
#define READ_LOCALS(nodeTypeName) \
- nodeTypeName *local_node = (nodeTypeName *)node; \
- char *token; \
- int length;
+ READ_LOCALS_NO_FIELDS(nodeTypeName); \
+ READ_TEMP_LOCALS()
/*
* The READ_*_FIELD defines first skips the :fldname token (key) part of the string
@@ -49,7 +57,7 @@
#define READ_INT_FIELD(fldname) \
token = pg_strtok(&length); \
token = pg_strtok(&length); \
- local_node->fldname = atoi(token)
+ local_node->fldname = strtol(token, 0, 10)
// Read an unsigned integer field (anything written as ":fldname %u")
#define READ_UINT_FIELD(fldname) \
@@ -85,7 +93,7 @@
#define READ_ENUM_FIELD(fldname, enumtype) \
token = pg_strtok(&length); \
token = pg_strtok(&length); \
- local_node->fldname = (enumtype) atoi(token)
+ local_node->fldname = (enumtype) strtol(token, 0, 10)
// Read a float field
#define READ_FLOAT_FIELD(fldname) \
@@ -178,8 +186,8 @@ void read_cypher_create_target_nodes(struct ExtensibleNode *node)
READ_LOCALS(cypher_create_target_nodes);
READ_NODE_FIELD(paths);
- READ_INT_FIELD(flags);
- READ_OID_FIELD(graph_oid);
+ READ_UINT_FIELD(flags);
+ READ_UINT_FIELD(graph_oid);
}
/*
@@ -204,7 +212,7 @@ void read_cypher_target_node(struct ExtensibleNode *node)
READ_LOCALS(cypher_target_node);
READ_CHAR_FIELD(type);
- READ_INT_FIELD(flags);
+ READ_UINT_FIELD(flags);
READ_ENUM_FIELD(dir, cypher_rel_dir);
READ_NODE_FIELD(id_expr);
READ_NODE_FIELD(id_expr_state);
@@ -228,7 +236,7 @@ void read_cypher_update_information(struct ExtensibleNode *node)
READ_LOCALS(cypher_update_information);
READ_NODE_FIELD(set_items);
- READ_INT_FIELD(flags);
+ READ_UINT_FIELD(flags);
READ_INT_FIELD(tuple_position);
READ_STRING_FIELD(graph_name);
READ_STRING_FIELD(clause_name);
@@ -260,9 +268,9 @@ void read_cypher_delete_information(struct ExtensibleNode *node)
READ_LOCALS(cypher_delete_information);
READ_NODE_FIELD(delete_items);
- READ_INT_FIELD(flags);
+ READ_UINT_FIELD(flags);
READ_STRING_FIELD(graph_name);
- READ_OID_FIELD(graph_oid);
+ READ_UINT_FIELD(graph_oid);
READ_BOOL_FIELD(detach);
}
@@ -286,8 +294,8 @@ void read_cypher_merge_information(struct ExtensibleNode *node)
{
READ_LOCALS(cypher_merge_information);
- READ_INT_FIELD(flags);
- READ_OID_FIELD(graph_oid);
+ READ_UINT_FIELD(flags);
+ READ_UINT_FIELD(graph_oid);
READ_INT_FIELD(merge_function_attr);
READ_NODE_FIELD(path);
}
diff --git a/src/backend/optimizer/cypher_createplan.c b/src/backend/optimizer/cypher_createplan.c
index 9e0863423..c6480d154 100644
--- a/src/backend/optimizer/cypher_createplan.c
+++ b/src/backend/optimizer/cypher_createplan.c
@@ -19,13 +19,10 @@
#include "postgres.h"
-#include "access/sysattr.h"
-#include "catalog/pg_type_d.h"
#include "nodes/extensible.h"
#include "nodes/nodes.h"
#include "nodes/pg_list.h"
#include "nodes/plannodes.h"
-#include "nodes/relation.h"
#include "executor/cypher_executor.h"
#include "optimizer/cypher_createplan.h"
diff --git a/src/backend/optimizer/cypher_pathnode.c b/src/backend/optimizer/cypher_pathnode.c
index 4e04b752c..cdd0b0635 100644
--- a/src/backend/optimizer/cypher_pathnode.c
+++ b/src/backend/optimizer/cypher_pathnode.c
@@ -22,7 +22,6 @@
#include "nodes/extensible.h"
#include "nodes/nodes.h"
#include "nodes/pg_list.h"
-#include "nodes/relation.h"
#include "optimizer/cypher_createplan.h"
#include "optimizer/cypher_pathnode.h"
diff --git a/src/backend/optimizer/cypher_paths.c b/src/backend/optimizer/cypher_paths.c
index 0cb537ae3..151abfa43 100644
--- a/src/backend/optimizer/cypher_paths.c
+++ b/src/backend/optimizer/cypher_paths.c
@@ -19,11 +19,8 @@
#include "postgres.h"
-#include "access/sysattr.h"
-#include "catalog/pg_type_d.h"
#include "nodes/parsenodes.h"
#include "nodes/primnodes.h"
-#include "nodes/relation.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
diff --git a/src/backend/parser/ag_scanner.l b/src/backend/parser/ag_scanner.l
index 3bab06157..68b15a22c 100644
--- a/src/backend/parser/ag_scanner.l
+++ b/src/backend/parser/ag_scanner.l
@@ -316,6 +316,7 @@ static int _scan_errposition(const int location, const ag_yy_extra *extra);
* and is the same with "ag_scanner_t".
*/
#define YY_DECL ag_token ag_scanner_next_token(yyscan_t yyscanner)
+#define NDIGITS_PER_REMAINDER 9
%}
%%
@@ -902,7 +903,6 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb)
*/
const uint64 divisor = 1000000000;
const int ndivisions = 3;
- const int ndigits_per_remainder = 9;
int ndigits;
int nwords;
@@ -1048,11 +1048,11 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb)
// convert the collected remainders to a string, starting from the last one
for (i = nremainders - 1; i >= 0; i--)
{
- char buf[ndigits_per_remainder];
+ char buf[NDIGITS_PER_REMAINDER];
int buf_i;
uint32 tmp;
- buf_i = ndigits_per_remainder;
+ buf_i = NDIGITS_PER_REMAINDER;
for (tmp = remainders[i]; tmp > 0; tmp /= 10)
buf[--buf_i] = '0' + (char)(tmp % 10);
@@ -1064,7 +1064,7 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb)
buf[--buf_i] = '0';
}
- strbuf_append_buf(sb, &buf[buf_i], ndigits_per_remainder - buf_i);
+ strbuf_append_buf(sb, &buf[buf_i], NDIGITS_PER_REMAINDER - buf_i);
}
pfree(remainders);
diff --git a/src/backend/parser/cypher_analyze.c b/src/backend/parser/cypher_analyze.c
index 425b1167b..874585103 100644
--- a/src/backend/parser/cypher_analyze.c
+++ b/src/backend/parser/cypher_analyze.c
@@ -32,7 +32,6 @@
#include "parser/parse_node.h"
#include "parser/parse_relation.h"
#include "parser/parse_target.h"
-#include "parser/parsetree.h"
#include "utils/builtins.h"
#include "catalog/ag_graph.h"
@@ -212,7 +211,7 @@ static bool convert_cypher_walker(Node *node, ParseState *pstate)
* QTW_IGNORE_JOINALIASES
* We are not interested in this.
*/
- flags = QTW_EXAMINE_RTES | QTW_IGNORE_RT_SUBQUERIES |
+ flags = QTW_EXAMINE_RTES_BEFORE | QTW_IGNORE_RT_SUBQUERIES |
QTW_IGNORE_JOINALIASES;
/* recurse on query */
diff --git a/src/backend/parser/cypher_clause.c b/src/backend/parser/cypher_clause.c
index 26de52a66..f14c49986 100644
--- a/src/backend/parser/cypher_clause.c
+++ b/src/backend/parser/cypher_clause.c
@@ -25,6 +25,7 @@
#include "postgres.h"
#include "access/sysattr.h"
+#include "access/heapam.h"
#include "catalog/pg_type_d.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
@@ -33,7 +34,7 @@
#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"
-#include "optimizer/var.h"
+#include "optimizer/optimizer.h"
#include "parser/parse_clause.h"
#include "parser/parse_coerce.h"
#include "parser/parse_collate.h"
@@ -85,6 +86,14 @@
#define AGE_VARNAME_ID AGE_DEFAULT_VARNAME_PREFIX"id"
#define AGE_VARNAME_SET_CLAUSE AGE_DEFAULT_VARNAME_PREFIX"set_clause"
+/*
+ * In the transformation stage, we need to track
+ * where a variable came from. When moving between
+ * clauses, Postgres parsestate and Query data structures
+ * are insufficient for some of the information we
+ * need.
+ */
+
/*
* Rules to determine if a node must be included:
*
@@ -270,9 +279,9 @@ static cypher_target_node *get_referenced_variable(ParseState *pstate,
//call...[yield]
static Query *transform_cypher_call_stmt(cypher_parsestate *cpstate,
- cypher_clause *clause);
+ cypher_clause *clause);
static Query *transform_cypher_call_subquery(cypher_parsestate *cpstate,
- cypher_clause *clause);
+ cypher_clause *clause);
// transform
#define PREV_CYPHER_CLAUSE_ALIAS AGE_DEFAULT_ALIAS_PREFIX"previous_cypher_clause"
@@ -1704,8 +1713,7 @@ cypher_update_information *transform_cypher_set_item_list(
ereport(
ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg(
- "SET clause doesn't not support updating maps or lists in a property"),
+ errmsg("SET clause doesn't not support updating maps or lists in a property"),
parser_errposition(pstate, set_item->location)));
}
@@ -5219,7 +5227,7 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, List **target_list,
rel->relid = RelationGetRelid(label_relation);
rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation,
- NULL, false, false);
+ AccessShareLock, NULL, false, false);
rte->requiredPerms = ACL_INSERT;
// Build Id expression, always use the default logic
@@ -5488,7 +5496,7 @@ transform_create_cypher_new_node(cypher_parsestate *cpstate,
rel->relid = RelationGetRelid(label_relation);
rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation,
- NULL, false, false);
+ AccessShareLock, NULL, false, false);
rte->requiredPerms = ACL_INSERT;
// id
@@ -6485,7 +6493,7 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, List **target_list,
rel->relid = RelationGetRelid(label_relation);
rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation,
- NULL, false, false);
+ AccessShareLock, NULL, false, false);
rte->requiredPerms = ACL_INSERT;
// Build Id expression, always use the default logic
@@ -6603,7 +6611,7 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, List **target_list,
rel->relid = RelationGetRelid(label_relation);
rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation,
- NULL, false, false);
+ AccessShareLock, NULL, false, false);
rte->requiredPerms = ACL_INSERT;
// id
diff --git a/src/backend/parser/cypher_expr.c b/src/backend/parser/cypher_expr.c
index 16346a698..62f7efde2 100644
--- a/src/backend/parser/cypher_expr.c
+++ b/src/backend/parser/cypher_expr.c
@@ -41,6 +41,7 @@
#include "parser/parse_oper.h"
#include "parser/parse_relation.h"
#include "utils/builtins.h"
+#include "utils/float.h"
#include "utils/int8.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
diff --git a/src/backend/parser/cypher_keywords.c b/src/backend/parser/cypher_keywords.c
index 5ed120288..cd4082260 100644
--- a/src/backend/parser/cypher_keywords.c
+++ b/src/backend/parser/cypher_keywords.c
@@ -33,63 +33,27 @@
#include "funcapi.h"
#include "parser/cypher_gram.h"
+#include "parser/cypher_kwlist_d.h"
/*
* This list must be sorted by ASCII name, because binary search is used to
* locate entries.
*/
-const ScanKeyword cypher_keywords[] = {
- {"all", ALL, RESERVED_KEYWORD},
- {"analyze", ANALYZE, RESERVED_KEYWORD},
- {"and", AND, RESERVED_KEYWORD},
- {"as", AS, RESERVED_KEYWORD},
- {"asc", ASC, RESERVED_KEYWORD},
- {"ascending", ASCENDING, RESERVED_KEYWORD},
- {"by", BY, RESERVED_KEYWORD},
- {"call", CALL, RESERVED_KEYWORD},
- {"case", CASE, RESERVED_KEYWORD},
- {"coalesce", COALESCE, RESERVED_KEYWORD},
- {"contains", CONTAINS, RESERVED_KEYWORD},
- {"create", CREATE, RESERVED_KEYWORD},
- {"delete", DELETE, RESERVED_KEYWORD},
- {"desc", DESC, RESERVED_KEYWORD},
- {"descending", DESCENDING, RESERVED_KEYWORD},
- {"detach", DETACH, RESERVED_KEYWORD},
- {"distinct", DISTINCT, RESERVED_KEYWORD},
- {"else", ELSE, RESERVED_KEYWORD},
- {"end", END_P, RESERVED_KEYWORD},
- {"ends", ENDS, RESERVED_KEYWORD},
- {"exists", EXISTS, RESERVED_KEYWORD},
- {"explain", EXPLAIN, RESERVED_KEYWORD},
- {"false", FALSE_P, RESERVED_KEYWORD},
- {"in", IN, RESERVED_KEYWORD},
- {"is", IS, RESERVED_KEYWORD},
- {"limit", LIMIT, RESERVED_KEYWORD},
- {"match", MATCH, RESERVED_KEYWORD},
- {"merge", MERGE, RESERVED_KEYWORD},
- {"not", NOT, RESERVED_KEYWORD},
- {"null", NULL_P, RESERVED_KEYWORD},
- {"optional", OPTIONAL, RESERVED_KEYWORD},
- {"or", OR, RESERVED_KEYWORD},
- {"order", ORDER, RESERVED_KEYWORD},
- {"remove", REMOVE, RESERVED_KEYWORD},
- {"return", RETURN, RESERVED_KEYWORD},
- {"set", SET, RESERVED_KEYWORD},
- {"skip", SKIP, RESERVED_KEYWORD},
- {"starts", STARTS, RESERVED_KEYWORD},
- {"then", THEN, RESERVED_KEYWORD},
- {"true", TRUE_P, RESERVED_KEYWORD},
- {"union", UNION, RESERVED_KEYWORD},
- {"unwind", UNWIND, RESERVED_KEYWORD},
- {"verbose", VERBOSE, RESERVED_KEYWORD},
- {"when", WHEN, RESERVED_KEYWORD},
- {"where", WHERE, RESERVED_KEYWORD},
- {"with", WITH, RESERVED_KEYWORD},
- {"xor", XOR, RESERVED_KEYWORD},
- {"yield", YIELD, RESERVED_KEYWORD}
+#define PG_KEYWORD(kwname, value, category) value,
+
+const uint16 CypherKeywordTokens[] = {
+#include "parser/cypher_kwlist.h"
+};
+
+#undef PG_KEYWORD
+
+#define PG_KEYWORD(kwname, value, category) category,
+
+const uint16 CypherKeywordCategories[] = {
+#include "parser/cypher_kwlist.h"
};
-const int num_cypher_keywords = lengthof(cypher_keywords);
+#undef PG_KEYWORD
PG_FUNCTION_INFO_V1(get_cypher_keywords);
@@ -106,7 +70,7 @@ Datum get_cypher_keywords(PG_FUNCTION_ARGS)
func_ctx = SRF_FIRSTCALL_INIT();
old_mem_ctx = MemoryContextSwitchTo(func_ctx->multi_call_memory_ctx);
- tup_desc = CreateTemplateTupleDesc(3, false);
+ tup_desc = CreateTemplateTupleDesc(3);
TupleDescInitEntry(tup_desc, (AttrNumber)1, "word", TEXTOID, -1, 0);
TupleDescInitEntry(tup_desc, (AttrNumber)2, "catcode", CHAROID, -1, 0);
TupleDescInitEntry(tup_desc, (AttrNumber)3, "catdesc", TEXTOID, -1, 0);
@@ -118,15 +82,16 @@ Datum get_cypher_keywords(PG_FUNCTION_ARGS)
func_ctx = SRF_PERCALL_SETUP();
- if (func_ctx->call_cntr < num_cypher_keywords)
+ if (func_ctx->call_cntr < CypherKeyword.num_keywords)
{
char *values[3];
HeapTuple tuple;
// cast-away-const is ugly but alternatives aren't much better
- values[0] = (char *)cypher_keywords[func_ctx->call_cntr].name;
+ values[0] = (char *) GetScanKeyword((int) func_ctx->call_cntr,
+ &CypherKeyword);
- switch (cypher_keywords[func_ctx->call_cntr].category)
+ switch (CypherKeywordCategories[func_ctx->call_cntr])
{
case UNRESERVED_KEYWORD:
values[1] = "U";
diff --git a/src/backend/parser/cypher_parse_agg.c b/src/backend/parser/cypher_parse_agg.c
index b5654e778..8fdb71d3e 100644
--- a/src/backend/parser/cypher_parse_agg.c
+++ b/src/backend/parser/cypher_parse_agg.c
@@ -27,7 +27,7 @@
#include "catalog/pg_constraint.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/tlist.h"
-#include "optimizer/var.h"
+#include "optimizer/optimizer.h"
#include "parser/cypher_parse_agg.h"
#include "parser/parsetree.h"
#include "rewrite/rewriteManip.h"
@@ -192,7 +192,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry)
root->planner_cxt = CurrentMemoryContext;
root->hasJoinRTEs = true;
- groupClauses = (List *) flatten_join_alias_vars(root,
+ groupClauses = (List *) flatten_join_alias_vars((Query*)root,
(Node *) groupClauses);
}
@@ -236,7 +236,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry)
finalize_grouping_exprs(clause, pstate, qry, groupClauses, root,
have_non_var_grouping);
if (hasJoinRTEs)
- clause = flatten_join_alias_vars(root, clause);
+ clause = flatten_join_alias_vars((Query*)root, clause);
check_ungrouped_columns(clause, pstate, qry, groupClauses,
groupClauseCommonVars, have_non_var_grouping,
&func_grouped_rels);
@@ -245,7 +245,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry)
finalize_grouping_exprs(clause, pstate, qry, groupClauses, root,
have_non_var_grouping);
if (hasJoinRTEs)
- clause = flatten_join_alias_vars(root, clause);
+ clause = flatten_join_alias_vars((Query*)root, clause);
check_ungrouped_columns(clause, pstate, qry, groupClauses,
groupClauseCommonVars, have_non_var_grouping,
&func_grouped_rels);
@@ -562,7 +562,7 @@ static bool finalize_grouping_exprs_walker(Node *node,
Index ref = 0;
if (context->root)
- expr = flatten_join_alias_vars(context->root, expr);
+ expr = flatten_join_alias_vars((Query*)context->root, expr);
/*
* Each expression must match a grouping entry at the current
diff --git a/src/backend/parser/cypher_parser.c b/src/backend/parser/cypher_parser.c
index ae06c9c11..c6a95d398 100644
--- a/src/backend/parser/cypher_parser.c
+++ b/src/backend/parser/cypher_parser.c
@@ -19,7 +19,6 @@
#include "postgres.h"
-#include "common/keywords.h"
#include "nodes/pg_list.h"
#include "parser/scansup.h"
@@ -67,20 +66,19 @@ int cypher_yylex(YYSTYPE *lvalp, YYLTYPE *llocp, ag_scanner_t scanner)
break;
case AG_TOKEN_IDENTIFIER:
{
- const ScanKeyword *keyword;
+ int kwnum;
char *ident;
- keyword = ScanKeywordLookup(token.value.s, cypher_keywords,
- num_cypher_keywords);
- if (keyword)
+ kwnum = ScanKeywordLookup(token.value.s, &CypherKeyword);
+ if (kwnum >= 0)
{
/*
* use token.value.s instead of keyword->name to preserve
* case sensitivity
*/
- lvalp->keyword = token.value.s;
+ lvalp->keyword = GetScanKeyword(kwnum, &CypherKeyword);
*llocp = token.location;
- return keyword->value;
+ return CypherKeywordTokens[kwnum];
}
ident = pstrdup(token.value.s);
diff --git a/src/backend/utils/adt/ag_float8_supp.c b/src/backend/utils/adt/ag_float8_supp.c
index 286f074ef..450fdc07a 100644
--- a/src/backend/utils/adt/ag_float8_supp.c
+++ b/src/backend/utils/adt/ag_float8_supp.c
@@ -27,6 +27,7 @@
#include <math.h>
+#include "utils/float.h"
#include "utils/builtins.h"
#include "utils/ag_float8_supp.h"
diff --git a/src/backend/utils/adt/age_global_graph.c b/src/backend/utils/adt/age_global_graph.c
index 6868bb434..b2b34455a 100644
--- a/src/backend/utils/adt/age_global_graph.c
+++ b/src/backend/utils/adt/age_global_graph.c
@@ -19,14 +19,23 @@
#include "postgres.h"
+#include "access/heapam.h"
+#include "access/relscan.h"
+#include "access/skey.h"
+#include "access/table.h"
+#include "access/tableam.h"
#include "catalog/namespace.h"
+#include "commands/label_commands.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
#include "commands/label_commands.h"
+#include "catalog/ag_graph.h"
+#include "catalog/ag_label.h"
#include "utils/age_global_graph.h"
+#include "utils/age_graphid_ds.h"
#include "utils/agtype.h"
#include "catalog/ag_graph.h"
#include "catalog/ag_label.h"
@@ -189,7 +198,7 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid,
List *labels = NIL;
ScanKeyData scan_keys[2];
Relation ag_label;
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
HeapTuple tuple;
TupleDesc tupdesc;
@@ -203,8 +212,8 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid,
F_CHAREQ, CharGetDatum(label_type));
/* setup the table to be scanned, ag_label in this case */
- ag_label = heap_open(ag_label_relation_id(), ShareLock);
- scan_desc = heap_beginscan(ag_label, snapshot, 2, scan_keys);
+ ag_label = table_open(ag_label_relation_id(), ShareLock);
+ scan_desc = table_beginscan(ag_label, snapshot, 2, scan_keys);
/* get the tupdesc - we don't need to release this one */
tupdesc = RelationGetDescr(ag_label);
@@ -228,8 +237,8 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid,
}
/* close up scan */
- heap_endscan(scan_desc);
- heap_close(ag_label, ShareLock);
+ table_endscan(scan_desc);
+ table_close(ag_label, ShareLock);
return labels;
}
@@ -399,7 +408,7 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx)
foreach (lc, vertex_label_names)
{
Relation graph_vertex_label;
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
HeapTuple tuple;
char *vertex_label_name;
Oid vertex_label_table_oid;
@@ -411,8 +420,8 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx)
vertex_label_table_oid = get_relname_relid(vertex_label_name,
graph_namespace_oid);
/* open the relation (table) and begin the scan */
- graph_vertex_label = heap_open(vertex_label_table_oid, ShareLock);
- scan_desc = heap_beginscan(graph_vertex_label, snapshot, 0, NULL);
+ graph_vertex_label = table_open(vertex_label_table_oid, ShareLock);
+ scan_desc = table_beginscan(graph_vertex_label, snapshot, 0, NULL);
/* get the tupdesc - we don't need to release this one */
tupdesc = RelationGetDescr(graph_vertex_label);
/* bail if the number of columns differs */
@@ -452,8 +461,8 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx)
}
/* end the scan and close the relation */
- heap_endscan(scan_desc);
- heap_close(graph_vertex_label, ShareLock);
+ table_endscan(scan_desc);
+ table_close(graph_vertex_label, ShareLock);
}
}
@@ -498,7 +507,7 @@ static void load_edge_hashtable(GRAPH_global_context *ggctx)
foreach (lc, edge_label_names)
{
Relation graph_edge_label;
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
HeapTuple tuple;
char *edge_label_name;
Oid edge_label_table_oid;
@@ -510,8 +519,8 @@ static void load_edge_hashtable(GRAPH_global_context *ggctx)
edge_label_table_oid = get_relname_relid(edge_label_name,
graph_namespace_oid);
/* open the relation (table) and begin the scan */
- graph_edge_label = heap_open(edge_label_table_oid, ShareLock);
- scan_desc = heap_beginscan(graph_edge_label, snapshot, 0, NULL);
+ graph_edge_label = table_open(edge_label_table_oid, ShareLock);
+ scan_desc = table_beginscan(graph_edge_label, snapshot, 0, NULL);
/* get the tupdesc - we don't need to release this one */
tupdesc = RelationGetDescr(graph_edge_label);
/* bail if the number of columns differs */
@@ -573,8 +582,8 @@ static void load_edge_hashtable(GRAPH_global_context *ggctx)
}
/* end the scan and close the relation */
- heap_endscan(scan_desc);
- heap_close(graph_edge_label, ShareLock);
+ table_endscan(scan_desc);
+ table_close(graph_edge_label, ShareLock);
}
}
diff --git a/src/backend/utils/adt/age_vle.c b/src/backend/utils/adt/age_vle.c
index ca4876db0..bb154e3ae 100644
--- a/src/backend/utils/adt/age_vle.c
+++ b/src/backend/utils/adt/age_vle.c
@@ -19,6 +19,8 @@
#include "postgres.h"
+#include "access/heapam.h"
+#include "catalog/namespace.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "utils/lsyscache.h"
diff --git a/src/backend/utils/adt/agtype.c b/src/backend/utils/adt/agtype.c
index c8a88d9ee..43c1b205b 100644
--- a/src/backend/utils/adt/agtype.c
+++ b/src/backend/utils/adt/agtype.c
@@ -32,8 +32,15 @@
#include
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "access/skey.h"
+#include "access/table.h"
+#include "access/tableam.h"
#include "access/htup_details.h"
#include "catalog/namespace.h"
+#include "catalog/pg_collation.h"
+#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
#include "catalog/pg_aggregate_d.h"
#include "catalog/pg_collation_d.h"
@@ -45,6 +52,7 @@
#include "parser/parse_coerce.h"
#include "nodes/pg_list.h"
#include "utils/builtins.h"
+#include "utils/float.h"
#include "utils/fmgroids.h"
#include "utils/int8.h"
#include "utils/lsyscache.h"
@@ -151,25 +159,25 @@ static bool is_array_path(agtype_value *agtv);
/* graph entity retrieval */
static Datum get_vertex(const char *graph, const char *vertex_label,
int64 graphid);
-static char *get_label_name(const char *graph_name, int64 graph_id);
+static char *get_label_name(const char *graph_name, graphid element_graphid);
static float8 get_float_compatible_arg(Datum arg, Oid type, char *funcname,
bool *is_null);
static Numeric get_numeric_compatible_arg(Datum arg, Oid type, char *funcname,
- bool *is_null,
- enum agtype_value_type *ag_type);
+ bool *is_null,
+ enum agtype_value_type *ag_type);
agtype *get_one_agtype_from_variadic_args(FunctionCallInfo fcinfo,
- int variadic_offset,
- int expected_nargs);
+ int variadic_offset,
+ int expected_nargs);
static int64 get_int64_from_int_datums(Datum d, Oid type, char *funcname,
bool *is_agnull);
static agtype_iterator *get_next_object_key(agtype_iterator *it,
- agtype_container *agtc,
- agtype_value *key);
+ agtype_container *agtc,
+ agtype_value *key);
static agtype_iterator *get_next_list_element(agtype_iterator *it,
- agtype_container *agtc,
- agtype_value *elem);
+ agtype_container *agtc,
+ agtype_value *elem);
static int extract_variadic_args_min(FunctionCallInfo fcinfo,
int variadic_start, bool convert_unknown,
Datum **args, Oid **types, bool **nulls,
@@ -186,7 +194,8 @@ Oid get_AGTYPEOID(void)
{
if (g_AGTYPEOID == InvalidOid)
{
- g_AGTYPEOID = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum("agtype"),
+ g_AGTYPEOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid,
+ CStringGetDatum("agtype"),
ObjectIdGetDatum(ag_catalog_namespace_id()));
}
@@ -198,7 +207,7 @@ Oid get_AGTYPEARRAYOID(void)
{
if (g_AGTYPEARRAYOID == InvalidOid)
{
- g_AGTYPEARRAYOID = GetSysCacheOid2(TYPENAMENSP,
+ g_AGTYPEARRAYOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid,
CStringGetDatum("_agtype"),
ObjectIdGetDatum(ag_catalog_namespace_id()));
}
@@ -2178,7 +2187,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS)
result.res = push_agtype_value(&result.parse_state, WAGT_KEY,
string_to_agtype_value("id"));
- if (fcinfo->argnull[0])
+ if (fcinfo->args[0].isnull)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2193,7 +2202,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS)
result.res = push_agtype_value(&result.parse_state, WAGT_KEY,
string_to_agtype_value("label"));
- if (fcinfo->argnull[1])
+ if (fcinfo->args[1].isnull)
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("_agtype_build_vertex() label cannot be NULL")));
@@ -2208,7 +2217,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS)
string_to_agtype_value("properties"));
//if the properties object is null, push an empty object
- if (fcinfo->argnull[2])
+ if (fcinfo->args[2].isnull)
{
result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT,
NULL);
@@ -2260,7 +2269,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS)
result.res = push_agtype_value(&result.parse_state, WAGT_KEY,
string_to_agtype_value("id"));
- if (fcinfo->argnull[0])
+ if (fcinfo->args[0].isnull)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2275,7 +2284,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS)
result.res = push_agtype_value(&result.parse_state, WAGT_KEY,
string_to_agtype_value("label"));
- if (fcinfo->argnull[3])
+ if (fcinfo->args[3].isnull)
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("_agtype_build_vertex() label cannot be NULL")));
@@ -2289,7 +2298,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS)
result.res = push_agtype_value(&result.parse_state, WAGT_KEY,
string_to_agtype_value("end_id"));
- if (fcinfo->argnull[2])
+ if (fcinfo->args[2].isnull)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2304,7 +2313,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS)
result.res = push_agtype_value(&result.parse_state, WAGT_KEY,
string_to_agtype_value("start_id"));
- if (fcinfo->argnull[1])
+ if (fcinfo->args[1].isnull)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2320,7 +2329,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS)
string_to_agtype_value("properties"));
/* if the properties object is null, push an empty object */
- if (fcinfo->argnull[4])
+ if (fcinfo->args[4].isnull)
{
result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT,
NULL);
@@ -2633,7 +2642,7 @@ Datum agtype_to_int8(PG_FUNCTION_ARGS)
result = DatumGetInt64(DirectFunctionCall1(int8in,
CStringGetDatum(agtv.val.string.val)));
else if(agtv.type == AGTV_BOOL)
- result = DatumGetInt64(DirectFunctionCall1(bool_int4,
+ result = DatumGetInt64(DirectFunctionCall1(bool_int4,
BoolGetDatum(agtv.val.boolean)));
else
elog(ERROR, "invalid agtype type: %d", (int)agtv.type);
@@ -2658,7 +2667,7 @@ Datum agtype_to_int4(PG_FUNCTION_ARGS)
/* Return null if arg_agt is null. This covers SQL and Agtype NULLS */
if (arg_agt == NULL)
- {
+ {
PG_RETURN_NULL();
}
@@ -2668,39 +2677,39 @@ Datum agtype_to_int4(PG_FUNCTION_ARGS)
agtv.type != AGTV_NUMERIC &&
agtv.type != AGTV_STRING &&
agtv.type != AGTV_BOOL))
- {
+ {
cannot_cast_agtype_value(agtv.type, "int");
}
PG_FREE_IF_COPY(agtype_in, 0);
if (agtv.type == AGTV_INTEGER)
- {
+ {
result = DatumGetInt32(DirectFunctionCall1(int84,
Int64GetDatum(agtv.val.int_value)));
}
else if (agtv.type == AGTV_FLOAT)
- {
+ {
result = DatumGetInt32(DirectFunctionCall1(dtoi4,
Float8GetDatum(agtv.val.float_value)));
}
else if (agtv.type == AGTV_NUMERIC)
- {
+ {
result = DatumGetInt32(DirectFunctionCall1(numeric_int4,
NumericGetDatum(agtv.val.numeric)));
}
else if (agtv.type == AGTV_STRING)
- {
+ {
result = DatumGetInt32(DirectFunctionCall1(int4in,
CStringGetDatum(agtv.val.string.val)));
}
else if (agtv.type == AGTV_BOOL)
- {
- result = DatumGetInt64(DirectFunctionCall1(bool_int4,
+ {
+ result = DatumGetInt32(DirectFunctionCall1(bool_int4,
BoolGetDatum(agtv.val.boolean)));
}
else
- {
+ {
elog(ERROR, "invalid agtype type: %d", (int)agtv.type);
}
@@ -2772,7 +2781,7 @@ Datum agtype_to_float8(PG_FUNCTION_ARGS)
{
cannot_cast_agtype_value(agtv.type, "float");
}
-
+
PG_FREE_IF_COPY(agtype_in, 0);
if (agtv.type == AGTV_FLOAT)
@@ -2799,7 +2808,7 @@ Datum agtype_to_float8(PG_FUNCTION_ARGS)
errmsg("cannot cast to float8, integer value out of range")));
}
else if (agtv.type == AGTV_NUMERIC)
- {
+ {
result = DatumGetFloat8(DirectFunctionCall1(numeric_float8,
NumericGetDatum(agtv.val.numeric)));
}
@@ -2809,10 +2818,10 @@ Datum agtype_to_float8(PG_FUNCTION_ARGS)
CStringGetDatum(agtv.val.string.val)));
}
else
- {
+ {
elog(ERROR, "invalid agtype type: %d", (int)agtv.type);
}
-
+
PG_RETURN_FLOAT8(result);
}
@@ -4047,13 +4056,13 @@ Datum agtype_typecast_int(PG_FUNCTION_ARGS)
/* Return null if arg_agt is null. This covers SQL and Agtype NULLS */
if (arg_agt == NULL)
- {
+ {
PG_RETURN_NULL();
}
/* check that we have a scalar value */
if (!AGT_ROOT_IS_SCALAR(arg_agt))
- {
+ {
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("typecast argument must be a scalar value")));
@@ -4064,7 +4073,7 @@ Datum agtype_typecast_int(PG_FUNCTION_ARGS)
/* check for agtype null */
if (arg_value->type == AGTV_NULL)
- {
+ {
PG_RETURN_NULL();
}
@@ -4083,7 +4092,7 @@ Datum agtype_typecast_int(PG_FUNCTION_ARGS)
NumericGetDatum(arg_value->val.numeric));
break;
case AGTV_BOOL:
- d = DirectFunctionCall1(bool_int4,
+ d = DirectFunctionCall1(bool_int4,
BoolGetDatum(arg_value->val.boolean));
break;
case AGTV_STRING:
@@ -4129,13 +4138,13 @@ Datum agtype_typecast_bool(PG_FUNCTION_ARGS)
/* Return null if arg_agt is null. This covers SQL and Agtype NULLS */
if (arg_agt == NULL)
- {
+ {
PG_RETURN_NULL();
}
/* check that we have a scalar value */
if (!AGT_ROOT_IS_SCALAR(arg_agt))
- {
+ {
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("typecast argument must be a scalar value")));
@@ -4146,7 +4155,7 @@ Datum agtype_typecast_bool(PG_FUNCTION_ARGS)
/* check for agtype null */
if (arg_value->type == AGTV_NULL)
- {
+ {
PG_RETURN_NULL();
}
@@ -4657,11 +4666,11 @@ Datum column_get_datum(TupleDesc tupdesc, HeapTuple tuple, int column,
}
/*
- * Function to retrieve a label name, given the graph name and graphid. The
- * function returns a pointer to a duplicated string that needs to be freed
- * when you are finished using it.
+ * Function to retrieve a label name, given the graph name and graphid of the
+ * node or edge. The function returns a pointer to a duplicated string that
+ * needs to be freed when you are finished using it.
*/
-static char *get_label_name(const char *graph_name, int64 graphid)
+static char *get_label_name(const char *graph_name, graphid element_graphid)
{
ScanKeyData scan_keys[2];
Relation ag_label;
@@ -4669,48 +4678,49 @@ static char *get_label_name(const char *graph_name, int64 graphid)
HeapTuple tuple;
TupleDesc tupdesc;
char *result = NULL;
-
- Oid graphoid = get_graph_oid(graph_name);
+ bool column_is_null = false;
+ Oid graph_oid = get_graph_oid(graph_name);
+ int32 label_id = get_graphid_label_id(element_graphid);
/* scankey for first match in ag_label, column 2, graphoid, BTEQ, OidEQ */
ScanKeyInit(&scan_keys[0], Anum_ag_label_graph, BTEqualStrategyNumber,
- F_OIDEQ, ObjectIdGetDatum(graphoid));
+ F_OIDEQ, ObjectIdGetDatum(graph_oid));
/* scankey for second match in ag_label, column 3, label id, BTEQ, Int4EQ */
ScanKeyInit(&scan_keys[1], Anum_ag_label_id, BTEqualStrategyNumber,
- F_INT4EQ, Int32GetDatum(get_graphid_label_id(graphid)));
+ F_INT4EQ, Int32GetDatum(label_id));
- ag_label = heap_open(ag_relation_id("ag_label", "table"), ShareLock);
- scan_desc = systable_beginscan(ag_label,
- ag_relation_id("ag_label_graph_id_index",
- "index"), true, NULL, 2,
- scan_keys);
+ ag_label = table_open(ag_label_relation_id(), ShareLock);
+ scan_desc = systable_beginscan(ag_label, ag_label_graph_oid_index_id(), true,
+ NULL, 2, scan_keys);
tuple = systable_getnext(scan_desc);
if (!HeapTupleIsValid(tuple))
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("graphid %lu does not exist", graphid)));
+ errmsg("graphid %lu does not exist", element_graphid)));
}
/* get the tupdesc - we don't need to release this one */
tupdesc = RelationGetDescr(ag_label);
/* bail if the number of columns differs */
- if (tupdesc->natts != 6)
+ if (tupdesc->natts != Natts_ag_label)
+ {
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
errmsg("Invalid number of attributes for ag_catalog.ag_label")));
+ }
/* get the label name */
- result = NameStr(*DatumGetName(column_get_datum(tupdesc, tuple, 0, "name",
- NAMEOID, true)));
+ result = NameStr(*DatumGetName(heap_getattr(tuple, Anum_ag_label_name,
+ tupdesc, &column_is_null)));
/* duplicate it */
result = strdup(result);
/* end the scan and close the relation */
systable_endscan(scan_desc);
- heap_close(ag_label, ShareLock);
+ table_close(ag_label, ShareLock);
return result;
}
@@ -4720,7 +4730,7 @@ static Datum get_vertex(const char *graph, const char *vertex_label,
{
ScanKeyData scan_keys[1];
Relation graph_vertex_label;
- HeapScanDesc scan_desc;
+ TableScanDesc scan_desc;
HeapTuple tuple;
TupleDesc tupdesc;
Datum id, properties, result;
@@ -4738,8 +4748,8 @@ static Datum get_vertex(const char *graph, const char *vertex_label,
Int64GetDatum(graphid));
/* open the relation (table), begin the scan, and get the tuple */
- graph_vertex_label = heap_open(vertex_label_table_oid, ShareLock);
- scan_desc = heap_beginscan(graph_vertex_label, snapshot, 1, scan_keys);
+ graph_vertex_label = table_open(vertex_label_table_oid, ShareLock);
+ scan_desc = table_beginscan(graph_vertex_label, snapshot, 1, scan_keys);
tuple = heap_getnext(scan_desc, ForwardScanDirection);
/* bail if the tuple isn't valid */
@@ -4768,8 +4778,8 @@ static Datum get_vertex(const char *graph, const char *vertex_label,
result = DirectFunctionCall3(_agtype_build_vertex, id,
CStringGetDatum(vertex_label), properties);
/* end the scan and close the relation */
- heap_endscan(scan_desc);
- heap_close(graph_vertex_label, ShareLock);
+ table_endscan(scan_desc);
+ table_close(graph_vertex_label, ShareLock);
/* return the vertex datum */
return result;
}
@@ -4783,7 +4793,7 @@ Datum age_startnode(PG_FUNCTION_ARGS)
agtype_value *agtv_value = NULL;
char *graph_name = NULL;
char *label_name = NULL;
- graphid graph_id;
+ graphid start_id;
Datum result;
/* we need the graph name */
@@ -4825,14 +4835,14 @@ Datum age_startnode(PG_FUNCTION_ARGS)
/* it must not be null and must be an integer */
Assert(agtv_value != NULL);
Assert(agtv_value->type = AGTV_INTEGER);
- graph_id = agtv_value->val.int_value;
+ start_id = agtv_value->val.int_value;
/* get the label */
- label_name = get_label_name(graph_name, graph_id);
+ label_name = get_label_name(graph_name, start_id);
/* it must not be null and must be a string */
Assert(label_name != NULL);
- result = get_vertex(graph_name, label_name, graph_id);
+ result = get_vertex(graph_name, label_name, start_id);
free(label_name);
@@ -4848,7 +4858,7 @@ Datum age_endnode(PG_FUNCTION_ARGS)
agtype_value *agtv_value = NULL;
char *graph_name = NULL;
char *label_name = NULL;
- graphid graph_id;
+ graphid end_id;
Datum result;
/* we need the graph name */
@@ -4890,14 +4900,14 @@ Datum age_endnode(PG_FUNCTION_ARGS)
/* it must not be null and must be an integer */
Assert(agtv_value != NULL);
Assert(agtv_value->type = AGTV_INTEGER);
- graph_id = agtv_value->val.int_value;
+ end_id = agtv_value->val.int_value;
/* get the label */
- label_name = get_label_name(graph_name, graph_id);
+ label_name = get_label_name(graph_name, end_id);
/* it must not be null and must be a string */
Assert(label_name != NULL);
- result = get_vertex(graph_name, label_name, graph_id);
+ result = get_vertex(graph_name, label_name, end_id);
free(label_name);
@@ -7167,10 +7177,9 @@ Datum age_replace(PG_FUNCTION_ARGS)
* We need the strings as a text strings so that we can let PG deal with
* multibyte characters in the string.
*/
- text_result = DatumGetTextPP(DirectFunctionCall3(replace_text,
- PointerGetDatum(text_string),
- PointerGetDatum(text_search),
- PointerGetDatum(text_replace)));
+ text_result = DatumGetTextPP(DirectFunctionCall3Coll(
+ replace_text, C_COLLATION_OID, PointerGetDatum(text_string),
+ PointerGetDatum(text_search), PointerGetDatum(text_replace)));
/* convert it back to a cstring */
string = text_to_cstring(text_result);
diff --git a/src/backend/utils/adt/agtype_gin.c b/src/backend/utils/adt/agtype_gin.c
index 669935c21..9a9adc9b9 100644
--- a/src/backend/utils/adt/agtype_gin.c
+++ b/src/backend/utils/adt/agtype_gin.c
@@ -33,6 +33,7 @@
#include "access/stratnum.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_type.h"
+#include "utils/float.h"
#include "utils/builtins.h"
#include "utils/varlena.h"
diff --git a/src/backend/utils/adt/graphid.c b/src/backend/utils/adt/graphid.c
index bd65b957e..2060fc66e 100644
--- a/src/backend/utils/adt/graphid.c
+++ b/src/backend/utils/adt/graphid.c
@@ -36,7 +36,8 @@ Oid get_GRAPHIDOID(void)
{
if (g_GRAPHIDOID == InvalidOid)
{
- g_GRAPHIDOID = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum("graphid"),
+ g_GRAPHIDOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid,
+ CStringGetDatum("graphid"),
ObjectIdGetDatum(ag_catalog_namespace_id()));
}
@@ -48,7 +49,7 @@ Oid get_GRAPHIDARRAYOID(void)
{
if (g_GRAPHIDARRAYOID == InvalidOid)
{
- g_GRAPHIDARRAYOID = GetSysCacheOid2(TYPENAMENSP,
+ g_GRAPHIDARRAYOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid,
CStringGetDatum("_graphid"),
ObjectIdGetDatum(ag_catalog_namespace_id()));
}
diff --git a/src/backend/utils/ag_func.c b/src/backend/utils/ag_func.c
index cbd2167e2..35a03464b 100644
--- a/src/backend/utils/ag_func.c
+++ b/src/backend/utils/ag_func.c
@@ -27,7 +27,6 @@
#include "access/htup.h"
#include "access/htup_details.h"
#include "catalog/pg_proc.h"
-#include "fmgr.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
@@ -54,6 +53,7 @@ bool is_oid_ag_func(Oid func_oid, const char *func_name)
ReleaseSysCache(proctup);
return false;
}
+
nspid = proc->pronamespace;
ReleaseSysCache(proctup);
@@ -81,7 +81,8 @@ Oid get_ag_func_oid(const char *func_name, const int nargs, ...)
arg_types = buildoidvector(oids, nargs);
- func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, CStringGetDatum(func_name),
+ func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, Anum_pg_proc_oid,
+ CStringGetDatum(func_name),
PointerGetDatum(arg_types),
ObjectIdGetDatum(ag_catalog_namespace_id()));
if (!OidIsValid(func_oid))
@@ -111,7 +112,8 @@ Oid get_pg_func_oid(const char *func_name, const int nargs, ...)
arg_types = buildoidvector(oids, nargs);
- func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, CStringGetDatum(func_name),
+ func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, Anum_pg_proc_oid,
+ CStringGetDatum(func_name),
PointerGetDatum(arg_types),
ObjectIdGetDatum(pg_catalog_namespace_id()));
if (!OidIsValid(func_oid))
diff --git a/src/backend/utils/cache/ag_cache.c b/src/backend/utils/cache/ag_cache.c
index 6501476c6..4fd4d13e5 100644
--- a/src/backend/utils/cache/ag_cache.c
+++ b/src/backend/utils/cache/ag_cache.c
@@ -26,8 +26,8 @@
#include "access/htup_details.h"
#include "access/skey.h"
#include "access/stratnum.h"
-#include "access/sysattr.h"
#include "access/tupdesc.h"
+#include "catalog/pg_collation.h"
#include "fmgr.h"
#include "storage/lockdefs.h"
#include "utils/builtins.h"
@@ -69,17 +69,17 @@ typedef struct label_name_graph_cache_entry
label_cache_data data;
} label_name_graph_cache_entry;
-typedef struct label_graph_id_cache_key
+typedef struct label_graph_oid_cache_key
{
Oid graph;
int32 id;
-} label_graph_id_cache_key;
+} label_graph_oid_cache_key;
-typedef struct label_graph_id_cache_entry
+typedef struct label_graph_oid_cache_entry
{
- label_graph_id_cache_key key; // hash key
+ label_graph_oid_cache_key key; // hash key
label_cache_data data;
-} label_graph_id_cache_entry;
+} label_graph_oid_cache_entry;
typedef struct label_relation_cache_entry
{
@@ -107,17 +107,13 @@ static ScanKeyData graph_name_scan_keys[1];
static HTAB *graph_namespace_cache_hash = NULL;
static ScanKeyData graph_namespace_scan_keys[1];
-// ag_label.oid
-static HTAB *label_oid_cache_hash = NULL;
-static ScanKeyData label_oid_scan_keys[1];
-
// ag_label.name, ag_label.graph
static HTAB *label_name_graph_cache_hash = NULL;
static ScanKeyData label_name_graph_scan_keys[2];
// ag_label.graph, ag_label.id
-static HTAB *label_graph_id_cache_hash = NULL;
-static ScanKeyData label_graph_id_scan_keys[2];
+static HTAB *label_graph_oid_cache_hash = NULL;
+static ScanKeyData label_graph_oid_scan_keys[2];
// ag_label.relation
static HTAB *label_relation_cache_hash = NULL;
@@ -152,38 +148,35 @@ static void fill_graph_cache_data(graph_cache_data *cache_data,
// ag_label
static void initialize_label_caches(void);
static void create_label_caches(void);
-static void create_label_oid_cache(void);
static void create_label_name_graph_cache(void);
-static void create_label_graph_id_cache(void);
+static void create_label_graph_oid_cache(void);
static void create_label_relation_cache(void);
static void create_label_seq_name_graph_cache(void);
static void invalidate_label_caches(Datum arg, Oid relid);
-static void invalidate_label_oid_cache(Oid relid);
-static void flush_label_oid_cache(void);
static void invalidate_label_name_graph_cache(Oid relid);
static void flush_label_name_graph_cache(void);
-static void invalidate_label_graph_id_cache(Oid relid);
-static void flush_label_graph_id_cache(void);
+static void invalidate_label_graph_oid_cache(Oid relid);
+static void flush_label_graph_oid_cache(void);
static void invalidate_label_relation_cache(Oid relid);
static void flush_label_relation_cache(void);
static void invalidate_label_seq_name_graph_cache(Oid relid);
static void flush_label_seq_name_graph_cache(void);
-static label_cache_data *search_label_oid_cache_miss(Oid oid);
static label_cache_data *search_label_name_graph_cache_miss(Name name,
Oid graph);
static void *label_name_graph_cache_hash_search(Name name, Oid graph,
HASHACTION action,
bool *found);
-static label_cache_data *search_label_graph_id_cache_miss(Oid graph, int32 id);
-static void *label_graph_id_cache_hash_search(Oid graph, int32 id,
- HASHACTION action, bool *found);
+static label_cache_data *search_label_graph_oid_cache_miss(Oid graph,
+ uint32 id);
+static void *label_graph_oid_cache_hash_search(uint32 graph, int32 id,
+ HASHACTION action, bool *found);
static label_cache_data *search_label_relation_cache_miss(Oid relation);
static label_cache_data *search_label_seq_name_graph_cache_miss(Name name,
- Oid graph);
+ Oid graph);
static void *label_seq_name_graph_cache_hash_search(Name name, Oid graph,
- HASHACTION action,
- bool *found);
+ HASHACTION action,
+ bool *found);
static void fill_label_cache_data(label_cache_data *cache_data,
HeapTuple tuple, TupleDesc tuple_desc);
@@ -193,11 +186,13 @@ static void initialize_caches(void)
static bool initialized = false;
if (initialized)
+ {
return;
-
+ }
if (!CacheMemoryContext)
+ {
CreateCacheMemoryContext();
-
+ }
initialize_graph_caches();
initialize_label_caches();
@@ -211,7 +206,7 @@ static void ag_cache_scan_key_init(ScanKey entry, AttrNumber attno,
entry->sk_attno = attno;
entry->sk_strategy = BTEqualStrategyNumber;
entry->sk_subtype = InvalidOid;
- entry->sk_collation = InvalidOid;
+ entry->sk_collation = C_COLLATION_OID;
fmgr_info_cxt(func, &entry->sk_func, CacheMemoryContext);
entry->sk_argument = (Datum)0;
}
@@ -316,12 +311,15 @@ static void flush_graph_name_cache(void)
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
-
+ }
removed = hash_search(graph_name_cache_hash, &entry->name, HASH_REMOVE,
NULL);
if (!removed)
+ {
ereport(ERROR, (errmsg_internal("graph (name) cache corrupted")));
+ }
}
}
@@ -337,7 +335,9 @@ static void flush_graph_namespace_cache(void)
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
+ }
removed = hash_search(graph_namespace_cache_hash, &entry->namespace,
HASH_REMOVE, NULL);
@@ -361,7 +361,9 @@ graph_cache_data *search_graph_name_cache(const char *name)
namestrcpy(&name_key, name);
entry = hash_search(graph_name_cache_hash, &name_key, HASH_FIND, NULL);
if (entry)
+ {
return &entry->data;
+ }
return search_graph_name_cache_miss(&name_key);
}
@@ -379,11 +381,11 @@ static graph_cache_data *search_graph_name_cache_miss(Name name)
scan_keys[0].sk_argument = NameGetDatum(name);
/*
- * Calling heap_open() might call AcceptInvalidationMessage() and that
+ * Calling table_open() might call AcceptInvalidationMessage() and that
* might flush the graph caches. This is OK because this function is called
* when the desired entry is not in the cache.
*/
- ag_graph = heap_open(ag_graph_relation_id(), AccessShareLock);
+ ag_graph = table_open(ag_graph_relation_id(), AccessShareLock);
scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true,
NULL, 1, scan_keys);
@@ -392,7 +394,7 @@ static graph_cache_data *search_graph_name_cache_miss(Name name)
if (!HeapTupleIsValid(tuple))
{
systable_endscan(scan_desc);
- heap_close(ag_graph, AccessShareLock);
+ table_close(ag_graph, AccessShareLock);
return NULL;
}
@@ -405,7 +407,7 @@ static graph_cache_data *search_graph_name_cache_miss(Name name)
fill_graph_cache_data(&entry->data, tuple, RelationGetDescr(ag_graph));
systable_endscan(scan_desc);
- heap_close(ag_graph, AccessShareLock);
+ table_close(ag_graph, AccessShareLock);
return &entry->data;
}
@@ -419,7 +421,9 @@ graph_cache_data *search_graph_namespace_cache(Oid namespace)
entry = hash_search(graph_namespace_cache_hash, &namespace, HASH_FIND,
NULL);
if (entry)
+ {
return &entry->data;
+ }
return search_graph_namespace_cache_miss(namespace);
}
@@ -438,11 +442,11 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace)
scan_keys[0].sk_argument = ObjectIdGetDatum(namespace);
/*
- * Calling heap_open() might call AcceptInvalidationMessage() and that
+ * Calling table_open() might call AcceptInvalidationMessage() and that
* might flush the graph caches. This is OK because this function is called
* when the desired entry is not in the cache.
*/
- ag_graph = heap_open(ag_graph_relation_id(), AccessShareLock);
+ ag_graph = table_open(ag_graph_relation_id(), AccessShareLock);
scan_desc = systable_beginscan(ag_graph, ag_graph_namespace_index_id(),
true, NULL, 1, scan_keys);
@@ -452,7 +456,7 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace)
if (!HeapTupleIsValid(tuple))
{
systable_endscan(scan_desc);
- heap_close(ag_graph, AccessShareLock);
+ table_close(ag_graph, AccessShareLock);
return NULL;
}
@@ -466,7 +470,7 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace)
fill_graph_cache_data(&entry->data, tuple, RelationGetDescr(ag_graph));
systable_endscan(scan_desc);
- heap_close(ag_graph, AccessShareLock);
+ table_close(ag_graph, AccessShareLock);
return &entry->data;
}
@@ -477,8 +481,8 @@ static void fill_graph_cache_data(graph_cache_data *cache_data,
bool is_null;
Datum value;
- // ag_graph.oid
- value = heap_getattr(tuple, ObjectIdAttributeNumber, tuple_desc, &is_null);
+ // ag_graph.id
+ value = heap_getattr(tuple, Anum_ag_graph_oid, tuple_desc, &is_null);
Assert(!is_null);
cache_data->oid = DatumGetObjectId(value);
// ag_graph.name
@@ -493,20 +497,16 @@ static void fill_graph_cache_data(graph_cache_data *cache_data,
static void initialize_label_caches(void)
{
- // ag_label.oid
- ag_cache_scan_key_init(&label_oid_scan_keys[0], ObjectIdAttributeNumber,
- F_OIDEQ);
-
// ag_label.name, ag_label.graph
ag_cache_scan_key_init(&label_name_graph_scan_keys[0], Anum_ag_label_name,
F_NAMEEQ);
ag_cache_scan_key_init(&label_name_graph_scan_keys[1], Anum_ag_label_graph,
- F_OIDEQ);
+ F_INT4EQ);
// ag_label.graph, ag_label.id
- ag_cache_scan_key_init(&label_graph_id_scan_keys[0], Anum_ag_label_graph,
- F_OIDEQ);
- ag_cache_scan_key_init(&label_graph_id_scan_keys[1], Anum_ag_label_id,
+ ag_cache_scan_key_init(&label_graph_oid_scan_keys[0], Anum_ag_label_graph,
+ F_INT4EQ);
+ ag_cache_scan_key_init(&label_graph_oid_scan_keys[1], Anum_ag_label_id,
F_INT4EQ);
// ag_label.relation
@@ -519,6 +519,12 @@ static void initialize_label_caches(void)
ag_cache_scan_key_init(&label_seq_name_graph_scan_keys[1], Anum_ag_label_graph,
F_OIDEQ);
+ // ag_label.seq_name, ag_label.graph
+ ag_cache_scan_key_init(&label_seq_name_graph_scan_keys[0],
+ Anum_ag_label_seq_name, F_NAMEEQ);
+ ag_cache_scan_key_init(&label_seq_name_graph_scan_keys[1],
+ Anum_ag_label_graph, F_OIDEQ);
+
create_label_caches();
/*
@@ -534,32 +540,10 @@ static void create_label_caches(void)
* All the hash tables are created using their dedicated memory contexts
* which are under TopMemoryContext.
*/
- create_label_oid_cache();
create_label_name_graph_cache();
- create_label_graph_id_cache();
+ create_label_graph_oid_cache();
create_label_relation_cache();
create_label_seq_name_graph_cache();
-
-}
-
-static void create_label_oid_cache(void)
-{
- HASHCTL hash_ctl;
-
- /*
- * Use label_cache_data itself since it has oid field as its first field
- * that is the key for this hash.
- */
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
- hash_ctl.keysize = sizeof(Oid);
- hash_ctl.entrysize = sizeof(label_cache_data);
-
- /*
- * Please see the comment of hash_create() for the nelem value 16 here.
- * HASH_BLOBS flag is set because the size of the key is sizeof(uint32).
- */
- label_oid_cache_hash = hash_create("ag_label (oid) cache", 16, &hash_ctl,
- HASH_ELEM | HASH_BLOBS);
}
static void create_label_name_graph_cache(void)
@@ -579,21 +563,21 @@ static void create_label_name_graph_cache(void)
HASH_ELEM | HASH_BLOBS);
}
-static void create_label_graph_id_cache(void)
+static void create_label_graph_oid_cache(void)
{
HASHCTL hash_ctl;
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
- hash_ctl.keysize = sizeof(label_graph_id_cache_key);
- hash_ctl.entrysize = sizeof(label_graph_id_cache_entry);
+ hash_ctl.keysize = sizeof(label_graph_oid_cache_key);
+ hash_ctl.entrysize = sizeof(label_graph_oid_cache_entry);
/*
* Please see the comment of hash_create() for the nelem value 16 here.
* HASH_BLOBS flag is set because the key for this hash is fixed-size.
*/
- label_graph_id_cache_hash = hash_create("ag_label (graph, id) cache", 16,
- &hash_ctl,
- HASH_ELEM | HASH_BLOBS);
+ label_graph_oid_cache_hash = hash_create("ag_label (graph, id) cache", 16,
+ &hash_ctl,
+ HASH_ELEM | HASH_BLOBS);
}
static void create_label_relation_cache(void)
@@ -625,8 +609,8 @@ static void create_label_seq_name_graph_cache(void)
* HASH_BLOBS flag is set because the key for this hash is fixed-size.
*/
label_seq_name_graph_cache_hash = hash_create("ag_label (seq_name, graph) cache",
- 16, &hash_ctl,
- HASH_ELEM | HASH_BLOBS);
+ 16, &hash_ctl,
+ HASH_ELEM | HASH_BLOBS);
}
static void invalidate_label_caches(Datum arg, Oid relid)
@@ -637,77 +621,20 @@ static void invalidate_label_caches(Datum arg, Oid relid)
if (OidIsValid(relid))
{
- invalidate_label_oid_cache(relid);
invalidate_label_name_graph_cache(relid);
- invalidate_label_graph_id_cache(relid);
+ invalidate_label_graph_oid_cache(relid);
invalidate_label_relation_cache(relid);
invalidate_label_seq_name_graph_cache(relid);
}
else
{
- flush_label_oid_cache();
flush_label_name_graph_cache();
- flush_label_graph_id_cache();
+ flush_label_graph_oid_cache();
flush_label_relation_cache();
flush_label_seq_name_graph_cache();
}
}
-static void invalidate_label_oid_cache(Oid relid)
-{
- HASH_SEQ_STATUS hash_seq;
-
- hash_seq_init(&hash_seq, label_oid_cache_hash);
- for (;;)
- {
- label_cache_data *entry;
- void *removed;
-
- entry = hash_seq_search(&hash_seq);
- if (!entry)
- break;
-
- if (entry->relation != relid)
- continue;
-
- removed = hash_search(label_oid_cache_hash, &entry->oid, HASH_REMOVE,
- NULL);
- hash_seq_term(&hash_seq);
-
- if (!removed)
- {
- ereport(ERROR,
- (errmsg_internal("label (oid) cache corrupted")));
- }
-
- break;
- }
-}
-
-static void flush_label_oid_cache(void)
-{
- HASH_SEQ_STATUS hash_seq;
-
- hash_seq_init(&hash_seq, label_oid_cache_hash);
- for (;;)
- {
- label_cache_data *entry;
- void *removed;
-
- entry = hash_seq_search(&hash_seq);
- if (!entry)
- break;
-
- removed = hash_search(label_oid_cache_hash, &entry->oid, HASH_REMOVE,
- NULL);
- if (!removed)
- {
- ereport(ERROR,
- (errmsg_internal("label (oid) cache corrupted")));
- }
- }
-}
-
static void invalidate_label_name_graph_cache(Oid relid)
{
HASH_SEQ_STATUS hash_seq;
@@ -720,11 +647,13 @@ static void invalidate_label_name_graph_cache(Oid relid)
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
-
+ }
if (entry->data.relation != relid)
+ {
continue;
-
+ }
removed = hash_search(label_name_graph_cache_hash, &entry->key,
HASH_REMOVE, NULL);
hash_seq_term(&hash_seq);
@@ -751,8 +680,9 @@ static void flush_label_name_graph_cache(void)
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
-
+ }
removed = hash_search(label_name_graph_cache_hash, &entry->key,
HASH_REMOVE, NULL);
if (!removed)
@@ -763,24 +693,26 @@ static void flush_label_name_graph_cache(void)
}
}
-static void invalidate_label_graph_id_cache(Oid relid)
+static void invalidate_label_graph_oid_cache(Oid relid)
{
HASH_SEQ_STATUS hash_seq;
- hash_seq_init(&hash_seq, label_graph_id_cache_hash);
+ hash_seq_init(&hash_seq, label_graph_oid_cache_hash);
for (;;)
{
- label_graph_id_cache_entry *entry;
+ label_graph_oid_cache_entry *entry;
void *removed;
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
-
+ }
if (entry->data.relation != relid)
+ {
continue;
-
- removed = hash_search(label_graph_id_cache_hash, &entry->key,
+ }
+ removed = hash_search(label_graph_oid_cache_hash, &entry->key,
HASH_REMOVE, NULL);
hash_seq_term(&hash_seq);
@@ -794,21 +726,22 @@ static void invalidate_label_graph_id_cache(Oid relid)
}
}
-static void flush_label_graph_id_cache(void)
+static void flush_label_graph_oid_cache(void)
{
HASH_SEQ_STATUS hash_seq;
- hash_seq_init(&hash_seq, label_graph_id_cache_hash);
+ hash_seq_init(&hash_seq, label_graph_oid_cache_hash);
for (;;)
{
- label_graph_id_cache_entry *entry;
+ label_graph_oid_cache_entry *entry;
void *removed;
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
-
- removed = hash_search(label_graph_id_cache_hash, &entry->key,
+ }
+ removed = hash_search(label_graph_oid_cache_hash, &entry->key,
HASH_REMOVE, NULL);
if (!removed)
{
@@ -825,12 +758,15 @@ static void invalidate_label_relation_cache(Oid relid)
entry = hash_search(label_relation_cache_hash, &relid, HASH_FIND, NULL);
if (!entry)
+ {
return;
-
+ }
removed = hash_search(label_relation_cache_hash, &relid, HASH_REMOVE,
NULL);
if (!removed)
+ {
ereport(ERROR, (errmsg_internal("label (namespace) cache corrupted")));
+ }
}
static void flush_label_relation_cache(void)
@@ -845,8 +781,9 @@ static void flush_label_relation_cache(void)
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
-
+ }
removed = hash_search(label_relation_cache_hash, &entry->relation,
HASH_REMOVE, NULL);
if (!removed)
@@ -869,11 +806,13 @@ static void invalidate_label_seq_name_graph_cache(Oid relid)
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
-
+ }
if (entry->data.relation != relid)
+ {
continue;
-
+ }
removed = hash_search(label_seq_name_graph_cache_hash, &entry->key,
HASH_REMOVE, NULL);
hash_seq_term(&hash_seq);
@@ -900,8 +839,9 @@ static void flush_label_seq_name_graph_cache(void)
entry = hash_seq_search(&hash_seq);
if (!entry)
+ {
break;
-
+ }
removed = hash_search(label_seq_name_graph_cache_hash, &entry->key,
HASH_REMOVE, NULL);
if (!removed)
@@ -912,72 +852,12 @@ static void flush_label_seq_name_graph_cache(void)
}
}
-label_cache_data *search_label_oid_cache(Oid oid)
-{
- label_cache_data *entry;
-
- initialize_caches();
-
- entry = hash_search(label_oid_cache_hash, &oid, HASH_FIND, NULL);
- if (entry)
- return entry;
-
- return search_label_oid_cache_miss(oid);
-}
-
-static label_cache_data *search_label_oid_cache_miss(Oid oid)
-{
- ScanKeyData scan_keys[1];
- Relation ag_label;
- SysScanDesc scan_desc;
- HeapTuple tuple;
- bool found;
- label_cache_data *entry;
-
- memcpy(scan_keys, label_oid_scan_keys, sizeof(label_oid_scan_keys));
- scan_keys[0].sk_argument = ObjectIdGetDatum(oid);
-
- /*
- * Calling heap_open() might call AcceptInvalidationMessage() and that
- * might invalidate the label caches. This is OK because this function is
- * called when the desired entry is not in the cache.
- */
- ag_label = heap_open(ag_label_relation_id(), AccessShareLock);
- scan_desc = systable_beginscan(ag_label, ag_label_oid_index_id(), true,
- NULL, 1, scan_keys);
-
- // don't need to loop over scan_desc because ag_label_oid_index is UNIQUE
- tuple = systable_getnext(scan_desc);
- if (!HeapTupleIsValid(tuple))
- {
- systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
-
- return NULL;
- }
-
- // get a new entry
- entry = hash_search(label_oid_cache_hash, &oid, HASH_ENTER, &found);
- Assert(!found); // no concurrent update on label_oid_cache_hash
-
- // fill the new entry with the retrieved tuple
- fill_label_cache_data(entry, tuple, RelationGetDescr(ag_label));
- // make sure that the oid field is the same with the hash key(oid)
- Assert(entry->oid == oid);
-
- systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
-
- return entry;
-}
-
label_cache_data *search_label_name_graph_cache(const char *name, Oid graph)
{
NameData name_key;
label_name_graph_cache_entry *entry;
AssertArg(name);
- AssertArg(OidIsValid(graph));
initialize_caches();
@@ -985,8 +865,9 @@ label_cache_data *search_label_name_graph_cache(const char *name, Oid graph)
entry = label_name_graph_cache_hash_search(&name_key, graph, HASH_FIND,
NULL);
if (entry)
+ {
return &entry->data;
-
+ }
return search_label_name_graph_cache_miss(&name_key, graph);
}
@@ -1006,11 +887,11 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name,
scan_keys[1].sk_argument = ObjectIdGetDatum(graph);
/*
- * Calling heap_open() might call AcceptInvalidationMessage() and that
+ * Calling table_open() might call AcceptInvalidationMessage() and that
* might invalidate the label caches. This is OK because this function is
* called when the desired entry is not in the cache.
*/
- ag_label = heap_open(ag_label_relation_id(), AccessShareLock);
+ ag_label = table_open(ag_label_relation_id(), AccessShareLock);
scan_desc = systable_beginscan(ag_label, ag_label_name_graph_index_id(),
true, NULL, 2, scan_keys);
@@ -1022,7 +903,7 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name,
if (!HeapTupleIsValid(tuple))
{
systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
+ table_close(ag_label, AccessShareLock);
return NULL;
}
@@ -1036,7 +917,7 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name,
fill_label_cache_data(&entry->data, tuple, RelationGetDescr(ag_label));
systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
+ table_close(ag_label, AccessShareLock);
return &entry->data;
}
@@ -1053,81 +934,81 @@ static void *label_name_graph_cache_hash_search(Name name, Oid graph,
return hash_search(label_name_graph_cache_hash, &key, action, found);
}
-label_cache_data *search_label_graph_id_cache(Oid graph, int32 id)
+label_cache_data *search_label_graph_oid_cache(uint32 graph_oid, int32 id)
{
- label_graph_id_cache_entry *entry;
+ label_graph_oid_cache_entry *entry;
- AssertArg(OidIsValid(graph));
AssertArg(label_id_is_valid(id));
initialize_caches();
- entry = label_graph_id_cache_hash_search(graph, id, HASH_FIND, NULL);
+ entry = label_graph_oid_cache_hash_search(graph_oid, id, HASH_FIND, NULL);
if (entry)
+ {
return &entry->data;
-
- return search_label_graph_id_cache_miss(graph, id);
+ }
+ return search_label_graph_oid_cache_miss(graph_oid, id);
}
-static label_cache_data *search_label_graph_id_cache_miss(Oid graph, int32 id)
+static label_cache_data *search_label_graph_oid_cache_miss(Oid graph, uint32 id)
{
ScanKeyData scan_keys[2];
Relation ag_label;
SysScanDesc scan_desc;
HeapTuple tuple;
bool found;
- label_graph_id_cache_entry *entry;
+ label_graph_oid_cache_entry *entry;
- memcpy(scan_keys, label_graph_id_scan_keys,
- sizeof(label_graph_id_scan_keys));
+ memcpy(scan_keys, label_graph_oid_scan_keys,
+ sizeof(label_graph_oid_scan_keys));
scan_keys[0].sk_argument = ObjectIdGetDatum(graph);
scan_keys[1].sk_argument = Int32GetDatum(id);
/*
- * Calling heap_open() might call AcceptInvalidationMessage() and that
+ * Calling table_open() might call AcceptInvalidationMessage() and that
* might invalidate the label caches. This is OK because this function is
* called when the desired entry is not in the cache.
*/
- ag_label = heap_open(ag_label_relation_id(), AccessShareLock);
- scan_desc = systable_beginscan(ag_label, ag_label_graph_id_index_id(),
- true, NULL, 2, scan_keys);
+ ag_label = table_open(ag_label_relation_id(), AccessShareLock);
+ scan_desc = systable_beginscan(ag_label, ag_label_graph_oid_index_id(), true,
+ NULL, 2, scan_keys);
/*
- * don't need to loop over scan_desc because ag_label_graph_id_index is
+ * don't need to loop over scan_desc because ag_label_graph_oid_index is
* UNIQUE
*/
tuple = systable_getnext(scan_desc);
if (!HeapTupleIsValid(tuple))
{
systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
+ table_close(ag_label, AccessShareLock);
return NULL;
}
// get a new entry
- entry = label_graph_id_cache_hash_search(graph, id, HASH_ENTER, &found);
- Assert(!found); // no concurrent update on label_graph_id_cache_hash
+ entry = label_graph_oid_cache_hash_search(graph, id, HASH_ENTER, &found);
+ Assert(!found); // no concurrent update on label_graph_oid_cache_hash
// fill the new entry with the retrieved tuple
fill_label_cache_data(&entry->data, tuple, RelationGetDescr(ag_label));
systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
+ table_close(ag_label, AccessShareLock);
return &entry->data;
}
-static void *label_graph_id_cache_hash_search(Oid graph, int32 id,
- HASHACTION action, bool *found)
+static void *label_graph_oid_cache_hash_search(uint32 graph, int32 id,
+ HASHACTION action, bool *found)
{
- label_graph_id_cache_key key;
+ label_graph_oid_cache_key key;
- // initialize the hash key for label_graph_id_cache_hash
+ // initialize the hash key for label_graph_oid_cache_hash
key.graph = graph;
key.id = id;
- return hash_search(label_graph_id_cache_hash, &key, action, found);
+ return hash_search(label_graph_oid_cache_hash, &key, action, found);
}
label_cache_data *search_label_relation_cache(Oid relation)
@@ -1138,8 +1019,9 @@ label_cache_data *search_label_relation_cache(Oid relation)
entry = hash_search(label_relation_cache_hash, &relation, HASH_FIND, NULL);
if (entry)
+ {
return &entry->data;
-
+ }
return search_label_relation_cache_miss(relation);
}
@@ -1157,13 +1039,13 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation)
scan_keys[0].sk_argument = ObjectIdGetDatum(relation);
/*
- * Calling heap_open() might call AcceptInvalidationMessage() and that
+ * Calling table_open() might call AcceptInvalidationMessage() and that
* might invalidate the label caches. This is OK because this function is
* called when the desired entry is not in the cache.
*/
- ag_label = heap_open(ag_label_relation_id(), AccessShareLock);
- scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(),
- true, NULL, 1, scan_keys);
+ ag_label = table_open(ag_label_relation_id(), AccessShareLock);
+ scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(), true,
+ NULL, 1, scan_keys);
// don't need to loop over scan_desc because ag_label_relation_index is
// UNIQUE
@@ -1171,7 +1053,7 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation)
if (!HeapTupleIsValid(tuple))
{
systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
+ table_close(ag_label, AccessShareLock);
return NULL;
}
@@ -1185,7 +1067,7 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation)
fill_label_cache_data(entry, tuple, RelationGetDescr(ag_label));
systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
+ table_close(ag_label, AccessShareLock);
return entry;
}
@@ -1202,15 +1084,16 @@ label_cache_data *search_label_seq_name_graph_cache(const char *name, Oid graph)
namestrcpy(&name_key, name);
entry = label_seq_name_graph_cache_hash_search(&name_key, graph, HASH_FIND,
- NULL);
+ NULL);
if (entry)
+ {
return &entry->data;
-
+ }
return search_label_seq_name_graph_cache_miss(&name_key, graph);
}
static label_cache_data *search_label_seq_name_graph_cache_miss(Name name,
- Oid graph)
+ Oid graph)
{
ScanKeyData scan_keys[2];
Relation ag_label;
@@ -1225,11 +1108,11 @@ static label_cache_data *search_label_seq_name_graph_cache_miss(Name name,
scan_keys[1].sk_argument = ObjectIdGetDatum(graph);
/*
- * Calling heap_open() might call AcceptInvalidationMessage() and that
+ * Calling table_open() might call AcceptInvalidationMessage() and that
* might invalidate the label caches. This is OK because this function is
* called when the desired entry is not in the cache.
*/
- ag_label = heap_open(ag_label_relation_id(), AccessShareLock);
+ ag_label = table_open(ag_label_relation_id(), AccessShareLock);
scan_desc = systable_beginscan(ag_label, ag_label_seq_name_graph_index_id(),
true, NULL, 2, scan_keys);
@@ -1241,27 +1124,28 @@ static label_cache_data *search_label_seq_name_graph_cache_miss(Name name,
if (!HeapTupleIsValid(tuple))
{
systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
+ table_close(ag_label, AccessShareLock);
return NULL;
}
// get a new entry
entry = label_seq_name_graph_cache_hash_search(name, graph, HASH_ENTER,
- &found);
+ &found);
Assert(!found); // no concurrent update on label_seq_name_graph_cache_hash
// fill the new entry with the retrieved tuple
fill_label_cache_data(&entry->data, tuple, RelationGetDescr(ag_label));
systable_endscan(scan_desc);
- heap_close(ag_label, AccessShareLock);
+ table_close(ag_label, AccessShareLock);
return &entry->data;
}
static void *label_seq_name_graph_cache_hash_search(Name name, Oid graph,
- HASHACTION action, bool *found)
+ HASHACTION action,
+ bool *found)
{
label_seq_name_graph_cache_key key;
@@ -1278,10 +1162,6 @@ static void fill_label_cache_data(label_cache_data *cache_data,
bool is_null;
Datum value;
- // ag_label.oid
- value = heap_getattr(tuple, ObjectIdAttributeNumber, tuple_desc, &is_null);
- Assert(!is_null);
- cache_data->oid = DatumGetObjectId(value);
// ag_label.name
value = heap_getattr(tuple, Anum_ag_label_name, tuple_desc, &is_null);
Assert(!is_null);
diff --git a/src/backend/utils/graph_generation.c b/src/backend/utils/graph_generation.c
index 436b703e0..7033c9bc9 100644
--- a/src/backend/utils/graph_generation.c
+++ b/src/backend/utils/graph_generation.c
@@ -49,23 +49,23 @@
#include "utils/load/ag_load_labels.h"
-int64 get_nextval_internal(graph_cache_data* graph_cache,
+int64 get_nextval_internal(graph_cache_data* graph_cache,
label_cache_data* label_cache);
/*
* Auxiliary function to get the next internal value in the graph,
* so a new object (node or edge) graph id can be composed.
*/
-int64 get_nextval_internal(graph_cache_data* graph_cache,
- label_cache_data* label_cache)
+int64 get_nextval_internal(graph_cache_data* graph_cache,
+ label_cache_data* label_cache)
{
Oid obj_seq_id;
char* label_seq_name_str;
label_seq_name_str = NameStr(label_cache->seq_name);
- obj_seq_id = get_relname_relid(label_seq_name_str,
+ obj_seq_id = get_relname_relid(label_seq_name_str,
graph_cache->namespace);
-
+
return nextval_internal(obj_seq_id, true);
}
@@ -77,8 +77,8 @@ PG_FUNCTION_INFO_V1(create_complete_graph);
*/
Datum create_complete_graph(PG_FUNCTION_ARGS)
-{
- Oid graph_id;
+{
+ Oid graph_oid;
Name graph_name;
int64 no_vertices;
@@ -112,7 +112,7 @@ Datum create_complete_graph(PG_FUNCTION_ARGS)
Name edge_seq_name;
char *edge_seq_name_str;
- int64 lid;
+ int64 lid;
if (PG_ARGISNULL(0))
{
@@ -123,16 +123,15 @@ Datum create_complete_graph(PG_FUNCTION_ARGS)
if (PG_ARGISNULL(1))
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("number of nodes can not be NULL")));
+ errmsg("number of nodes can not be NULL")));
}
-
+
if (PG_ARGISNULL(2))
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("edge label can not be NULL")));
+ errmsg("edge label can not be NULL")));
}
-
graph_name = PG_GETARG_NAME(0);
no_vertices = (int64) PG_GETARG_INT64(1);
edge_label_name = PG_GETARG_NAME(2);
@@ -146,7 +145,7 @@ Datum create_complete_graph(PG_FUNCTION_ARGS)
{
vtx_label_name = PG_GETARG_NAME(3);
vtx_name_str = NameStr(*vtx_label_name);
-
+
// Check if vertex and edge label are same
if (strcmp(vtx_name_str, edge_name_str) == 0)
{
@@ -160,30 +159,32 @@ Datum create_complete_graph(PG_FUNCTION_ARGS)
DirectFunctionCall1(create_graph, CStringGetDatum(graph_name));
}
- graph_id = get_graph_oid(graph_name_str);
+ graph_oid = get_graph_oid(graph_name_str);
-
-
if (!PG_ARGISNULL(3))
{
// Check if label with the input name already exists
- if (!label_exists(vtx_name_str, graph_id))
+ if (!label_exists(vtx_name_str, graph_oid))
{
- DirectFunctionCall2(create_vlabel, CStringGetDatum(graph_name), CStringGetDatum(vtx_label_name));
+ DirectFunctionCall2(create_vlabel,
+ CStringGetDatum(graph_name),
+ CStringGetDatum(vtx_label_name));
}
}
- if (!label_exists(edge_name_str, graph_id))
+ if (!label_exists(edge_name_str, graph_oid))
{
- DirectFunctionCall2(create_elabel, CStringGetDatum(graph_name), CStringGetDatum(edge_label_name));
+ DirectFunctionCall2(create_elabel,
+ CStringGetDatum(graph_name),
+ CStringGetDatum(edge_label_name));
}
- vtx_label_id = get_label_id(vtx_name_str, graph_id);
- edge_label_id = get_label_id(edge_name_str, graph_id);
+ vtx_label_id = get_label_id(vtx_name_str, graph_oid);
+ edge_label_id = get_label_id(edge_name_str, graph_oid);
graph_cache = search_graph_name_cache(graph_name_str);
- vertex_cache = search_label_name_graph_cache(vtx_name_str,graph_id);
- edge_cache = search_label_name_graph_cache(edge_name_str,graph_id);
+ vertex_cache = search_label_name_graph_cache(vtx_name_str, graph_oid);
+ edge_cache = search_label_name_graph_cache(edge_name_str, graph_oid);
nsp_id = graph_cache->namespace;
vtx_seq_name = &(vertex_cache->seq_name);
@@ -195,34 +196,34 @@ Datum create_complete_graph(PG_FUNCTION_ARGS)
vtx_seq_id = get_relname_relid(vtx_seq_name_str, nsp_id);
edge_seq_id = get_relname_relid(edge_seq_name_str, nsp_id);
- props = create_empty_agtype();
-
+ props = create_empty_agtype();
+
/* Creating vertices*/
- for (i=(int64)1;i<=no_vertices;i++)
- {
+ for (i=(int64)1; i<=no_vertices; i++)
+ {
vid = nextval_internal(vtx_seq_id, true);
object_graph_id = make_graphid(vtx_label_id, vid);
- insert_vertex_simple(graph_id,vtx_name_str,object_graph_id,props);
+ insert_vertex_simple(graph_oid, vtx_name_str, object_graph_id, props);
}
lid = vid;
-
+
/* Creating edges*/
- for (i = 1;i<=no_vertices-1;i++)
- {
+ for (i = 1; i<=no_vertices-1; i++)
+ {
start_vid = lid-no_vertices+i;
- for(j=i+1;j<=no_vertices;j++)
- {
+ for(j=i+1; j<=no_vertices; j++)
+ {
end_vid = lid-no_vertices+j;
eid = nextval_internal(edge_seq_id, true);
object_graph_id = make_graphid(edge_label_id, eid);
start_vertex_graph_id = make_graphid(vtx_label_id, start_vid);
end_vertex_graph_id = make_graphid(vtx_label_id, end_vid);
-
- insert_edge_simple(graph_id, edge_name_str,
- object_graph_id, start_vertex_graph_id,
- end_vertex_graph_id, props);
+
+ insert_edge_simple(graph_oid, edge_name_str, object_graph_id,
+ start_vertex_graph_id, end_vertex_graph_id,
+ props);
}
}
PG_RETURN_VOID();
@@ -231,18 +232,18 @@ Datum create_complete_graph(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(age_create_barbell_graph);
-/*
+/*
* The barbell graph is two complete graphs connected by a bridge path
* Syntax:
* ag_catalog.age_create_barbell_graph(graph_name Name,
* m int,
- * n int,
+ * n int,
* vertex_label_name Name DEFAULT = NULL,
* vertex_properties agtype DEFAULT = NULL,
* edge_label_name Name DEFAULT = NULL,
* edge_properties agtype DEFAULT = NULL)
* Input:
- *
+ *
* graph_name - Name of the graph to be created.
* m - number of vertices in one complete graph.
* n - number of vertices in the bridge path.
@@ -250,11 +251,11 @@ PG_FUNCTION_INFO_V1(age_create_barbell_graph);
* vertex_properties - Property values to assign each vertex. Default is NULL
* edge_label_name - Name of the label to assign each edge to.
* edge_properties - Property values to assign each edge. Default is NULL
- *
+ *
* https://en.wikipedia.org/wiki/Barbell_graph
*/
-Datum age_create_barbell_graph(PG_FUNCTION_ARGS)
+Datum age_create_barbell_graph(PG_FUNCTION_ARGS)
{
FunctionCallInfo arguments;
Oid graph_oid;
@@ -270,7 +271,7 @@ Datum age_create_barbell_graph(PG_FUNCTION_ARGS)
Name edge_label_name;
int32 edge_label_id;
char* edge_label_str;
-
+
graphid object_graph_id;
graphid start_node_graph_id;
graphid end_node_graph_id;
@@ -282,13 +283,14 @@ Datum age_create_barbell_graph(PG_FUNCTION_ARGS)
arguments = fcinfo;
- // Checking for possible NULL arguments
+ // Checking for possible NULL arguments
// Name graph_name
if (PG_ARGISNULL(0))
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("Graph name cannot be NULL")));
}
+
graph_name = PG_GETARG_NAME(0);
graph_name_str = NameStr(*graph_name);
@@ -298,11 +300,11 @@ Datum age_create_barbell_graph(PG_FUNCTION_ARGS)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("Graph size cannot be NULL or lower than 3")));
}
-
+
/*
* int64 bridge_size: currently only stays at zero.
* to do: implement bridge with variable number of nodes.
- */
+ */
if (PG_ARGISNULL(2) || PG_GETARG_INT32(2) < 0 )
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -310,37 +312,36 @@ Datum age_create_barbell_graph(PG_FUNCTION_ARGS)
}
// node label: if null, gets default label, which is "_ag_label_vertex"
- if (PG_ARGISNULL(3))
+ if (PG_ARGISNULL(3))
{
namestrcpy(node_label_name, AG_DEFAULT_LABEL_VERTEX);
}
- else
+ else
{
node_label_name = PG_GETARG_NAME(3);
}
node_label_str = NameStr(*node_label_name);
- /*
- * Name edge_label
- */
+ /* Name edge_label */
if (PG_ARGISNULL(5))
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("edge label can not be NULL")));
}
+
edge_label_name = PG_GETARG_NAME(5);
edge_label_str = NameStr(*edge_label_name);
// create two separate complete graphs
- DirectFunctionCall4(create_complete_graph, arguments->arg[0],
- arguments->arg[1],
- arguments->arg[5],
- arguments->arg[3]);
- DirectFunctionCall4(create_complete_graph, arguments->arg[0],
- arguments->arg[1],
- arguments->arg[5],
- arguments->arg[3]);
+ DirectFunctionCall4(create_complete_graph, arguments->args[0].value,
+ arguments->args[1].value,
+ arguments->args[5].value,
+ arguments->args[3].value);
+ DirectFunctionCall4(create_complete_graph, arguments->args[0].value,
+ arguments->args[1].value,
+ arguments->args[5].value,
+ arguments->args[3].value);
graph_oid = get_graph_oid(graph_name_str);
node_label_id = get_label_id(node_label_str, graph_oid);
@@ -349,13 +350,13 @@ Datum age_create_barbell_graph(PG_FUNCTION_ARGS)
/*
* Fetching caches to get next values for graph id's, and access nodes
* to be connected with edges.
- */
+ */
graph_cache = search_graph_name_cache(graph_name_str);
edge_cache = search_label_name_graph_cache(edge_label_str,graph_oid);
// connect a node from each graph
start_node_index = 1; // first created node, from the first complete graph
- end_node_index = arguments->arg[1]*2; // last created node, second graph
+ end_node_index = arguments->args[1].value*2; // last created node, second graph
// next index to be assigned to a node or edge
nextval = get_nextval_internal(graph_cache, edge_cache);
@@ -370,6 +371,6 @@ Datum age_create_barbell_graph(PG_FUNCTION_ARGS)
insert_edge_simple(graph_oid, edge_label_str,
object_graph_id, start_node_graph_id,
end_node_graph_id, properties);
-
+
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/load/ag_load_edges.c b/src/backend/utils/load/ag_load_edges.c
index 419f0097e..ac4a6775f 100644
--- a/src/backend/utils/load/ag_load_edges.c
+++ b/src/backend/utils/load/ag_load_edges.c
@@ -17,16 +17,11 @@
* under the License.
*/
-#include
-#include
-#include
-#include
-#include
+#include "postgres.h"
-#include "utils/load/csv.h"
#include "utils/load/ag_load_edges.h"
#include "utils/load/age_load.h"
-
+#include "utils/load/csv.h"
void edge_field_cb(void *field, size_t field_len, void *data)
{
@@ -86,7 +81,7 @@ void edge_row_cb(int delim __attribute__((unused)), void *data)
cr->header_len = (size_t* )malloc(sizeof(size_t *) * cr->cur_field);
cr->header = malloc((sizeof (char*) * cr->cur_field));
-        for ( i = 0; i < cr->cur_field; i++)
+        for (i = 0; i < cr->cur_field; i++)
{
cr->header_len[i] = cr->fields_len[i];
cr->header[i] = strndup(cr->fields[i], cr->header_len[i]);
@@ -97,9 +92,9 @@ void edge_row_cb(int delim __attribute__((unused)), void *data)
object_graph_id = make_graphid(cr->object_id, (int64)cr->row);
start_id_int = strtol(cr->fields[0], NULL, 10);
- start_vertex_type_id = get_label_id(cr->fields[1], cr->graph_id);
+ start_vertex_type_id = get_label_id(cr->fields[1], cr->graph_oid);
end_id_int = strtol(cr->fields[2], NULL, 10);
- end_vertex_type_id = get_label_id(cr->fields[3], cr->graph_id);
+ end_vertex_type_id = get_label_id(cr->fields[3], cr->graph_oid);
start_vertex_graph_id = make_graphid(start_vertex_type_id, start_id_int);
end_vertex_graph_id = make_graphid(end_vertex_type_id, end_id_int);
@@ -107,7 +102,7 @@ void edge_row_cb(int delim __attribute__((unused)), void *data)
props = create_agtype_from_list_i(cr->header, cr->fields,
n_fields, 3);
- insert_edge_simple(cr->graph_id, cr->object_name,
+ insert_edge_simple(cr->graph_oid, cr->object_name,
object_graph_id, start_vertex_graph_id,
end_vertex_graph_id, props);
@@ -155,7 +150,7 @@ static int is_term(unsigned char c)
int create_edges_from_csv_file(char *file_path,
char *graph_name,
- Oid graph_id,
+ Oid graph_oid,
char *object_name,
int object_id )
{
@@ -191,7 +186,7 @@ int create_edges_from_csv_file(char *file_path,
cr.header_row_length = 0;
cr.curr_row_length = 0;
cr.graph_name = graph_name;
- cr.graph_id = graph_id;
+ cr.graph_oid = graph_oid;
cr.object_name = object_name;
cr.object_id = object_id;
diff --git a/src/backend/utils/load/ag_load_labels.c b/src/backend/utils/load/ag_load_labels.c
index c0ddf69bb..27e502815 100644
--- a/src/backend/utils/load/ag_load_labels.c
+++ b/src/backend/utils/load/ag_load_labels.c
@@ -135,7 +135,7 @@ void vertex_row_cb(int delim __attribute__((unused)), void *data)
props = create_agtype_from_list(cr->header, cr->fields,
n_fields, label_id_int);
- insert_vertex_simple(cr->graph_id, cr->object_name,
+ insert_vertex_simple(cr->graph_oid, cr->object_name,
object_graph_id, props);
}
@@ -183,7 +183,7 @@ static int is_term(unsigned char c)
int create_labels_from_csv_file(char *file_path,
char *graph_name,
- Oid graph_id,
+ Oid graph_oid,
char *object_name,
int object_id,
bool id_field_exists)
@@ -221,7 +221,7 @@ int create_labels_from_csv_file(char *file_path,
cr.header_row_length = 0;
cr.curr_row_length = 0;
cr.graph_name = graph_name;
- cr.graph_id = graph_id;
+ cr.graph_oid = graph_oid;
cr.object_name = object_name;
cr.object_id = object_id;
cr.id_field_exists = id_field_exists;
diff --git a/src/backend/utils/load/age_load.c b/src/backend/utils/load/age_load.c
index d9819a301..bc64db325 100644
--- a/src/backend/utils/load/age_load.c
+++ b/src/backend/utils/load/age_load.c
@@ -21,67 +21,45 @@
#include "access/heapam.h"
#include "access/xact.h"
-#include "catalog/dependency.h"
-#include "catalog/namespace.h"
-#include "catalog/objectaddress.h"
-#include "catalog/pg_class_d.h"
-#include "commands/defrem.h"
-#include "commands/sequence.h"
-#include "commands/tablecmds.h"
-#include "miscadmin.h"
-#include "nodes/makefuncs.h"
-#include "nodes/nodes.h"
-#include "nodes/parsenodes.h"
-#include "nodes/pg_list.h"
-#include "nodes/plannodes.h"
-#include "nodes/primnodes.h"
-#include "nodes/value.h"
#include "parser/parse_node.h"
-#include "parser/parser.h"
#include "storage/lockdefs.h"
#include "tcop/dest.h"
-#include "tcop/utility.h"
-#include "utils/acl.h"
#include "utils/builtins.h"
-#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "catalog/ag_graph.h"
#include "catalog/ag_label.h"
-#include "commands/label_commands.h"
-#include "utils/ag_cache.h"
#include "utils/agtype.h"
#include "utils/graphid.h"
-#include "utils/load/age_load.h"
-#include "utils/load/ag_load_labels.h"
#include "utils/load/ag_load_edges.h"
+#include "utils/load/ag_load_labels.h"
+#include "utils/load/age_load.h"
-agtype* create_empty_agtype(void)
+agtype *create_empty_agtype(void)
{
agtype_in_state result;
memset(&result, 0, sizeof(agtype_in_state));
- result.res = push_agtype_value(&result.parse_state,
- WAGT_BEGIN_OBJECT, NULL);
- result.res = push_agtype_value(&result.parse_state,
- WAGT_END_OBJECT, NULL);
+ result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT,
+ NULL);
+ result.res = push_agtype_value(&result.parse_state, WAGT_END_OBJECT, NULL);
return agtype_value_to_agtype(result.res);
}
-agtype* create_agtype_from_list(char **header, char **fields,
- size_t fields_len, int64 vertex_id)
+agtype *create_agtype_from_list(char **header, char **fields, size_t fields_len,
+ int64 vertex_id)
{
agtype_in_state result;
int i;
memset(&result, 0, sizeof(agtype_in_state));
- result.res = push_agtype_value(&result.parse_state,
- WAGT_BEGIN_OBJECT, NULL);
+ result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT,
+ NULL);
result.res = push_agtype_value(&result.parse_state,
WAGT_KEY,
@@ -117,12 +95,13 @@ agtype* create_agtype_from_list_i(char **header, char **fields,
{
return create_empty_agtype();
}
+
memset(&result, 0, sizeof(agtype_in_state));
- result.res = push_agtype_value(&result.parse_state,
- WAGT_BEGIN_OBJECT, NULL);
+ result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT,
+ NULL);
- for (i = start_index; iis_space = f;
}
-
+
void
csv_set_term_func(struct csv_parser *p, int (*f)(unsigned char))
{
@@ -259,7 +259,7 @@ csv_set_realloc_func(struct csv_parser *p, void *(*f)(void *, size_t))
/* Set the realloc function used to increase buffer size */
if (p && f) p->realloc_func = f;
}
-
+
void
csv_set_free_func(struct csv_parser *p, void (*f)(void *))
{
@@ -282,7 +282,7 @@ csv_get_buffer_size(const struct csv_parser *p)
return p->entry_size;
return 0;
}
-
+
static int
csv_increase_buffer(struct csv_parser *p)
{
@@ -291,10 +291,10 @@ csv_increase_buffer(struct csv_parser *p)
if (p == NULL) return 0;
if (p->realloc_func == NULL) return 0;
-
- /* Increase the size of the entry buffer. Attempt to increase size by
+
+ /* Increase the size of the entry buffer. Attempt to increase size by
* p->blk_size, if this is larger than SIZE_MAX try to increase current
- * buffer size to SIZE_MAX. If allocation fails, try to allocate halve
+ * buffer size to SIZE_MAX. If allocation fails, try to allocate halve
* the size and try again until successful or increment size is zero.
*/
@@ -321,7 +321,7 @@ csv_increase_buffer(struct csv_parser *p)
p->entry_size += to_add;
return 0;
}
-
+
size_t
csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, size_t, void *), void (*cb2)(int c, void *), void *data)
{
@@ -342,7 +342,7 @@ csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, s
if (!p->entry_buf && pos < len) {
/* Buffer hasn't been allocated yet and len > 0 */
- if (csv_increase_buffer(p) != 0) {
+ if (csv_increase_buffer(p) != 0) {
p->quoted = quoted, p->pstate = pstate, p->spaces = spaces, p->entry_pos = entry_pos;
return pos;
}
@@ -367,7 +367,7 @@ csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, s
} else if (is_term ? is_term(c) : c == CSV_CR || c == CSV_LF) { /* Carriage Return or Line Feed */
if (pstate == FIELD_NOT_BEGUN) {
SUBMIT_FIELD(p);
- SUBMIT_ROW(p, c);
+ SUBMIT_ROW(p, c);
} else { /* ROW_NOT_BEGUN */
/* Don't submit empty rows by default */
if (p->options & CSV_REPALL_NL) {
diff --git a/src/include/catalog/ag_graph.h b/src/include/catalog/ag_graph.h
index 98932023f..3704f9c01 100644
--- a/src/include/catalog/ag_graph.h
+++ b/src/include/catalog/ag_graph.h
@@ -24,21 +24,22 @@
#include "catalog/ag_catalog.h"
-#define Anum_ag_graph_name 1
-#define Anum_ag_graph_namespace 2
+#define Anum_ag_graph_oid 1
+#define Anum_ag_graph_name 2
+#define Anum_ag_graph_namespace 3
-#define Natts_ag_graph 2
+#define Natts_ag_graph 3
#define ag_graph_relation_id() ag_relation_id("ag_graph", "table")
#define ag_graph_name_index_id() ag_relation_id("ag_graph_name_index", "index")
#define ag_graph_namespace_index_id() \
ag_relation_id("ag_graph_namespace_index", "index")
-Oid insert_graph(const Name graph_name, const Oid nsp_id);
+void insert_graph(const Name graph_name, const Oid nsp_id);
void delete_graph(const Name graph_name);
void update_graph_name(const Name graph_name, const Name new_name);
-Oid get_graph_oid(const char *graph_name);
+uint32 get_graph_oid(const char *graph_name);
char *get_graph_namespace_name(const char *graph_name);
List *get_graphnames(void);
diff --git a/src/include/catalog/ag_label.h b/src/include/catalog/ag_label.h
index 39ab5c975..e1e90f3f8 100644
--- a/src/include/catalog/ag_label.h
+++ b/src/include/catalog/ag_label.h
@@ -55,11 +55,10 @@
#define Natts_ag_label 6
#define ag_label_relation_id() ag_relation_id("ag_label", "table")
-#define ag_label_oid_index_id() ag_relation_id("ag_label_oid_index", "index")
#define ag_label_name_graph_index_id() \
ag_relation_id("ag_label_name_graph_index", "index")
-#define ag_label_graph_id_index_id() \
- ag_relation_id("ag_label_graph_id_index", "index")
+#define ag_label_graph_oid_index_id() \
+ ag_relation_id("ag_label_graph_oid_index", "index")
#define ag_label_relation_index_id() \
ag_relation_id("ag_label_relation_index", "index")
#define ag_label_seq_name_graph_index_id() \
@@ -70,22 +69,23 @@
#define LABEL_KIND_VERTEX 'v'
#define LABEL_KIND_EDGE 'e'
-Oid insert_label(const char *label_name, Oid label_graph, int32 label_id,
- char label_kind, Oid label_relation, const char *seq_name);
+void insert_label(const char *label_name, Oid graph_oid, int32 label_id,
+ char label_kind, Oid label_relation, const char *seq_name);
void delete_label(Oid relation);
+int32 get_label_id(const char *label_name, Oid graph_oid);
+Oid get_label_relation(const char *label_name, Oid graph_oid);
+char *get_label_relation_name(const char *label_name, Oid graph_oid);
Oid get_label_oid(const char *label_name, Oid label_graph);
-int32 get_label_id(const char *label_name, Oid label_graph);
-Oid get_label_relation(const char *label_name, Oid label_graph);
-char *get_label_relation_name(const char *label_name, Oid label_graph);
char get_label_kind(const char *label_name, Oid label_graph);
-bool label_id_exists(Oid label_graph, int32 label_id);
-RangeVar *get_label_range_var(char *graph_name, Oid graph_oid, char *label_name);
+bool label_id_exists(Oid graph_oid, int32 label_id);
+RangeVar *get_label_range_var(char *graph_name, Oid graph_oid,
+ char *label_name);
List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid);
#define label_exists(label_name, label_graph) \
- OidIsValid(get_label_oid(label_name, label_graph))
+ OidIsValid(get_label_id(label_name, label_graph))
#endif
diff --git a/src/include/commands/label_commands.h b/src/include/commands/label_commands.h
index 711a56453..6bbc814cc 100644
--- a/src/include/commands/label_commands.h
+++ b/src/include/commands/label_commands.h
@@ -54,12 +54,11 @@
#define IS_AG_DEFAULT_LABEL(x) \
(IS_DEFAULT_LABEL_EDGE(x) || IS_DEFAULT_LABEL_VERTEX(x))
+void create_label(char *graph_name, char *label_name, char label_type,
+ List *parents);
Datum create_vlabel(PG_FUNCTION_ARGS);
Datum create_elabel(PG_FUNCTION_ARGS);
-Oid create_label(char *graph_name, char *label_name, char label_type,
- List *parents);
-
#endif
diff --git a/src/include/executor/cypher_utils.h b/src/include/executor/cypher_utils.h
index 469b729bb..58dd8fa9c 100644
--- a/src/include/executor/cypher_utils.h
+++ b/src/include/executor/cypher_utils.h
@@ -20,6 +20,9 @@
#ifndef AG_CYPHER_UTILS_H
#define AG_CYPHER_UTILS_H
+#include "access/heapam.h"
+#include "access/table.h"
+#include "access/tableam.h"
#include "nodes/execnodes.h"
#include "nodes/extensible.h"
#include "nodes/nodes.h"
diff --git a/src/include/nodes/cypher_nodes.h b/src/include/nodes/cypher_nodes.h
index d6d8c02dd..64318eb5d 100644
--- a/src/include/nodes/cypher_nodes.h
+++ b/src/include/nodes/cypher_nodes.h
@@ -228,7 +228,7 @@ typedef struct cypher_create_target_nodes
ExtensibleNode extensible;
List *paths;
uint32 flags;
- Oid graph_oid;
+ uint32 graph_oid;
} cypher_create_target_nodes;
typedef struct cypher_create_path
@@ -362,7 +362,7 @@ typedef struct cypher_update_information
{
ExtensibleNode extensible;
List *set_items;
- int flags;
+ uint32 flags;
AttrNumber tuple_position;
char *graph_name;
char *clause_name;
@@ -384,9 +384,9 @@ typedef struct cypher_delete_information
{
ExtensibleNode extensible;
List *delete_items;
- int flags;
+ uint32 flags;
char *graph_name;
- Oid graph_oid;
+ uint32 graph_oid;
bool detach;
} cypher_delete_information;
@@ -400,8 +400,8 @@ typedef struct cypher_delete_item
typedef struct cypher_merge_information
{
ExtensibleNode extensible;
- int flags;
- Oid graph_oid;
+ uint32 flags;
+ uint32 graph_oid;
AttrNumber merge_function_attr;
cypher_create_path *path;
} cypher_merge_information;
diff --git a/src/include/optimizer/cypher_createplan.h b/src/include/optimizer/cypher_createplan.h
index 50a86e158..e42e20b71 100644
--- a/src/include/optimizer/cypher_createplan.h
+++ b/src/include/optimizer/cypher_createplan.h
@@ -22,7 +22,6 @@
#include "nodes/pg_list.h"
#include "nodes/plannodes.h"
-#include "nodes/relation.h"
Plan *plan_cypher_create_path(PlannerInfo *root, RelOptInfo *rel,
CustomPath *best_path, List *tlist,
diff --git a/src/include/optimizer/cypher_pathnode.h b/src/include/optimizer/cypher_pathnode.h
index 638268b7e..22404f259 100644
--- a/src/include/optimizer/cypher_pathnode.h
+++ b/src/include/optimizer/cypher_pathnode.h
@@ -21,7 +21,6 @@
#define AG_CYPHER_PATHNODE_H
#include "nodes/pg_list.h"
-#include "nodes/relation.h"
#define CREATE_PATH_NAME "Cypher Create"
#define SET_PATH_NAME "Cypher Set"
diff --git a/src/include/parser/.gitignore b/src/include/parser/.gitignore
index 891bb4cb1..a9f39a00c 100644
--- a/src/include/parser/.gitignore
+++ b/src/include/parser/.gitignore
@@ -1 +1,2 @@
cypher_gram_def.h
+cypher_kwlist_d.h
diff --git a/src/include/parser/cypher_keywords.h b/src/include/parser/cypher_keywords.h
index 8d1bbb426..d578dba8d 100644
--- a/src/include/parser/cypher_keywords.h
+++ b/src/include/parser/cypher_keywords.h
@@ -20,9 +20,10 @@
#ifndef AG_KEYWORDS_H
#define AG_KEYWORDS_H
-#include "common/keywords.h"
+#include "common/kwlookup.h"
-extern const ScanKeyword cypher_keywords[];
-extern const int num_cypher_keywords;
+extern const ScanKeywordList CypherKeyword;
+extern const uint16 CypherKeywordTokens[];
+extern const uint16 CypherKeywordCategories[];
#endif
diff --git a/src/include/parser/cypher_kwlist.h b/src/include/parser/cypher_kwlist.h
new file mode 100644
index 000000000..d15a0e34a
--- /dev/null
+++ b/src/include/parser/cypher_kwlist.h
@@ -0,0 +1,48 @@
+PG_KEYWORD("all", ALL, RESERVED_KEYWORD)
+PG_KEYWORD("analyze", ANALYZE, RESERVED_KEYWORD)
+PG_KEYWORD("and", AND, RESERVED_KEYWORD)
+PG_KEYWORD("as", AS, RESERVED_KEYWORD)
+PG_KEYWORD("asc", ASC, RESERVED_KEYWORD)
+PG_KEYWORD("ascending", ASCENDING, RESERVED_KEYWORD)
+PG_KEYWORD("by", BY, RESERVED_KEYWORD)
+PG_KEYWORD("call", CALL, RESERVED_KEYWORD)
+PG_KEYWORD("case", CASE, RESERVED_KEYWORD)
+PG_KEYWORD("coalesce", COALESCE, RESERVED_KEYWORD)
+PG_KEYWORD("contains", CONTAINS, RESERVED_KEYWORD)
+PG_KEYWORD("create", CREATE, RESERVED_KEYWORD)
+PG_KEYWORD("delete", DELETE, RESERVED_KEYWORD)
+PG_KEYWORD("desc", DESC, RESERVED_KEYWORD)
+PG_KEYWORD("descending", DESCENDING, RESERVED_KEYWORD)
+PG_KEYWORD("detach", DETACH, RESERVED_KEYWORD)
+PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD)
+PG_KEYWORD("else", ELSE, RESERVED_KEYWORD)
+PG_KEYWORD("end", END_P, RESERVED_KEYWORD)
+PG_KEYWORD("ends", ENDS, RESERVED_KEYWORD)
+PG_KEYWORD("exists", EXISTS, RESERVED_KEYWORD)
+PG_KEYWORD("explain", EXPLAIN, RESERVED_KEYWORD)
+PG_KEYWORD("false", FALSE_P, RESERVED_KEYWORD)
+PG_KEYWORD("in", IN, RESERVED_KEYWORD)
+PG_KEYWORD("is", IS, RESERVED_KEYWORD)
+PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD)
+PG_KEYWORD("match", MATCH, RESERVED_KEYWORD)
+PG_KEYWORD("merge", MERGE, RESERVED_KEYWORD)
+PG_KEYWORD("not", NOT, RESERVED_KEYWORD)
+PG_KEYWORD("null", NULL_P, RESERVED_KEYWORD)
+PG_KEYWORD("optional", OPTIONAL, RESERVED_KEYWORD)
+PG_KEYWORD("or", OR, RESERVED_KEYWORD)
+PG_KEYWORD("order", ORDER, RESERVED_KEYWORD)
+PG_KEYWORD("remove", REMOVE, RESERVED_KEYWORD)
+PG_KEYWORD("return", RETURN, RESERVED_KEYWORD)
+PG_KEYWORD("set", SET, RESERVED_KEYWORD)
+PG_KEYWORD("skip", SKIP, RESERVED_KEYWORD)
+PG_KEYWORD("starts", STARTS, RESERVED_KEYWORD)
+PG_KEYWORD("then", THEN, RESERVED_KEYWORD)
+PG_KEYWORD("true", TRUE_P, RESERVED_KEYWORD)
+PG_KEYWORD("union", UNION, RESERVED_KEYWORD)
+PG_KEYWORD("unwind", UNWIND, RESERVED_KEYWORD)
+PG_KEYWORD("verbose", VERBOSE, RESERVED_KEYWORD)
+PG_KEYWORD("when", WHEN, RESERVED_KEYWORD)
+PG_KEYWORD("where", WHERE, RESERVED_KEYWORD)
+PG_KEYWORD("with", WITH, RESERVED_KEYWORD)
+PG_KEYWORD("xor", XOR, RESERVED_KEYWORD)
+PG_KEYWORD("yield", YIELD, RESERVED_KEYWORD)
\ No newline at end of file
diff --git a/src/include/parser/cypher_parse_node.h b/src/include/parser/cypher_parse_node.h
index c0d33f887..5a9720d7e 100644
--- a/src/include/parser/cypher_parse_node.h
+++ b/src/include/parser/cypher_parse_node.h
@@ -38,7 +38,7 @@ typedef struct cypher_parsestate
{
ParseState pstate;
char *graph_name;
- Oid graph_oid;
+ uint32 graph_oid;
Param *params;
int default_alias_num;
List *entities;
diff --git a/src/include/utils/ag_cache.h b/src/include/utils/ag_cache.h
index a368c26ba..8775da08f 100644
--- a/src/include/utils/ag_cache.h
+++ b/src/include/utils/ag_cache.h
@@ -33,7 +33,6 @@ typedef struct graph_cache_data
// label_cache_data contains the same fields that ag_label catalog table has
typedef struct label_cache_data
{
- Oid oid;
NameData name;
Oid graph;
int32 id;
@@ -47,7 +46,7 @@ graph_cache_data *search_graph_name_cache(const char *name);
graph_cache_data *search_graph_namespace_cache(Oid namespace);
label_cache_data *search_label_oid_cache(Oid oid);
label_cache_data *search_label_name_graph_cache(const char *name, Oid graph);
-label_cache_data *search_label_graph_id_cache(Oid graph, int32 id);
+label_cache_data *search_label_graph_oid_cache(Oid graph, int32 id);
label_cache_data *search_label_relation_cache(Oid relation);
label_cache_data *search_label_seq_name_graph_cache(const char *name, Oid graph);
diff --git a/src/include/utils/age_graphid_ds.h b/src/include/utils/age_graphid_ds.h
index cfacfc1db..ea9dabdc3 100644
--- a/src/include/utils/age_graphid_ds.h
+++ b/src/include/utils/age_graphid_ds.h
@@ -20,6 +20,8 @@
#ifndef AG_AGE_GRAPHID_DS_H
#define AG_AGE_GRAPHID_DS_H
+#include "utils/graphid.h"
+
#define IS_GRAPHID_STACK_EMPTY(stack) \
get_stack_size(stack) == 0
#define PEEK_GRAPHID_STACK(stack) \
diff --git a/src/include/utils/agtype.h b/src/include/utils/agtype.h
index 4433ef533..d8975f16f 100644
--- a/src/include/utils/agtype.h
+++ b/src/include/utils/agtype.h
@@ -31,8 +31,8 @@
#ifndef AG_AGTYPE_H
#define AG_AGTYPE_H
-#include "fmgr.h"
#include "access/htup_details.h"
+#include "fmgr.h"
#include "lib/stringinfo.h"
#include "nodes/pg_list.h"
#include "utils/array.h"
@@ -40,6 +40,7 @@
#include "utils/syscache.h"
#include "catalog/ag_namespace.h"
+#include "catalog/pg_type.h"
#include "utils/graphid.h"
/* Tokens used when sequentially processing an agtype value */
diff --git a/src/include/utils/graphid.h b/src/include/utils/graphid.h
index 103977785..999661cf2 100644
--- a/src/include/utils/graphid.h
+++ b/src/include/utils/graphid.h
@@ -27,6 +27,7 @@
#include "utils/syscache.h"
#include "catalog/ag_namespace.h"
+#include "catalog/pg_type.h"
typedef int64 graphid;
#define F_GRAPHIDEQ F_INT8EQ
diff --git a/src/include/utils/load/ag_load_edges.h b/src/include/utils/load/ag_load_edges.h
index eb5463ed8..3f4ffa8f9 100644
--- a/src/include/utils/load/ag_load_edges.h
+++ b/src/include/utils/load/ag_load_edges.h
@@ -20,11 +20,10 @@
#ifndef AG_LOAD_EDGES_H
#define AG_LOAD_EDGES_H
-
-#include
-#include
#include
+#include
#include
+#include
#include
@@ -80,7 +79,7 @@ typedef struct {
size_t header_row_length;
size_t curr_row_length;
char *graph_name;
- Oid graph_id;
+ Oid graph_oid;
char *object_name;
int object_id;
char *start_vertex;
@@ -92,7 +91,7 @@ typedef struct {
void edge_field_cb(void *field, size_t field_len, void *data);
void edge_row_cb(int delim __attribute__((unused)), void *data);
-int create_edges_from_csv_file(char *file_path, char *graph_name, Oid graph_id,
+int create_edges_from_csv_file(char *file_path, char *graph_name, Oid graph_oid,
char *object_name, int object_id );
#endif //AG_LOAD_EDGES_H
diff --git a/src/include/utils/load/ag_load_labels.h b/src/include/utils/load/ag_load_labels.h
index 5689c23db..8bf24c246 100644
--- a/src/include/utils/load/ag_load_labels.h
+++ b/src/include/utils/load/ag_load_labels.h
@@ -21,11 +21,10 @@
#ifndef AG_LOAD_LABELS_H
#define AG_LOAD_LABELS_H
-
-#include
-#include
#include
+#include
#include
+#include
#include
#include "postgres.h"
@@ -65,7 +64,6 @@
#include "utils/agtype.h"
#include "utils/graphid.h"
-
#define AGE_VERTIX 1
#define AGE_EDGE 2
@@ -89,7 +87,7 @@ typedef struct {
size_t header_row_length;
size_t curr_row_length;
char *graph_name;
- Oid graph_id;
+ Oid graph_oid;
char *object_name;
int object_id;
bool id_field_exists;
@@ -99,7 +97,7 @@ typedef struct {
void vertex_field_cb(void *field, size_t field_len, void *data);
void vertex_row_cb(int delim __attribute__((unused)), void *data);
-int create_labels_from_csv_file(char *file_path, char *graph_name, Oid graph_id,
+int create_labels_from_csv_file(char *file_path, char *graph_name, Oid graph_oid,
char *object_name, int object_id,
bool id_field_exists);
diff --git a/src/include/utils/load/age_load.h b/src/include/utils/load/age_load.h
index 49eff48de..9034ee133 100644
--- a/src/include/utils/load/age_load.h
+++ b/src/include/utils/load/age_load.h
@@ -63,9 +63,9 @@ agtype* create_agtype_from_list(char **header, char **fields,
size_t fields_len, int64 vertex_id);
agtype* create_agtype_from_list_i(char **header, char **fields,
size_t fields_len, size_t start_index);
-void insert_vertex_simple(Oid graph_id, char* label_name, graphid vertex_id,
- agtype* vertex_properties);
-void insert_edge_simple(Oid graph_id, char* label_name, graphid edge_id,
+void insert_vertex_simple(Oid graph_oid, char *label_name, graphid vertex_id,
+ agtype *vertex_properties);
+void insert_edge_simple(Oid graph_oid, char *label_name, graphid edge_id,
graphid start_id, graphid end_id,
agtype* end_properties);
diff --git a/tools/PerfectHash.pm b/tools/PerfectHash.pm
new file mode 100644
index 000000000..5a04f7e95
--- /dev/null
+++ b/tools/PerfectHash.pm
@@ -0,0 +1,399 @@
+#
+# For PostgreSQL Database Management System:
+# (formerly known as Postgres, then as Postgres95)
+#
+# Portions Copyright (c) 1996-2010, The PostgreSQL Global Development Group
+#
+# Portions Copyright (c) 1994, The Regents of the University of California
+#
+# Permission to use, copy, modify, and distribute this software and its documentation for any purpose,
+# without fee, and without a written agreement is hereby granted, provided that the above copyright notice
+# and this paragraph and the following two paragraphs appear in all copies.
+#
+# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT,
+# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
+# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+#
+# THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA
+# HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+#
+#----------------------------------------------------------------------
+#
+# PerfectHash.pm
+# Perl module that constructs minimal perfect hash functions
+#
+# This code constructs a minimal perfect hash function for the given
+# set of keys, using an algorithm described in
+# "An optimal algorithm for generating minimal perfect hash functions"
+# by Czech, Havas and Majewski in Information Processing Letters,
+# 43(5):256-264, October 1992.
+# This implementation is loosely based on NetBSD's "nbperf",
+# which was written by Joerg Sonnenberger.
+#
+# The resulting hash function is perfect in the sense that if the presented
+# key is one of the original set, it will return the key's index in the set
+# (in range 0..N-1). However, the caller must still verify the match,
+# as false positives are possible. Also, the hash function may return
+# values that are out of range (negative or >= N), due to summing unrelated
+# hashtable entries. This indicates that the presented key is definitely
+# not in the set.
+#
+#
+# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/tools/PerfectHash.pm
+#
+#----------------------------------------------------------------------
+
+package PerfectHash;
+
+use strict;
+use warnings;
+
+
+# At runtime, we'll compute two simple hash functions of the input key,
+# and use them to index into a mapping table. The hash functions are just
+# multiply-and-add in uint32 arithmetic, with different multipliers and
+# initial seeds. All the complexity in this module is concerned with
+# selecting hash parameters that will work and building the mapping table.
+
+# We support making case-insensitive hash functions, though this only
+# works for a strict-ASCII interpretation of case insensitivity,
+# ie, A-Z maps onto a-z and nothing else.
+my $case_fold = 0;
+
+
+#
+# Construct a C function implementing a perfect hash for the given keys.
+# The C function definition is returned as a string.
+#
+# The keys should be passed as an array reference. They can be any set
+# of Perl strings; it is caller's responsibility that there not be any
+# duplicates. (Note that the "strings" can be binary data, but hashing
+# e.g. OIDs has endianness hazards that callers must overcome.)
+#
+# The name to use for the function is specified as the second argument.
+# It will be a global function by default, but the caller may prepend
+# "static " to the result string if it wants a static function.
+#
+# Additional options can be specified as keyword-style arguments:
+#
+# case_fold => bool
+# If specified as true, the hash function is case-insensitive, for the
+# limited idea of case-insensitivity explained above.
+#
+# fixed_key_length => N
+# If specified, all keys are assumed to have length N bytes, and the
+# hash function signature will be just "int f(const void *key)"
+# rather than "int f(const void *key, size_t keylen)".
+#
+sub generate_hash_function
+{
+ my ($keys_ref, $funcname, %options) = @_;
+
+ # It's not worth passing this around as a parameter; just use a global.
+ $case_fold = $options{case_fold} || 0;
+
+ # Try different hash function parameters until we find a set that works
+ # for these keys. The multipliers are chosen to be primes that are cheap
+ # to calculate via shift-and-add, so don't change them without care.
+ # (Commonly, random seeds are tried, but we want reproducible results
+ # from this program so we don't do that.)
+ my $hash_mult1 = 31;
+ my $hash_mult2;
+ my $hash_seed1;
+ my $hash_seed2;
+ my @subresult;
+ FIND_PARAMS:
+ foreach (127, 257, 521, 1033, 2053)
+ {
+ $hash_mult2 = $_; # "foreach $hash_mult2" doesn't work
+ for ($hash_seed1 = 0; $hash_seed1 < 10; $hash_seed1++)
+ {
+ for ($hash_seed2 = 0; $hash_seed2 < 10; $hash_seed2++)
+ {
+ @subresult = _construct_hash_table(
+ $keys_ref, $hash_mult1, $hash_mult2,
+ $hash_seed1, $hash_seed2);
+ last FIND_PARAMS if @subresult;
+ }
+ }
+ }
+
+ # Choke if we couldn't find a workable set of parameters.
+ die "failed to generate perfect hash" if !@subresult;
+
+ # Extract info from _construct_hash_table's result array.
+ my $elemtype = $subresult[0];
+ my @hashtab = @{ $subresult[1] };
+ my $nhash = scalar(@hashtab);
+
+ # OK, construct the hash function definition including the hash table.
+ my $f = '';
+ $f .= sprintf "int\n";
+ if (defined $options{fixed_key_length})
+ {
+ $f .= sprintf "%s(const void *key)\n{\n", $funcname;
+ }
+ else
+ {
+ $f .= sprintf "%s(const void *key, size_t keylen)\n{\n", $funcname;
+ }
+ $f .= sprintf "\tstatic const %s h[%d] = {\n", $elemtype, $nhash;
+ for (my $i = 0; $i < $nhash; $i++)
+ {
+ $f .= sprintf "%s%6d,%s",
+ ($i % 8 == 0 ? "\t\t" : " "),
+ $hashtab[$i],
+ ($i % 8 == 7 ? "\n" : "");
+ }
+ $f .= sprintf "\n" if ($nhash % 8 != 0);
+ $f .= sprintf "\t};\n\n";
+ $f .= sprintf "\tconst unsigned char *k = (const unsigned char *) key;\n";
+ $f .= sprintf "\tsize_t\t\tkeylen = %d;\n", $options{fixed_key_length}
+ if (defined $options{fixed_key_length});
+ $f .= sprintf "\tuint32\t\ta = %d;\n", $hash_seed1;
+ $f .= sprintf "\tuint32\t\tb = %d;\n\n", $hash_seed2;
+ $f .= sprintf "\twhile (keylen--)\n\t{\n";
+ $f .= sprintf "\t\tunsigned char c = *k++";
+ $f .= sprintf " | 0x20" if $case_fold; # see comment below
+ $f .= sprintf ";\n\n";
+ $f .= sprintf "\t\ta = a * %d + c;\n", $hash_mult1;
+ $f .= sprintf "\t\tb = b * %d + c;\n", $hash_mult2;
+ $f .= sprintf "\t}\n";
+ $f .= sprintf "\treturn h[a %% %d] + h[b %% %d];\n", $nhash, $nhash;
+ $f .= sprintf "}\n";
+
+ return $f;
+}
+
+
+# Calculate a hash function as the run-time code will do.
+#
+# If we are making a case-insensitive hash function, we implement that
+# by OR'ing 0x20 into each byte of the key. This correctly transforms
+# upper-case ASCII into lower-case ASCII, while not changing digits or
+# dollar signs. (It does change '_', as well as other characters not
+# likely to appear in keywords; this has little effect on the hash's
+# ability to discriminate keywords.)
+sub _calc_hash
+{
+ my ($key, $mult, $seed) = @_;
+
+ my $result = $seed;
+ for my $c (split //, $key)
+ {
+ my $cn = ord($c);
+ $cn |= 0x20 if $case_fold;
+ $result = ($result * $mult + $cn) % 4294967296;
+ }
+ return $result;
+}
+
+
+# Attempt to construct a mapping table for a minimal perfect hash function
+# for the given keys, using the specified hash parameters.
+#
+# Returns an array containing the mapping table element type name as the
+# first element, and a ref to an array of the table values as the second.
+#
+# Returns an empty array on failure; then caller should choose different
+# hash parameter(s) and try again.
+sub _construct_hash_table
+{
+ my ($keys_ref, $hash_mult1, $hash_mult2, $hash_seed1, $hash_seed2) = @_;
+ my @keys = @{$keys_ref};
+
+ # This algorithm is based on a graph whose edges correspond to the
+ # keys and whose vertices correspond to entries of the mapping table.
+ # A key's edge links the two vertices whose indexes are the outputs of
+ # the two hash functions for that key. For K keys, the mapping
+ # table must have at least 2*K+1 entries, guaranteeing that there's at
+ # least one unused entry. (In principle, larger mapping tables make it
+ # easier to find a workable hash and increase the number of inputs that
+ # can be rejected due to touching unused hashtable entries. In practice,
+ # neither effect seems strong enough to justify using a larger table.)
+ my $nedges = scalar @keys; # number of edges
+ my $nverts = 2 * $nedges + 1; # number of vertices
+
+ # However, it would be very bad if $nverts were exactly equal to either
+ # $hash_mult1 or $hash_mult2: effectively, that hash function would be
+ # sensitive to only the last byte of each key. Cases where $nverts is a
+ # multiple of either multiplier likewise lose information. (But $nverts
+ # can't actually divide them, if they've been intelligently chosen as
+ # primes.) We can avoid such problems by adjusting the table size.
+ while ($nverts % $hash_mult1 == 0
+ || $nverts % $hash_mult2 == 0)
+ {
+ $nverts++;
+ }
+
+ # Initialize the array of edges.
+ my @E = ();
+ foreach my $kw (@keys)
+ {
+ # Calculate hashes for this key.
+ # The hashes are immediately reduced modulo the mapping table size.
+ my $hash1 = _calc_hash($kw, $hash_mult1, $hash_seed1) % $nverts;
+ my $hash2 = _calc_hash($kw, $hash_mult2, $hash_seed2) % $nverts;
+
+ # If the two hashes are the same for any key, we have to fail
+ # since this edge would itself form a cycle in the graph.
+ return () if $hash1 == $hash2;
+
+ # Add the edge for this key.
+ push @E, { left => $hash1, right => $hash2 };
+ }
+
+ # Initialize the array of vertices, giving them all empty lists
+ # of associated edges. (The lists will be hashes of edge numbers.)
+ my @V = ();
+ for (my $v = 0; $v < $nverts; $v++)
+ {
+ push @V, { edges => {} };
+ }
+
+ # Insert each edge in the lists of edges connected to its vertices.
+ for (my $e = 0; $e < $nedges; $e++)
+ {
+ my $v = $E[$e]{left};
+ $V[$v]{edges}->{$e} = 1;
+
+ $v = $E[$e]{right};
+ $V[$v]{edges}->{$e} = 1;
+ }
+
+ # Now we attempt to prove the graph acyclic.
+ # A cycle-free graph is either empty or has some vertex of degree 1.
+ # Removing the edge attached to that vertex doesn't change this property,
+ # so doing that repeatedly will reduce the size of the graph.
+ # If the graph is empty at the end of the process, it was acyclic.
+ # We track the order of edge removal so that the next phase can process
+ # them in reverse order of removal.
+ my @output_order = ();
+
+ # Consider each vertex as a possible starting point for edge-removal.
+ for (my $startv = 0; $startv < $nverts; $startv++)
+ {
+ my $v = $startv;
+
+ # If vertex v is of degree 1 (i.e. exactly 1 edge connects to it),
+ # remove that edge, and then consider the edge's other vertex to see
+ # if it is now of degree 1. The inner loop repeats until reaching a
+ # vertex not of degree 1.
+ while (scalar(keys(%{ $V[$v]{edges} })) == 1)
+ {
+ # Unlink its only edge.
+ my $e = (keys(%{ $V[$v]{edges} }))[0];
+ delete($V[$v]{edges}->{$e});
+
+ # Unlink the edge from its other vertex, too.
+ my $v2 = $E[$e]{left};
+ $v2 = $E[$e]{right} if ($v2 == $v);
+ delete($V[$v2]{edges}->{$e});
+
+ # Push e onto the front of the output-order list.
+ unshift @output_order, $e;
+
+ # Consider v2 on next iteration of inner loop.
+ $v = $v2;
+ }
+ }
+
+ # We succeeded only if all edges were removed from the graph.
+ return () if (scalar(@output_order) != $nedges);
+
+ # OK, build the hash table of size $nverts.
+ my @hashtab = (0) x $nverts;
+ # We need a "visited" flag array in this step, too.
+ my @visited = (0) x $nverts;
+
+ # The goal is that for any key, the sum of the hash table entries for
+ # its first and second hash values is the desired output (i.e., the key
+ # number). By assigning hash table values in the selected edge order,
+ # we can guarantee that that's true. This works because the edge first
+ # removed from the graph (and hence last to be visited here) must have
+ # at least one vertex it shared with no other edge; hence it will have at
+ # least one vertex (hashtable entry) still unvisited when we reach it here,
+ # and we can assign that unvisited entry a value that makes the sum come
+ # out as we wish. By induction, the same holds for all the other edges.
+ foreach my $e (@output_order)
+ {
+ my $l = $E[$e]{left};
+ my $r = $E[$e]{right};
+ if (!$visited[$l])
+ {
+ # $hashtab[$r] might be zero, or some previously assigned value.
+ $hashtab[$l] = $e - $hashtab[$r];
+ }
+ else
+ {
+ die "oops, doubly used hashtab entry" if $visited[$r];
+ # $hashtab[$l] might be zero, or some previously assigned value.
+ $hashtab[$r] = $e - $hashtab[$l];
+ }
+ # Now freeze both of these hashtab entries.
+ $visited[$l] = 1;
+ $visited[$r] = 1;
+ }
+
+ # Detect range of values needed in hash table.
+ my $hmin = $nedges;
+ my $hmax = 0;
+ for (my $v = 0; $v < $nverts; $v++)
+ {
+ $hmin = $hashtab[$v] if $hashtab[$v] < $hmin;
+ $hmax = $hashtab[$v] if $hashtab[$v] > $hmax;
+ }
+
+ # Choose width of hashtable entries. In addition to the actual values,
+ # we need to be able to store a flag for unused entries, and we wish to
+ # have the property that adding any other entry value to the flag gives
+ # an out-of-range result (>= $nedges).
+ my $elemtype;
+ my $unused_flag;
+
+ if ( $hmin >= -0x7F
+ && $hmax <= 0x7F
+ && $hmin + 0x7F >= $nedges)
+ {
+ # int8 will work
+ $elemtype = 'int8';
+ $unused_flag = 0x7F;
+ }
+ elsif ($hmin >= -0x7FFF
+ && $hmax <= 0x7FFF
+ && $hmin + 0x7FFF >= $nedges)
+ {
+ # int16 will work
+ $elemtype = 'int16';
+ $unused_flag = 0x7FFF;
+ }
+ elsif ($hmin >= -0x7FFFFFFF
+ && $hmax <= 0x7FFFFFFF
+ && $hmin + 0x3FFFFFFF >= $nedges)
+ {
+ # int32 will work
+ $elemtype = 'int32';
+ $unused_flag = 0x3FFFFFFF;
+ }
+ else
+ {
+ die "hash table values too wide";
+ }
+
+ # Set any unvisited hashtable entries to $unused_flag.
+ for (my $v = 0; $v < $nverts; $v++)
+ {
+ $hashtab[$v] = $unused_flag if !$visited[$v];
+ }
+
+ return ($elemtype, \@hashtab);
+}
+
+1;
diff --git a/tools/gen_keywordlist.pl b/tools/gen_keywordlist.pl
new file mode 100755
index 000000000..499300433
--- /dev/null
+++ b/tools/gen_keywordlist.pl
@@ -0,0 +1,221 @@
+#
+# For PostgreSQL Database Management System:
+# (formerly known as Postgres, then as Postgres95)
+#
+# Portions Copyright (c) 1996-2010, The PostgreSQL Global Development Group
+#
+# Portions Copyright (c) 1994, The Regents of the University of California
+#
+# Permission to use, copy, modify, and distribute this software and its documentation for any purpose,
+# without fee, and without a written agreement is hereby granted, provided that the above copyright notice
+# and this paragraph and the following two paragraphs appear in all copies.
+#
+# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT,
+# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
+# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+#
+# THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA
+# HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+#
+#----------------------------------------------------------------------
+#
+# gen_keywordlist.pl
+# Perl script that transforms a list of keywords into a ScanKeywordList
+# data structure that can be passed to ScanKeywordLookup().
+#
+# The input is a C header file containing a series of macro calls
+# PG_KEYWORD("keyword", ...)
+# Lines not starting with PG_KEYWORD are ignored. The keywords are
+# implicitly numbered 0..N-1 in order of appearance in the header file.
+# Currently, the keywords are required to appear in ASCII order.
+#
+# The output is a C header file that defines a "const ScanKeywordList"
+# variable named according to the -v switch ("ScanKeywords" by default).
+# The variable is marked "static" unless the -e switch is given.
+#
+# ScanKeywordList uses hash-based lookup, so this script also selects
+# a minimal perfect hash function for the keyword set, and emits a
+# static hash function that is referenced in the ScanKeywordList struct.
+# The hash function is case-insensitive unless --no-case-fold is specified.
+# Note that case folding works correctly only for all-ASCII keywords!
+#
+#
+# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/tools/gen_keywordlist.pl
+#
+#----------------------------------------------------------------------
+
+
+use strict;
+use warnings;
+use Getopt::Long;
+
+use FindBin;
+use lib $FindBin::RealBin;
+
+use PerfectHash;
+
+my $output_path = '';
+my $extern = 0;
+my $case_fold = 1;
+my $varname = 'ScanKeywords';
+
+GetOptions(
+ 'output:s' => \$output_path,
+ 'extern' => \$extern,
+ 'case-fold!' => \$case_fold,
+ 'varname:s' => \$varname) || usage();
+
+my $kw_input_file = shift @ARGV || die "No input file.\n";
+
+# Make sure output_path ends in a slash if needed.
+if ($output_path ne '' && substr($output_path, -1) ne '/')
+{
+ $output_path .= '/';
+}
+
+$kw_input_file =~ /(\w+)\.h$/
+ || die "Input file must be named something.h.\n";
+my $base_filename = $1 . '_d';
+my $kw_def_file = $output_path . $base_filename . '.h';
+
+open(my $kif, '<', $kw_input_file) || die "$kw_input_file: $!\n";
+open(my $kwdef, '>', $kw_def_file) || die "$kw_def_file: $!\n";
+
+# Opening boilerplate for keyword definition header.
+printf $kwdef <<EOM, $base_filename, uc $base_filename, uc $base_filename;
+/*-------------------------------------------------------------------------
+ *
+ * %s.h
+ *    List of keywords represented as a ScanKeywordList.
+ *
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES
+ *  ******************************
+ *  *** DO NOT EDIT THIS FILE! ***
+ *  ******************************
+ *
+ *  It has been GENERATED by src/tools/gen_keywordlist.pl
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef %s_H
+#define %s_H
+
+#include "common/kwlookup.h"
+
+EOM
+
+# Parse input file for keyword names.
+my @keywords;
+while (<$kif>)
+{
+ if (/^PG_KEYWORD\("(\w+)"/)
+ {
+ push @keywords, $1;
+ }
+}
+
+# When being case-insensitive, insist that the input be all-lower-case.
+if ($case_fold)
+{
+ foreach my $kw (@keywords)
+ {
+ die qq|The keyword "$kw" is not lower-case in $kw_input_file\n|
+ if ($kw ne lc $kw);
+ }
+}
+
+# Error out if the keyword names are not in ASCII order.
+#
+# While this isn't really necessary with hash-based lookup, it's still
+# helpful because it provides a cheap way to reject duplicate keywords.
+# Also, insisting on sorted order ensures that code that scans the keyword
+# table linearly will see the keywords in a canonical order.
+for my $i (0 .. $#keywords - 1)
+{
+ die
+ qq|The keyword "$keywords[$i + 1]" is out of order in $kw_input_file\n|
+ if ($keywords[$i] cmp $keywords[ $i + 1 ]) >= 0;
+}
+
+# Emit the string containing all the keywords.
+
+printf $kwdef qq|static const char %s_kw_string[] =\n\t"|, $varname;
+print $kwdef join qq|\\0"\n\t"|, @keywords;
+print $kwdef qq|";\n\n|;
+
+# Emit an array of numerical offsets which will be used to index into the
+# keyword string. Also determine max keyword length.
+
+printf $kwdef "static const uint16 %s_kw_offsets[] = {\n", $varname;
+
+my $offset = 0;
+my $max_len = 0;
+foreach my $name (@keywords)
+{
+ my $this_length = length($name);
+
+ print $kwdef "\t$offset,\n";
+
+ # Calculate the cumulative offset of the next keyword,
+ # taking into account the null terminator.
+ $offset += $this_length + 1;
+
+ # Update max keyword length.
+ $max_len = $this_length if $max_len < $this_length;
+}
+
+print $kwdef "};\n\n";
+
+# Emit a macro defining the number of keywords.
+# (In some places it's useful to have access to that as a constant.)
+
+printf $kwdef "#define %s_NUM_KEYWORDS %d\n\n", uc $varname, scalar @keywords;
+
+# Emit the definition of the hash function.
+
+my $funcname = $varname . "_hash_func";
+
+my $f = PerfectHash::generate_hash_function(\@keywords, $funcname,
+ case_fold => $case_fold);
+
+printf $kwdef qq|static %s\n|, $f;
+
+# Emit the struct that wraps all this lookup info into one variable.
+
+printf $kwdef "static " if !$extern;
+printf $kwdef "const ScanKeywordList %s = {\n", $varname;
+printf $kwdef qq|\t%s_kw_string,\n|, $varname;
+printf $kwdef qq|\t%s_kw_offsets,\n|, $varname;
+printf $kwdef qq|\t%s,\n|, $funcname;
+printf $kwdef qq|\t%s_NUM_KEYWORDS,\n|, uc $varname;
+printf $kwdef qq|\t%d\n|, $max_len;
+printf $kwdef "};\n\n";
+
+printf $kwdef "#endif\t\t\t\t\t\t\t/* %s_H */\n", uc $base_filename;
+
+
+sub usage
+{
+	die <<EOM;
+Usage: gen_keywordlist.pl [--output/-o <path>] [--varname/-v <varname>] [--extern/-e] [--[no-]case-fold] input_file
+ --output Output directory (default '.')
+ --varname Name for ScanKeywordList variable (default 'ScanKeywords')
+ --extern Allow the ScanKeywordList variable to be globally visible
+ --no-case-fold Keyword matching is to be case-sensitive
+
+gen_keywordlist.pl transforms a list of keywords into a ScanKeywordList.
+The output filename is derived from the input file by inserting _d,
+for example kwlist_d.h is produced from kwlist.h.
+EOM
+}