diff --git a/.github/workflows/installcheck.yml b/.github/workflows/installcheck.yml new file mode 100644 index 000000000..76503fe06 --- /dev/null +++ b/.github/workflows/installcheck.yml @@ -0,0 +1,40 @@ +name: PG12 Regression + +on: + push: + branches: [ '*' ] + pull_request: + branches: [ '*' ] + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - name: Get latest commit id of PostgreSQL 12 + run: | + echo "PG_COMMIT_HASH=$(git ls-remote git://git.postgresql.org/git/postgresql.git refs/heads/REL_12_STABLE | awk '{print $1}')" >> $GITHUB_ENV + + - name: Cache PostgreSQL 12 + uses: actions/cache@v2 + id: pg12cache + with: + path: ~/pg12 + key: ${{ runner.os }}-v1-pg12-${{ env.PG_COMMIT_HASH }} + + - name: Install PostgreSQL 12 + if: steps.pg12cache.outputs.cache-hit != 'true' + run: | + git clone --depth 1 --branch REL_12_STABLE git://git.postgresql.org/git/postgresql.git ~/pg12source + cd ~/pg12source + ./configure --prefix=$HOME/pg12 CFLAGS="-std=gnu99 -ggdb -O0" --enable-cassert + make install -j$(nproc) > /dev/null + + - uses: actions/checkout@v2 + - name: Regression + run: | + export PG_CONFIG=$HOME/pg12/bin/pg_config + make -j$(nproc) + make install + make installcheck \ No newline at end of file diff --git a/.gitignore b/.gitignore index 78e991256..46a93606b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,5 @@ *.o *.so -.gitignore build.sh .idea .deps diff --git a/.travis.yml b/.travis.yml index 7dadc21a4..b8c9a5ac3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,13 +4,13 @@ dist: bionic arch: amd64 jobs: include: - - name: PostgreSQL 11 + - name: PostgreSQL 12 compiler: gcc addons: apt: packages: - - postgresql-11 - - postgresql-server-dev-11 + - postgresql-12 + - postgresql-server-dev-12 script: - sudo make install -j$(nproc) - make installcheck diff --git a/Dockerfile b/Dockerfile index d0b449aed..cd9622ca0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,14 +17,14 @@ # -FROM postgres:11 +FROM postgres:12 RUN apt-get update RUN apt-get install --assume-yes --no-install-recommends --no-install-suggests \ bison \ build-essential \ flex \ - postgresql-server-dev-11 + postgresql-server-dev-12 COPY . 
/age RUN cd /age && make install diff --git a/Makefile b/Makefile index deb3b4d90..f8b1369b5 100644 --- a/Makefile +++ b/Makefile @@ -70,7 +70,7 @@ OBJS = src/backend/age.o \ EXTENSION = age -DATA = age--1.0.0.sql +DATA = age--1.1.0.sql # sorted in dependency order REGRESS = scan \ @@ -99,15 +99,23 @@ ag_regress_dir = $(srcdir)/regress REGRESS_OPTS = --load-extension=age --inputdir=$(ag_regress_dir) --outputdir=$(ag_regress_dir) --temp-instance=$(ag_regress_dir)/instance --port=61958 --encoding=UTF-8 ag_regress_out = instance/ log/ results/ regression.* -EXTRA_CLEAN = $(addprefix $(ag_regress_dir)/, $(ag_regress_out)) src/backend/parser/cypher_gram.c src/include/parser/cypher_gram_def.h +EXTRA_CLEAN = $(addprefix $(ag_regress_dir)/, $(ag_regress_out)) src/backend/parser/cypher_gram.c src/include/parser/cypher_gram_def.h src/include/parser/cypher_kwlist_d.h + +GEN_KEYWORDLIST = $(PERL) -I ./tools/ ./tools/gen_keywordlist.pl +GEN_KEYWORDLIST_DEPS = ./tools/gen_keywordlist.pl tools/PerfectHash.pm ag_include_dir = $(srcdir)/src/include PG_CPPFLAGS = -I$(ag_include_dir) -I$(ag_include_dir)/parser -PG_CONFIG = pg_config +PG_CONFIG ?= pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) +src/backend/parser/cypher_keywords.o: src/include/parser/cypher_kwlist_d.h + +src/include/parser/cypher_kwlist_d.h: src/include/parser/cypher_kwlist.h $(GEN_KEYWORDLIST_DEPS) + $(GEN_KEYWORDLIST) --extern --varname CypherKeyword --output src/include/parser $< + src/include/parser/cypher_gram_def.h: src/backend/parser/cypher_gram.c src/backend/parser/cypher_gram.c: BISONFLAGS += --defines=src/include/parser/cypher_gram_def.h diff --git a/NOTICE b/NOTICE index 282a6fc1a..40ce5ef8a 100644 --- a/NOTICE +++ b/NOTICE @@ -1,4 +1,4 @@ -Apache AGE (incubating) +Apache AGE Copyright 2022 The Apache Software Foundation. This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/README.md b/README.md index 7484e4d76..285496329 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Intelligent -- AGE allows you to perform graph queries that are the basis for ma ## Latest happenings -- Latest Apache AGE release, [Apache AGE 1.0.0 (https://github.com/apache/age/releases/tag/v1.0.0-rc1). +- Latest Apache AGE release, [Apache AGE 1.1.0](https://github.com/apache/age/releases/tag/v1.1.0-rc1). - The latest Apache AGE documentation is now available at [here](https://age.apache.org/docs/master/index.html). - The roadmap has been updated, please check out the [Apache AGE website](http://age.apache.org/). - Send all your comments and inquiries to the user mailing list, users@age.apache.org. diff --git a/RELEASE b/RELEASE index 3e706219f..a8369970f 100644 --- a/RELEASE +++ b/RELEASE @@ -15,22 +15,28 @@ # specific language governing permissions and limitations # under the License. -Release Notes for Apache AGE release v1.0.0 +Release Notes for Apache AGE release v1.1.0 -Apache AGE 1.0.0 - Release Notes +Apache AGE 1.1.0 - Release Notes + + Support for Agtype containment ops and GIN Indices. + Add CALL [YIELD] grammar rules for the implementation of CALL procedures. + VLE path variable integration performance patch. + Improve WHERE clause performance and support index scans. + Allow global graph contexts to see currentCommandIds. + Cache Agtype and GRAPHID OIDs. + Allow lists and maps to be used in the SET clause. + Fix bug in aggregate function collect(). + Fix bug in WHERE clause and property constraints. + Fix VLE local cache bug (crash). 
+ Fix bug where integers were not being serialized correctly when stored in GIN indices. + Fix the VLE peek_stack_head routine to return a NULL if the stack is NULL. + Fix MERGE visibility in chained commands, SET specifically. + Fix github issue #212 - Add access operator (`->`, `->>`) to Agtype. + Fix github issue #220 - fix local cached contexts for static procedures. + Fix github issue #224 - fix regression tests to fix issues on mac with trigonometric functions. + Fix github issue #235 - when MERGE and SET were used together. + Fix github issue #240 - negative array bounds. + Fix github issue #240 - negative array bounds - addendum. + Updated README. - Add an upgrading SQL script file from 0.5.0 to 0.6.0 - Add upgrading file age--0.6.0--0.7.0.sql - Refactor function get_agtype_value_object_value - Age load issue (#188) - Refactor agtype_access_operator - Bugfix - Remove INLINE from function declaration - Rebase VLE code - Implement Merge Clause - Bugfix: chained union logic - Allow a path of one vertex - Created functions for load graph from CSV files - Add UNION into EXPLAIN grammar rule - Implement `UNWIND` clause(#173) - Bugfix:(nodejs) Corrects parsing for independence value(#177) - Feat: Implement `OPTIONAL MATCH` (#175) diff --git a/age--0.5.0--0.6.0.sql b/age--0.5.0--0.6.0.sql deleted file mode 100644 index dbe620f13..000000000 --- a/age--0.5.0--0.6.0.sql +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION age UPDATE TO '0.6.0'" to load this file. \quit - -CREATE OR REPLACE FUNCTION ag_catalog.age_vle(IN agtype, IN agtype, IN agtype, - IN agtype, IN agtype, IN agtype, - IN agtype, OUT edges agtype) -RETURNS SETOF agtype -LANGUAGE C -IMMUTABLE -STRICT -AS 'MODULE_PATHNAME'; - --- --- End --- diff --git a/age--0.6.0--0.7.0.sql b/age--0.6.0--0.7.0.sql deleted file mode 100644 index cdbaf4406..000000000 --- a/age--0.6.0--0.7.0.sql +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION age UPDATE TO '0.7.0'" to load this file. \quit - -CREATE FUNCTION ag_catalog.create_vlabel(graph_name name, label_name name) - RETURNS void - LANGUAGE c -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.create_elabel(graph_name name, label_name name) - RETURNS void - LANGUAGE c -AS 'MODULE_PATHNAME'; - --- binary I/O functions -CREATE FUNCTION ag_catalog.graphid_send(graphid) -RETURNS bytea -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.graphid_recv(internal) -RETURNS graphid -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -UPDATE pg_type SET -typsend = 'ag_catalog.graphid_send', -typreceive = 'ag_catalog.graphid_recv' -WHERE typname = 'graphid'; - --- binary I/O functions -CREATE FUNCTION ag_catalog.agtype_send(agtype) -RETURNS bytea -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.agtype_recv(internal) -RETURNS agtype -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -UPDATE pg_type SET -typsend = 'ag_catalog.agtype_send', -typreceive = 'ag_catalog.agtype_recv' -WHERE typname = 'agtype'; - --- agtype -> int4[] -CREATE FUNCTION ag_catalog.agtype_to_int4_array(variadic "any") - RETURNS int[] - LANGUAGE c - STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE CAST (agtype AS int[]) - WITH FUNCTION ag_catalog.agtype_to_int4_array(variadic "any"); - -CREATE FUNCTION ag_catalog.age_eq_tilde(agtype, agtype) -RETURNS agtype -LANGUAGE c -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE OR REPLACE FUNCTION ag_catalog.age_vle(IN agtype, IN agtype, IN agtype, - IN agtype, IN agtype, IN agtype, - IN agtype, OUT edges agtype) -RETURNS SETOF agtype -LANGUAGE C -STABLE -CALLED ON NULL INPUT -PARALLEL UNSAFE -- might be safe -AS 'MODULE_PATHNAME'; - --- function to build an edge for a VLE match -CREATE FUNCTION ag_catalog.age_build_vle_match_edge(agtype, agtype) -RETURNS agtype -LANGUAGE C -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- function to match a terminal vle edge -CREATE FUNCTION ag_catalog.age_match_vle_terminal_edge(agtype, agtype, agtype) -RETURNS boolean -LANGUAGE C -STABLE -CALLED ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- function to create an AGTV_PATH from a VLE_path_container -CREATE FUNCTION ag_catalog.age_materialize_vle_path(agtype) -RETURNS agtype -LANGUAGE C -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- function to create an AGTV_ARRAY of edges from a VLE_path_container -CREATE FUNCTION ag_catalog.age_materialize_vle_edges(agtype) -RETURNS agtype -LANGUAGE C -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_match_vle_edge_to_id_qual(agtype, agtype, agtype) -RETURNS boolean -LANGUAGE C -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_match_two_vle_edges(agtype, agtype) -RETURNS boolean -LANGUAGE C -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- list functions -CREATE FUNCTION ag_catalog.age_keys(agtype) -RETURNS agtype -LANGUAGE c -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION 
ag_catalog.age_labels(agtype) -RETURNS agtype -LANGUAGE c -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_nodes(agtype) -RETURNS agtype -LANGUAGE c -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_relationships(agtype) -RETURNS agtype -LANGUAGE c -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_range(variadic "any") -RETURNS agtype -LANGUAGE c -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- --- End --- diff --git a/age--0.7.0--1.0.0.sql b/age--0.7.0--1.0.0.sql deleted file mode 100644 index 59d3bff82..000000000 --- a/age--0.7.0--1.0.0.sql +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION age UPDATE TO '1.0.0'" to load this file. \quit - -CREATE FUNCTION ag_catalog.load_labels_from_file(graph_name name, - label_name name, - file_path text, - id_field_exists bool default true) - RETURNS void - LANGUAGE c - AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.load_edges_from_file(graph_name name, - label_name name, - file_path text) - RETURNS void - LANGUAGE c - AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog._cypher_merge_clause(internal) -RETURNS void -LANGUAGE c -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_unnest(agtype, block_types boolean = false) - RETURNS SETOF agtype - LANGUAGE c - STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- --- End --- diff --git a/age--1.0.0.sql b/age--1.1.0.sql similarity index 99% rename from age--1.0.0.sql rename to age--1.1.0.sql index e5c42192a..f6b555b3f 100644 --- a/age--1.0.0.sql +++ b/age--1.1.0.sql @@ -24,12 +24,14 @@ -- catalog tables -- + CREATE TABLE ag_graph ( + graphid oid NOT NULL, name name NOT NULL, namespace regnamespace NOT NULL -) WITH (OIDS); +); -CREATE UNIQUE INDEX ag_graph_oid_index ON ag_graph USING btree (oid); +CREATE UNIQUE INDEX ag_graph_graphid_index ON ag_graph USING btree (graphid); CREATE UNIQUE INDEX ag_graph_name_index ON ag_graph USING btree (name); @@ -43,20 +45,22 @@ CREATE DOMAIN label_id AS int NOT NULL CHECK (VALUE > 0 AND VALUE <= 65535); CREATE DOMAIN label_kind AS "char" NOT NULL CHECK (VALUE = 'v' OR VALUE = 'e'); CREATE TABLE ag_label ( + name name NOT NULL, graph oid NOT NULL, id label_id, kind label_kind, - relation regclass NOT NULL -) WITH (OIDS); - -CREATE UNIQUE INDEX ag_label_oid_index ON ag_label USING btree (oid); + relation regclass NOT NULL, + CONSTRAINT fk_graph_oid + FOREIGN KEY(graph) + REFERENCES ag_graph(graphid) +); CREATE UNIQUE INDEX ag_label_name_graph_index ON ag_label USING btree (name, graph); -CREATE UNIQUE INDEX 
ag_label_graph_id_index +CREATE UNIQUE INDEX ag_label_graph_oid_index ON ag_label USING btree (graph, id); diff --git a/age.control b/age.control index 4e320d024..b0fb1401d 100644 --- a/age.control +++ b/age.control @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. -default_version = '1.0.0' +default_version = '1.1.0' comment = 'AGE database extension' module_pathname = '$libdir/age' diff --git a/regress/expected/agtype.out b/regress/expected/agtype.out index 196747a7e..4b92a2932 100644 --- a/regress/expected/agtype.out +++ b/regress/expected/agtype.out @@ -23,6 +23,7 @@ -- Load extension and set path -- LOAD 'age'; +SET extra_float_digits = 0; SET search_path TO ag_catalog; -- -- Create a table using the AGTYPE type diff --git a/regress/expected/catalog.out b/regress/expected/catalog.out index 8a83f45a6..19ef801e7 100644 --- a/regress/expected/catalog.out +++ b/regress/expected/catalog.out @@ -28,7 +28,7 @@ NOTICE: graph "g" has been created (1 row) -SELECT * FROM ag_graph WHERE name = 'g'; +SELECT name, namespace FROM ag_graph WHERE name = 'g'; name | namespace ------+----------- g | g @@ -119,7 +119,7 @@ NOTICE: graph "GraphB" has been created (1 row) -- Show GraphA's construction to verify case is preserved. -SELECT * FROM ag_graph WHERE name = 'GraphA'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphA'; name | namespace --------+----------- GraphA | "GraphA" @@ -140,7 +140,7 @@ NOTICE: graph "GraphA" renamed to "GraphX" (1 row) -- Show GraphX's construction to verify case is preserved. -SELECT * FROM ag_graph WHERE name = 'GraphX'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphX'; name | namespace --------+----------- GraphX | "GraphX" @@ -153,14 +153,14 @@ SELECT nspname FROM pg_namespace WHERE nspname = 'GraphX'; (1 row) -- Verify there isn't a graph GraphA anymore. -SELECT * FROM ag_graph WHERE name = 'GraphA'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphA'; name | namespace ------+----------- (0 rows) SELECT * FROM pg_namespace WHERE nspname = 'GraphA'; - nspname | nspowner | nspacl ----------+----------+-------- + oid | nspname | nspowner | nspacl +-----+---------+----------+-------- (0 rows) -- Sanity check that graphx does not exist - should return 0. 
diff --git a/regress/expected/cypher_set.out b/regress/expected/cypher_set.out index 5cd42ee97..ce55c722c 100644 --- a/regress/expected/cypher_set.out +++ b/regress/expected/cypher_set.out @@ -246,9 +246,9 @@ EXECUTE p_1; {"id": 281474976710659, "label": "", "properties": {"i": 3, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 3, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 3, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 3, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 3, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex @@ -261,9 +261,9 @@ EXECUTE p_1; {"id": 281474976710659, "label": "", "properties": {"i": 3, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 3, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 3, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 3, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 3, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex @@ -277,9 +277,9 @@ EXECUTE p_2('{"var_name": 4}'); {"id": 281474976710659, "label": "", "properties": {"i": 4, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 4, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 4, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 4, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 4, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 4, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 4, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex @@ -292,9 +292,9 @@ EXECUTE p_2('{"var_name": 6}'); {"id": 281474976710659, "label": "", "properties": {"i": 6, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 6, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 6, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 6, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 6, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 6, "t": 
150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 6, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex @@ -316,9 +316,9 @@ SELECT set_test(); {"id": 281474976710659, "label": "", "properties": {"i": 7, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 7, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 7, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 7, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 7, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex @@ -331,9 +331,9 @@ SELECT set_test(); {"id": 281474976710659, "label": "", "properties": {"i": 7, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 7, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 7, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 7, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 7, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex @@ -349,9 +349,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = 3, n.j = 5 RETURN n $$) {"id": 281474976710659, "label": "", "properties": {"i": 3, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 3, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 3, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 3, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 3, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex @@ -500,9 +500,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = [3, 'test', [1, 2, 3], {"id": 281474976710659, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "y": 2}}::vertex {"id": 
281474976710658, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex @@ -518,9 +518,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype); {"id": 281474976710659, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex @@ -550,9 +550,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = [] RETURN n$$) AS (a ag {"id": 281474976710659, "label": "", "properties": {"i": [], "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": [], "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": [], "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", 
"properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex @@ -568,9 +568,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype); {"id": 281474976710659, "label": "", "properties": {"i": [], "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": [], "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": [], "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex @@ -587,9 +587,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = {prop1: 3, prop2:'test' {"id": 281474976710659, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex @@ -605,9 +605,9 @@ SELECT * FROM 
cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype); {"id": 281474976710659, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex @@ -637,9 +637,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = {} RETURN n$$) AS (a ag {"id": 281474976710659, "label": "", "properties": {"i": {}, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": {}, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": {}, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex @@ -655,9 +655,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype); {"id": 281474976710659, "label": "", "properties": {"i": {}, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": {}, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": {}, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, 
"j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex diff --git a/regress/expected/expr.out b/regress/expected/expr.out index 5f28e3642..5142cb1c5 100644 --- a/regress/expected/expr.out +++ b/regress/expected/expr.out @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +SET extra_float_digits = 0; LOAD 'age'; SET search_path TO ag_catalog; SELECT * FROM create_graph('expr'); @@ -1143,7 +1144,7 @@ $$) AS (i bigint); SELECT * FROM cypher('type_coercion', $$ RETURN '1.0' $$) AS (i bigint); -ERROR: invalid input syntax for integer: "1.0" +ERROR: invalid input syntax for type bigint: "1.0" -- Casting to ints that will cause overflow SELECT * FROM cypher('type_coercion', $$ RETURN 10000000000000000000 @@ -1312,11 +1313,11 @@ SELECT agtype_in('null::int'); SELECT * FROM cypher('expr', $$ RETURN '0.0'::int $$) AS r(result agtype); -ERROR: invalid input syntax for integer: "0.0" +ERROR: invalid input syntax for type bigint: "0.0" SELECT * FROM cypher('expr', $$ RETURN '1.5'::int $$) AS r(result agtype); -ERROR: invalid input syntax for integer: "1.5" +ERROR: invalid input syntax for type bigint: "1.5" SELECT * FROM cypher('graph_name', $$ RETURN "15555555555555555555555555555"::int $$) AS (string_result agtype); @@ -5302,7 +5303,7 @@ SELECT * FROM cypher('UCSC', $$ MATCH (u) RETURN stDev(u.gpa), stDevP(u.gpa) $$) AS (stDev agtype, stDevP agtype); stdev | stdevp -------------------+------------------- - 0.549566929066705 | 0.508800109100231 + 0.549566929066706 | 0.508800109100232 (1 row) -- should return 0 diff --git a/regress/expected/scan.out b/regress/expected/scan.out index af82dbf22..d96d80049 100644 --- a/regress/expected/scan.out +++ b/regress/expected/scan.out @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +SET extra_float_digits = 0; LOAD 'age'; SET search_path TO ag_catalog; SELECT create_graph('scan'); diff --git a/regress/sql/agtype.sql b/regress/sql/agtype.sql index 3a116cc4d..608cc8af5 100644 --- a/regress/sql/agtype.sql +++ b/regress/sql/agtype.sql @@ -25,6 +25,7 @@ -- Load extension and set path -- LOAD 'age'; +SET extra_float_digits = 0; SET search_path TO ag_catalog; -- diff --git a/regress/sql/catalog.sql b/regress/sql/catalog.sql index 6bc19814c..641ef2ecf 100644 --- a/regress/sql/catalog.sql +++ b/regress/sql/catalog.sql @@ -25,7 +25,7 @@ SET search_path TO ag_catalog; -- SELECT create_graph('g'); -SELECT * FROM ag_graph WHERE name = 'g'; +SELECT name, namespace FROM ag_graph WHERE name = 'g'; -- create a label to test drop_label() SELECT * FROM cypher('g', $$CREATE (:l)$$) AS r(a agtype); @@ -62,18 +62,18 @@ SELECT create_graph('GraphA'); SELECT create_graph('GraphB'); -- Show GraphA's construction to verify case is preserved. -SELECT * FROM ag_graph WHERE name = 'GraphA'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphA'; SELECT nspname FROM pg_namespace WHERE nspname = 'GraphA'; -- Rename GraphA to GraphX. 
SELECT alter_graph('GraphA', 'RENAME', 'GraphX'); -- Show GraphX's construction to verify case is preserved. -SELECT * FROM ag_graph WHERE name = 'GraphX'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphX'; SELECT nspname FROM pg_namespace WHERE nspname = 'GraphX'; -- Verify there isn't a graph GraphA anymore. -SELECT * FROM ag_graph WHERE name = 'GraphA'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphA'; SELECT * FROM pg_namespace WHERE nspname = 'GraphA'; -- Sanity check that graphx does not exist - should return 0. diff --git a/regress/sql/expr.sql b/regress/sql/expr.sql index 7fe904fdb..8553b3c3c 100644 --- a/regress/sql/expr.sql +++ b/regress/sql/expr.sql @@ -17,6 +17,7 @@ * under the License. */ +SET extra_float_digits = 0; LOAD 'age'; SET search_path TO ag_catalog; diff --git a/regress/sql/scan.sql b/regress/sql/scan.sql index 97804e5c7..840a822f2 100644 --- a/regress/sql/scan.sql +++ b/regress/sql/scan.sql @@ -17,6 +17,7 @@ * under the License. */ +SET extra_float_digits = 0; LOAD 'age'; SET search_path TO ag_catalog; diff --git a/src/backend/catalog/ag_graph.c b/src/backend/catalog/ag_graph.c index c1e53d6ab..a344a6a37 100644 --- a/src/backend/catalog/ag_graph.c +++ b/src/backend/catalog/ag_graph.c @@ -26,9 +26,11 @@ #include "access/skey.h" #include "access/stratnum.h" #include "catalog/indexing.h" +#include "catalog/namespace.h" +#include "nodes/makefuncs.h" #include "storage/lockdefs.h" -#include "utils/builtins.h" #include "utils/fmgroids.h" +#include "utils/fmgrprotos.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relcache.h" @@ -39,36 +41,36 @@ static Oid get_graph_namespace(const char *graph_name); // INSERT INTO ag_catalog.ag_graph VALUES (graph_name, nsp_id) -Oid insert_graph(const Name graph_name, const Oid nsp_id) +void insert_graph(const Name graph_name, const Oid nsp_id) { Datum values[Natts_ag_graph]; bool nulls[Natts_ag_graph]; Relation ag_graph; HeapTuple tuple; - Oid graph_oid; + AssertArg(graph_name); AssertArg(OidIsValid(nsp_id)); + ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); + values[Anum_ag_graph_oid - 1] = ObjectIdGetDatum(nsp_id); + nulls[Anum_ag_graph_oid - 1] = false; + values[Anum_ag_graph_name - 1] = NameGetDatum(graph_name); nulls[Anum_ag_graph_name - 1] = false; values[Anum_ag_graph_namespace - 1] = ObjectIdGetDatum(nsp_id); nulls[Anum_ag_graph_namespace - 1] = false; - ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock); - tuple = heap_form_tuple(RelationGetDescr(ag_graph), values, nulls); /* * CatalogTupleInsert() is originally for PostgreSQL's catalog. However, * it is used at here for convenience. 
*/ - graph_oid = CatalogTupleInsert(ag_graph, tuple); - - heap_close(ag_graph, RowExclusiveLock); + CatalogTupleInsert(ag_graph, tuple); - return graph_oid; + table_close(ag_graph, RowExclusiveLock); } // DELETE FROM ag_catalog.ag_graph WHERE name = graph_name @@ -82,7 +84,7 @@ void delete_graph(const Name graph_name) ScanKeyInit(&scan_keys[0], Anum_ag_graph_name, BTEqualStrategyNumber, F_NAMEEQ, NameGetDatum(graph_name)); - ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock); + ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true, NULL, 1, scan_keys); @@ -97,7 +99,7 @@ void delete_graph(const Name graph_name) CatalogTupleDelete(ag_graph, &tuple->t_self); systable_endscan(scan_desc); - heap_close(ag_graph, RowExclusiveLock); + table_close(ag_graph, RowExclusiveLock); } // Function updates graph name in ag_graph table. @@ -116,7 +118,7 @@ void update_graph_name(const Name graph_name, const Name new_name) ScanKeyInit(&scan_keys[0], Anum_ag_graph_name, BTEqualStrategyNumber, F_NAMEEQ, NameGetDatum(graph_name)); - ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock); + ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true, NULL, 1, scan_keys); @@ -146,7 +148,7 @@ void update_graph_name(const Name graph_name, const Name new_name) // end scan and close ag_graph systable_endscan(scan_desc); - heap_close(ag_graph, RowExclusiveLock); + table_close(ag_graph, RowExclusiveLock); } Oid get_graph_oid(const char *graph_name) diff --git a/src/backend/catalog/ag_label.c b/src/backend/catalog/ag_label.c index 8001f53c4..f5572c8d3 100644 --- a/src/backend/catalog/ag_label.c +++ b/src/backend/catalog/ag_label.c @@ -26,6 +26,7 @@ #include "access/skey.h" #include "access/stratnum.h" #include "catalog/indexing.h" +#include "catalog/namespace.h" #include "fmgr.h" #include "nodes/execnodes.h" #include "nodes/makefuncs.h" @@ -45,32 +46,32 @@ // INSERT INTO ag_catalog.ag_label // VALUES (label_name, label_graph, label_id, label_kind, label_relation) -Oid insert_label(const char *label_name, Oid label_graph, int32 label_id, - char label_kind, Oid label_relation) +void insert_label(const char *label_name, Oid graph_oid, int32 label_id, + char label_kind, Oid label_relation) { NameData label_name_data; Datum values[Natts_ag_label]; bool nulls[Natts_ag_label]; Relation ag_label; HeapTuple tuple; - Oid label_oid; /* * NOTE: Is it better to make use of label_id and label_kind domain types * than to use assert to check label_id and label_kind are valid? 
*/ AssertArg(label_name); - AssertArg(OidIsValid(label_graph)); AssertArg(label_id_is_valid(label_id)); AssertArg(label_kind == LABEL_KIND_VERTEX || label_kind == LABEL_KIND_EDGE); AssertArg(OidIsValid(label_relation)); + ag_label = table_open(ag_label_relation_id(), RowExclusiveLock); + namestrcpy(&label_name_data, label_name); values[Anum_ag_label_name - 1] = NameGetDatum(&label_name_data); nulls[Anum_ag_label_name - 1] = false; - values[Anum_ag_label_graph - 1] = ObjectIdGetDatum(label_graph); + values[Anum_ag_label_graph - 1] = ObjectIdGetDatum(graph_oid); nulls[Anum_ag_label_graph - 1] = false; values[Anum_ag_label_id - 1] = Int32GetDatum(label_id); @@ -82,19 +83,15 @@ Oid insert_label(const char *label_name, Oid label_graph, int32 label_id, values[Anum_ag_label_relation - 1] = ObjectIdGetDatum(label_relation); nulls[Anum_ag_label_relation - 1] = false; - ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock); - tuple = heap_form_tuple(RelationGetDescr(ag_label), values, nulls); /* * CatalogTupleInsert() is originally for PostgreSQL's catalog. However, * it is used at here for convenience. */ - label_oid = CatalogTupleInsert(ag_label, tuple); + CatalogTupleInsert(ag_label, tuple); - heap_close(ag_label, RowExclusiveLock); - - return label_oid; + table_close(ag_label, RowExclusiveLock); } // DELETE FROM ag_catalog.ag_label WHERE relation = relation @@ -108,7 +105,7 @@ void delete_label(Oid relation) ScanKeyInit(&scan_keys[0], Anum_ag_label_relation, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relation)); - ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock); + ag_label = table_open(ag_label_relation_id(), RowExclusiveLock); scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(), true, NULL, 1, scan_keys); @@ -123,45 +120,34 @@ void delete_label(Oid relation) CatalogTupleDelete(ag_label, &tuple->t_self); systable_endscan(scan_desc); - heap_close(ag_label, RowExclusiveLock); -} - -Oid get_label_oid(const char *label_name, Oid label_graph) -{ - label_cache_data *cache_data; - - cache_data = search_label_name_graph_cache(label_name, label_graph); - if (cache_data) - return cache_data->oid; - else - return InvalidOid; + table_close(ag_label, RowExclusiveLock); } -int32 get_label_id(const char *label_name, Oid label_graph) +int32 get_label_id(const char *label_name, Oid graph_oid) { label_cache_data *cache_data; - cache_data = search_label_name_graph_cache(label_name, label_graph); + cache_data = search_label_name_graph_cache(label_name, graph_oid); if (cache_data) return cache_data->id; else return INVALID_LABEL_ID; } -Oid get_label_relation(const char *label_name, Oid label_graph) +Oid get_label_relation(const char *label_name, Oid graph_oid) { label_cache_data *cache_data; - cache_data = search_label_name_graph_cache(label_name, label_graph); + cache_data = search_label_name_graph_cache(label_name, graph_oid); if (cache_data) return cache_data->relation; else return InvalidOid; } -char *get_label_relation_name(const char *label_name, Oid label_graph) +char *get_label_relation_name(const char *label_name, Oid graph_oid) { - return get_rel_name(get_label_relation(label_name, label_graph)); + return get_rel_name(get_label_relation(label_name, graph_oid)); } PG_FUNCTION_INFO_V1(_label_name); @@ -185,7 +171,7 @@ Datum _label_name(PG_FUNCTION_ARGS) label_id = (int32)(((uint64)AG_GETARG_GRAPHID(1)) >> ENTRY_ID_BITS); - label_cache = search_label_graph_id_cache(graph, label_id); + label_cache = search_label_graph_oid_cache(graph, label_id); label_name = 
NameStr(label_cache->name); @@ -222,23 +208,23 @@ PG_FUNCTION_INFO_V1(_extract_label_id); Datum _extract_label_id(PG_FUNCTION_ARGS) { - graphid graph_id; + graphid graph_oid; if (PG_ARGISNULL(0)) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), - errmsg("graph_id must not be null"))); + errmsg("graph_oid must not be null"))); } - graph_id = AG_GETARG_GRAPHID(0); + graph_oid = AG_GETARG_GRAPHID(0); - PG_RETURN_INT32(get_graphid_label_id(graph_id)); + PG_RETURN_INT32(get_graphid_label_id(graph_oid)); } -bool label_id_exists(Oid label_graph, int32 label_id) +bool label_id_exists(Oid graph_oid, int32 label_id) { label_cache_data *cache_data; - cache_data = search_label_graph_id_cache(label_graph, label_id); + cache_data = search_label_graph_oid_cache(graph_oid, label_id); if (cache_data) return true; else @@ -267,15 +253,16 @@ RangeVar *get_label_range_var(char *graph_name, Oid graph_oid, * XXX: We may want to use the cache system for this function, * however the cache system currently requires us to know the * name of the label we want. - */ + */ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) { List *labels = NIL; ScanKeyData scan_keys[2]; Relation ag_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; TupleTableSlot *slot; + ResultRelInfo *resultRelInfo; // setup scan keys to get all edges for the given graph oid ScanKeyInit(&scan_keys[1], Anum_ag_label_graph, BTEqualStrategyNumber, @@ -284,11 +271,15 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) F_CHAREQ, CharGetDatum(LABEL_TYPE_EDGE)); // setup the table to be scanned - ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock); - scan_desc = heap_beginscan(ag_label, estate->es_snapshot, 2, scan_keys); + ag_label = table_open(ag_label_relation_id(), RowExclusiveLock); + scan_desc = table_beginscan(ag_label, estate->es_snapshot, 2, scan_keys); + + resultRelInfo = create_entity_result_rel_info(estate, "ag_catalog", + "ag_label"); - slot = ExecInitExtraTupleSlot(estate, - RelationGetDescr(ag_label)); + slot = ExecInitExtraTupleSlot( + estate, RelationGetDescr(resultRelInfo->ri_RelationDesc), + &TTSOpsHeapTuple); // scan through the results and get all the label names. 
while(true) @@ -303,7 +294,7 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) if (!HeapTupleIsValid(tuple)) break; - ExecStoreTuple(tuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, slot, false); datum = slot_getattr(slot, Anum_ag_label_name, &isNull); label = DatumGetName(datum); @@ -311,8 +302,10 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) labels = lappend(labels, label); } - heap_endscan(scan_desc); - heap_close(ag_label, RowExclusiveLock); + table_endscan(scan_desc); + + destroy_entity_result_rel_info(resultRelInfo); + table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); return labels; } diff --git a/src/backend/commands/graph_commands.c b/src/backend/commands/graph_commands.c index c298324c8..6ef0b3cb3 100644 --- a/src/backend/commands/graph_commands.c +++ b/src/backend/commands/graph_commands.c @@ -19,9 +19,9 @@ #include "postgres.h" -#include "access/xact.h" #include "access/genam.h" #include "access/heapam.h" +#include "access/xact.h" #include "catalog/dependency.h" #include "catalog/objectaddress.h" #include "commands/defrem.h" @@ -35,9 +35,8 @@ #include "nodes/pg_list.h" #include "nodes/value.h" #include "parser/parser.h" -#include "utils/fmgroids.h" -#include "utils/relcache.h" #include "utils/rel.h" +#include "utils/relcache.h" #include "catalog/ag_graph.h" #include "catalog/ag_label.h" @@ -166,9 +165,8 @@ Datum drop_graph(PG_FUNCTION_ARGS) graph_name_str = NameStr(*graph_name); if (!graph_exists(graph_name_str)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("graph \"%s\" does not exist", graph_name_str))); + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), + errmsg("graph \"%s\" does not exist", graph_name_str))); } drop_schema_for_graph(graph_name_str, cascade); @@ -345,11 +343,11 @@ List *get_graphnames(void) List *graphnames = NIL; char *str; - ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock); + ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true, NULL, 0, NULL); - slot = MakeTupleTableSlot(RelationGetDescr(ag_graph)); + slot = MakeTupleTableSlot(RelationGetDescr(ag_graph), &TTSOpsHeapTuple); for (;;) { @@ -358,17 +356,17 @@ List *get_graphnames(void) break; ExecClearTuple(slot); - ExecStoreTuple(tuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, slot, false); slot_getallattrs(slot); - str = DatumGetCString(slot->tts_values[0]); + str = DatumGetCString(slot->tts_values[Anum_ag_graph_name - 1]); graphnames = lappend(graphnames, str); } ExecDropSingleTupleTableSlot(slot); systable_endscan(scan_desc); - heap_close(ag_graph, RowExclusiveLock); + table_close(ag_graph, RowExclusiveLock); return graphnames; } diff --git a/src/backend/commands/label_commands.c b/src/backend/commands/label_commands.c index 66b6818fe..c682d3988 100644 --- a/src/backend/commands/label_commands.c +++ b/src/backend/commands/label_commands.c @@ -259,8 +259,8 @@ Datum create_elabel(PG_FUNCTION_ARGS) * new table and sequence. Returns the oid from the new tuple in * ag_catalog.ag_label. 
*/ -Oid create_label(char *graph_name, char *label_name, char label_type, - List *parents) +void create_label(char *graph_name, char *label_name, char label_type, + List *parents) { graph_cache_data *cache_data; Oid graph_oid; @@ -271,7 +271,6 @@ Oid create_label(char *graph_name, char *label_name, char label_type, RangeVar *seq_range_var; int32 label_id; Oid relation_id; - Oid label_oid; cache_data = search_graph_name_cache(graph_name); if (!cache_data) @@ -307,12 +306,9 @@ Oid create_label(char *graph_name, char *label_name, char label_type, // get a new "id" for the new label label_id = get_new_label_id(graph_oid, nsp_id); - label_oid = insert_label(label_name, graph_oid, label_id, label_type, - relation_id); + insert_label(label_name, graph_oid, label_id, label_type, relation_id); CommandCounterIncrement(); - - return label_oid; } // CREATE TABLE `schema_name`.`rel_name` ( @@ -677,10 +673,10 @@ static int32 get_new_label_id(Oid graph_oid, Oid nsp_id) for (cnt = LABEL_ID_MIN; cnt <= LABEL_ID_MAX; cnt++) { - int64 label_id; + int32 label_id; // the data type of the sequence is integer (int4) - label_id = nextval_internal(seq_id, true); + label_id = (int32) nextval_internal(seq_id, true); Assert(label_id_is_valid(label_id)); if (!label_id_exists(graph_oid, label_id)) return (int32)label_id; diff --git a/src/backend/executor/cypher_create.c b/src/backend/executor/cypher_create.c index 7a03efd3d..ca4f1777c 100644 --- a/src/backend/executor/cypher_create.c +++ b/src/backend/executor/cypher_create.c @@ -19,6 +19,7 @@ #include "postgres.h" +#include "access/heapam.h" #include "access/htup_details.h" #include "access/xact.h" #include "executor/tuptable.h" @@ -26,17 +27,14 @@ #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/plannodes.h" -#include "parser/parse_relation.h" #include "rewrite/rewriteHandler.h" #include "utils/rel.h" -#include "utils/tqual.h" #include "catalog/ag_label.h" #include "executor/cypher_executor.h" #include "executor/cypher_utils.h" #include "nodes/cypher_nodes.h" #include "utils/agtype.h" -#include "utils/ag_cache.h" #include "utils/graphid.h" static void begin_cypher_create(CustomScanState *node, EState *estate, @@ -85,7 +83,8 @@ static void begin_cypher_create(CustomScanState *node, EState *estate, ExecAssignExprContext(estate, &node->ss.ps); ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree)); + ExecGetResultType(node->ss.ps.lefttree), + &TTSOpsHeapTuple); if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags)) { @@ -108,7 +107,7 @@ static void begin_cypher_create(CustomScanState *node, EState *estate, continue; // Open relation and aquire a row exclusive lock. 
- rel = heap_open(cypher_node->relid, RowExclusiveLock); + rel = table_open(cypher_node->relid, RowExclusiveLock); // Initialize resultRelInfo for the vertex cypher_node->resultRelInfo = makeNode(ResultRelInfo); @@ -120,9 +119,8 @@ static void begin_cypher_create(CustomScanState *node, EState *estate, ExecOpenIndices(cypher_node->resultRelInfo, false); // Setup the relation's tuple slot - cypher_node->elemTupleSlot = ExecInitExtraTupleSlot( - estate, - RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc)); + cypher_node->elemTupleSlot = table_slot_create( + rel, &estate->es_tupleTable); if (cypher_node->id_expr != NULL) { @@ -274,8 +272,8 @@ static void end_cypher_create(CustomScanState *node) ExecCloseIndices(cypher_node->resultRelInfo); // close the relation itself - heap_close(cypher_node->resultRelInfo->ri_RelationDesc, - RowExclusiveLock); + table_close(cypher_node->resultRelInfo->ri_RelationDesc, + RowExclusiveLock); } } } @@ -425,6 +423,7 @@ static void create_edge(cypher_create_custom_scan_state *css, prev_path = lappend(prev_path, DatumGetPointer(result)); css->path_values = list_concat(prev_path, css->path_values); } + if (CYPHER_TARGET_NODE_IS_VARIABLE(node->flags)) { scantuple->tts_values[node->tuple_position - 1] = result; diff --git a/src/backend/executor/cypher_delete.c b/src/backend/executor/cypher_delete.c index 3d9406bdc..7bb7c077d 100644 --- a/src/backend/executor/cypher_delete.c +++ b/src/backend/executor/cypher_delete.c @@ -19,27 +19,23 @@ #include "postgres.h" -#include "access/sysattr.h" +#include "access/heapam.h" #include "access/htup_details.h" #include "access/multixact.h" +#include "access/table.h" #include "access/xact.h" -#include "storage/bufmgr.h" #include "executor/tuptable.h" #include "nodes/execnodes.h" #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/plannodes.h" #include "parser/parsetree.h" -#include "parser/parse_relation.h" -#include "rewrite/rewriteHandler.h" +#include "storage/bufmgr.h" #include "utils/rel.h" -#include "utils/tqual.h" #include "catalog/ag_label.h" -#include "commands/label_commands.h" #include "executor/cypher_executor.h" #include "executor/cypher_utils.h" -#include "parser/cypher_parse_node.h" #include "nodes/cypher_nodes.h" #include "utils/agtype.h" #include "utils/graphid.h" @@ -99,7 +95,8 @@ static void begin_cypher_delete(CustomScanState *node, EState *estate, // setup scan tuple slot and projection info ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree)); + ExecGetResultType(node->ss.ps.lefttree), + &TTSOpsHeapTuple); if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags)) { @@ -284,9 +281,9 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo, { ResultRelInfo *saved_resultRelInfo; LockTupleMode lockmode; - HeapUpdateFailureData hufd; - HTSU_Result lock_result; - HTSU_Result delete_result; + TM_FailureData hufd; + TM_Result lock_result; + TM_Result delete_result; Buffer buffer; // Find the physical tuple, this variable is coming from @@ -303,11 +300,11 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo, * It is possible the entity may have already been deleted. If the tuple * can be deleted, the lock result will be HeapTupleMayBeUpdated. If the * tuple was already deleted by this DELETE clause, the result would be - * HeapTupleSelfUpdated, if the result was deleted by a previous delete - * clause, the result will HeapTupleInvisible. 
Throw an error if any + * TM_SelfModified, if the result was deleted by a previous delete + * clause, the result will TM_Invisible. Throw an error if any * other result was returned. */ - if (lock_result == HeapTupleMayBeUpdated) + if (lock_result == TM_Ok) { delete_result = heap_delete(resultRelInfo->ri_RelationDesc, &tuple->t_self, GetCurrentCommandId(true), @@ -320,30 +317,32 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo, */ switch (delete_result) { - case HeapTupleMayBeUpdated: - break; - case HeapTupleSelfUpdated: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("deleting the same entity more than once cannot happen"))); - /* ereport never gets here */ - break; - case HeapTupleUpdated: - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); - /* ereport never gets here */ - break; - default: - elog(ERROR, "Entity failed to be update"); - /* elog never gets here */ - break; + case TM_Ok: + break; + case TM_SelfModified: + ereport( + ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg( + "deleting the same entity more than once cannot happen"))); + /* ereport never gets here */ + break; + case TM_Updated: + ereport( + ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + /* ereport never gets here */ + break; + default: + elog(ERROR, "Entity failed to be update"); + /* elog never gets here */ + break; } /* increment the command counter */ CommandCounterIncrement(); } - else if (lock_result != HeapTupleInvisible && - lock_result != HeapTupleSelfUpdated) + else if (lock_result != TM_Invisible && lock_result != TM_SelfModified) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), @@ -374,7 +373,7 @@ static void process_delete_list(CustomScanState *node) cypher_delete_item *item; agtype_value *original_entity_value, *id, *label; ScanKeyData scan_keys[1]; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; ResultRelInfo *resultRelInfo; HeapTuple heap_tuple; char *label_name; @@ -424,8 +423,8 @@ static void process_delete_list(CustomScanState *node) /* * Setup the scan description, with the correct snapshot and scan keys. */ - scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc, - estate->es_snapshot, 1, scan_keys); + scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc, + estate->es_snapshot, 1, scan_keys); /* Retrieve the tuple. */ heap_tuple = heap_getnext(scan_desc, ForwardScanDirection); @@ -437,7 +436,7 @@ static void process_delete_list(CustomScanState *node) */ if (!HeapTupleIsValid(heap_tuple)) { - heap_endscan(scan_desc); + table_endscan(scan_desc); destroy_entity_result_rel_info(resultRelInfo); continue; @@ -459,7 +458,7 @@ static void process_delete_list(CustomScanState *node) delete_entity(estate, resultRelInfo, heap_tuple); /* Close the scan and the relation. 
*/ - heap_endscan(scan_desc); + table_endscan(scan_desc); destroy_entity_result_rel_info(resultRelInfo); } } @@ -492,18 +491,19 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, { char *label_name = lfirst(lc); ResultRelInfo *resultRelInfo; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; TupleTableSlot *slot; resultRelInfo = create_entity_result_rel_info(estate, graph_name, label_name); - scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc, - estate->es_snapshot, 0, NULL); + scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc, + estate->es_snapshot, 0, NULL); - slot = ExecInitExtraTupleSlot(estate, - RelationGetDescr(resultRelInfo->ri_RelationDesc)); + slot = ExecInitExtraTupleSlot( + estate, RelationGetDescr(resultRelInfo->ri_RelationDesc), + &TTSOpsHeapTuple); // scan the table while(true) @@ -517,7 +517,7 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, if (!HeapTupleIsValid(tuple)) break; - ExecStoreTuple(tuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, slot, false); startid = GRAPHID_GET_DATUM(slot_getattr(slot, Anum_ag_label_edge_table_start_id, &isNull)); endid = GRAPHID_GET_DATUM(slot_getattr(slot, Anum_ag_label_edge_table_end_id, &isNull)); @@ -540,7 +540,7 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, } } - heap_endscan(scan_desc); + table_endscan(scan_desc); destroy_entity_result_rel_info(resultRelInfo); } diff --git a/src/backend/executor/cypher_merge.c b/src/backend/executor/cypher_merge.c index c65abf2fe..fbfce8f1f 100644 --- a/src/backend/executor/cypher_merge.c +++ b/src/backend/executor/cypher_merge.c @@ -20,23 +20,20 @@ #include "postgres.h" #include "access/htup_details.h" +#include "access/table.h" #include "access/xact.h" #include "executor/tuptable.h" #include "nodes/execnodes.h" #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/plannodes.h" -#include "parser/parse_relation.h" -#include "rewrite/rewriteHandler.h" #include "utils/rel.h" -#include "utils/tqual.h" #include "catalog/ag_label.h" #include "executor/cypher_executor.h" #include "executor/cypher_utils.h" #include "nodes/cypher_nodes.h" #include "utils/agtype.h" -#include "utils/ag_cache.h" #include "utils/graphid.h" static void begin_cypher_merge(CustomScanState *node, EState *estate, @@ -90,7 +87,8 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, ExecAssignExprContext(estate, &node->ss.ps); ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree)); + ExecGetResultType(node->ss.ps.lefttree), + &TTSOpsVirtual); /* * When MERGE is not the last clause in a cypher query. Setup projection @@ -125,7 +123,7 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, } // Open relation and aquire a row exclusive lock. 
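Across the executor files in this patch, heap_open/heap_close and heap_beginscan/heap_endscan become table_open/table_close and table_beginscan/table_endscan, HTSU_Result becomes TM_Result, and ExecStoreTuple becomes ExecStoreHeapTuple. A hedged, self-contained sketch of the resulting PG12-style scan loop (example_scan_relation and its arguments are illustrative names, not code from this patch):

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/table.h"
    #include "access/tableam.h"
    #include "executor/tuptable.h"
    #include "storage/lockdefs.h"
    #include "utils/snapmgr.h"

    static void example_scan_relation(Oid relid, Snapshot snapshot, TupleTableSlot *slot)
    {
        /* PG12 spelling of the open/scan/close sequence */
        Relation rel = table_open(relid, AccessShareLock);
        TableScanDesc scan = table_beginscan(rel, snapshot, 0, NULL);
        HeapTuple tuple;

        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
            /* formerly ExecStoreTuple(tuple, slot, InvalidBuffer, false) */
            ExecStoreHeapTuple(tuple, slot, false);
            /* ... examine slot attributes with slot_getattr() here ... */
        }

        table_endscan(scan);
        table_close(rel, AccessShareLock);
    }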
- rel = heap_open(cypher_node->relid, RowExclusiveLock); + rel = table_open(cypher_node->relid, RowExclusiveLock); // Initialize resultRelInfo for the vertex cypher_node->resultRelInfo = makeNode(ResultRelInfo); @@ -139,7 +137,8 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, // Setup the relation's tuple slot cypher_node->elemTupleSlot = ExecInitExtraTupleSlot( estate, - RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc)); + RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc), + &TTSOpsHeapTuple); if (cypher_node->id_expr != NULL) { @@ -162,7 +161,9 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, * that have modified the command id. */ if (estate->es_output_cid == 0) + { estate->es_output_cid = estate->es_snapshot->curcid; + } /* store the currentCommandId for this instance */ css->base_currentCommandId = GetCurrentCommandId(false); @@ -262,7 +263,6 @@ static void process_simple_merge(CustomScanState *node) /* setup the scantuple that the process_path needs */ econtext->ecxt_scantuple = node->ss.ps.lefttree->ps_ResultTupleSlot; - econtext->ecxt_scantuple->tts_isempty = false; process_path(css); } @@ -462,7 +462,6 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node) */ ExprContext *econtext = node->ss.ps.ps_ExprContext; SubqueryScanState *sss = (SubqueryScanState *)node->ss.ps.lefttree; - HeapTuple heap_tuple; /* * Our child execution node is always a subquery. If not there @@ -489,8 +488,8 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node) * it. */ ExecInitScanTupleSlot(estate, &sss->ss, - ExecGetResultType(sss->subplan)); - + ExecGetResultType(sss->subplan), + &TTSOpsVirtual); /* setup the scantuple that the process_path needs */ econtext->ecxt_scantuple = sss->ss.ss_ScanTupleSlot; @@ -507,14 +506,8 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node) */ mark_tts_isnull(econtext->ecxt_scantuple); - // create the physical heap tuple - heap_tuple = heap_form_tuple( - econtext->ecxt_scantuple->tts_tupleDescriptor, - econtext->ecxt_scantuple->tts_values, - econtext->ecxt_scantuple->tts_isnull); - // store the heap tuble - ExecStoreTuple(heap_tuple, econtext->ecxt_scantuple, InvalidBuffer, false); + ExecStoreVirtualTuple(econtext->ecxt_scantuple); /* * make the subquery's projection scan slot be the tuple table we @@ -564,8 +557,8 @@ static void end_cypher_merge(CustomScanState *node) ExecCloseIndices(cypher_node->resultRelInfo); // close the relation itself - heap_close(cypher_node->resultRelInfo->ri_RelationDesc, - RowExclusiveLock); + table_close(cypher_node->resultRelInfo->ri_RelationDesc, + RowExclusiveLock); } } diff --git a/src/backend/executor/cypher_set.c b/src/backend/executor/cypher_set.c index 1bc74b764..73ce1fa7f 100644 --- a/src/backend/executor/cypher_set.c +++ b/src/backend/executor/cypher_set.c @@ -19,25 +19,20 @@ #include "postgres.h" -#include "access/sysattr.h" +#include "access/heapam.h" #include "access/htup_details.h" #include "access/xact.h" -#include "storage/bufmgr.h" #include "executor/tuptable.h" #include "nodes/execnodes.h" #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/plannodes.h" -#include "parser/parsetree.h" -#include "parser/parse_relation.h" #include "rewrite/rewriteHandler.h" +#include "storage/bufmgr.h" #include "utils/rel.h" -#include "catalog/ag_label.h" -#include "commands/label_commands.h" #include "executor/cypher_executor.h" #include "executor/cypher_utils.h" -#include "parser/cypher_parse_node.h" #include 
"nodes/cypher_nodes.h" #include "utils/agtype.h" #include "utils/graphid.h" @@ -82,7 +77,8 @@ static void begin_cypher_set(CustomScanState *node, EState *estate, ExecAssignExprContext(estate, &node->ss.ps); ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree)); + ExecGetResultType(node->ss.ps.lefttree), + &TTSOpsHeapTuple); if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags)) { @@ -112,12 +108,13 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, { HeapTuple tuple = NULL; LockTupleMode lockmode; - HeapUpdateFailureData hufd; - HTSU_Result lock_result; - HTSU_Result update_result; + TM_FailureData hufd; + TM_Result lock_result; Buffer buffer; + bool update_indexes; + TM_Result result; - ResultRelInfo *saved_resultRelInfo = saved_resultRelInfo;; + ResultRelInfo *saved_resultRelInfo = estate->es_result_relation_info; estate->es_result_relation_info = resultRelInfo; lockmode = ExecUpdateLockMode(estate, resultRelInfo); @@ -126,10 +123,11 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, GetCurrentCommandId(false), lockmode, LockWaitBlock, false, &buffer, &hufd); - if (lock_result == HeapTupleMayBeUpdated) + if (lock_result == TM_Ok) { + ExecOpenIndices(resultRelInfo, false); ExecStoreVirtualTuple(elemTupleSlot); - tuple = ExecMaterializeSlot(elemTupleSlot); + tuple = ExecFetchSlotHeapTuple(elemTupleSlot, true, NULL); tuple->t_self = old_tuple->t_self; // Check the constraints of the tuple @@ -139,26 +137,59 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, ExecConstraints(resultRelInfo, elemTupleSlot, estate); } - // Insert the tuple normally - update_result = heap_update(resultRelInfo->ri_RelationDesc, - &(tuple->t_self), tuple, + result = table_tuple_update(resultRelInfo->ri_RelationDesc, + &tuple->t_self, elemTupleSlot, GetCurrentCommandId(true), - estate->es_crosscheck_snapshot, true, &hufd, - &lockmode); + //estate->es_output_cid, + estate->es_snapshot,// NULL, + estate->es_crosscheck_snapshot, + true /* wait for commit */ , + &hufd, &lockmode, &update_indexes); - if (update_result != HeapTupleMayBeUpdated) + if (result == TM_SelfModified) + { + if (hufd.cmax != estate->es_output_cid) + { + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified"))); + } + + ExecCloseIndices(resultRelInfo); + estate->es_result_relation_info = saved_resultRelInfo; + + return tuple; + } + + if (result != TM_Ok) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Entity failed to be updated: %i", update_result))); + errmsg("Entity failed to be updated: %i", result))); } // Insert index entries for the tuple - if (resultRelInfo->ri_NumIndices > 0) + if (resultRelInfo->ri_NumIndices > 0 && update_indexes) { - ExecInsertIndexTuples(elemTupleSlot, &(tuple->t_self), estate, - false, NULL, NIL); + ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, NIL); } + + ExecCloseIndices(resultRelInfo); } + else if (lock_result == TM_SelfModified) + { + if (hufd.cmax != estate->es_output_cid) + { + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified"))); + } + } + else + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("Entity failed to be updated: %i", lock_result))); + } + ReleaseBuffer(buffer); estate->es_result_relation_info = saved_resultRelInfo; @@ -376,13 +407,14 @@ static void process_update_list(CustomScanState *node) TupleTableSlot *slot; ResultRelInfo *resultRelInfo; ScanKeyData 
scan_keys[1]; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; bool remove_property; char *label_name; cypher_update_item *update_item; Datum new_entity; HeapTuple heap_tuple; char *clause_name = css->set_list->clause_name; + int cid; update_item = (cypher_update_item *)lfirst(lc); @@ -418,8 +450,8 @@ static void process_update_list(CustomScanState *node) /* get the id and label for later */ id = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "id"); label = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "label"); - label_name = pnstrdup(label->val.string.val, label->val.string.len); + label_name = pnstrdup(label->val.string.val, label->val.string.len); /* get the properties we need to update */ original_properties = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "properties"); @@ -461,11 +493,12 @@ static void process_update_list(CustomScanState *node) new_property_value, remove_property); - resultRelInfo = create_entity_result_rel_info(estate, - css->set_list->graph_name, - label_name); + resultRelInfo = create_entity_result_rel_info( + estate, css->set_list->graph_name, label_name); - slot = ExecInitExtraTupleSlot(estate, RelationGetDescr(resultRelInfo->ri_RelationDesc)); + slot = ExecInitExtraTupleSlot( + estate, RelationGetDescr(resultRelInfo->ri_RelationDesc), + &TTSOpsHeapTuple); /* * Now that we have the updated properties, create a either a vertex or @@ -516,6 +549,9 @@ static void process_update_list(CustomScanState *node) * If the last update index for the entity is equal to the current loop * index, then update this tuple. */ + cid = estate->es_snapshot->curcid; + estate->es_snapshot->curcid = GetCurrentCommandId(false); + if (luindex[update_item->entity_position - 1] == lidx) { /* @@ -528,8 +564,8 @@ static void process_update_list(CustomScanState *node) * Setup the scan description, with the correct snapshot and scan * keys. */ - scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc, - estate->es_snapshot, 1, scan_keys); + scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc, + estate->es_snapshot, 1, scan_keys); /* Retrieve the tuple. 
*/ heap_tuple = heap_getnext(scan_desc, ForwardScanDirection); @@ -543,12 +579,13 @@ static void process_update_list(CustomScanState *node) heap_tuple); } /* close the ScanDescription */ - heap_endscan(scan_desc); + table_endscan(scan_desc); } + estate->es_snapshot->curcid = cid; /* close relation */ ExecCloseIndices(resultRelInfo); - heap_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); + table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); /* increment loop index */ lidx++; diff --git a/src/backend/executor/cypher_utils.c b/src/backend/executor/cypher_utils.c index 3558fe696..7b92fbc61 100644 --- a/src/backend/executor/cypher_utils.c +++ b/src/backend/executor/cypher_utils.c @@ -27,7 +27,9 @@ #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" +#include "access/heapam.h" #include "access/multixact.h" +#include "access/xact.h" #include "nodes/extensible.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" @@ -44,6 +46,7 @@ #include "executor/cypher_utils.h" #include "utils/agtype.h" #include "utils/ag_cache.h" +#include "utils/agtype.h" #include "utils/graphid.h" /* @@ -92,7 +95,7 @@ void destroy_entity_result_rel_info(ResultRelInfo *result_rel_info) ExecCloseIndices(result_rel_info); // close the rel - heap_close(result_rel_info->ri_RelationDesc, RowExclusiveLock); + table_close(result_rel_info->ri_RelationDesc, RowExclusiveLock); } TupleTableSlot *populate_vertex_tts( @@ -171,7 +174,7 @@ bool entity_exists(EState *estate, Oid graph_oid, graphid id) { label_cache_data *label; ScanKeyData scan_keys[1]; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; Relation rel; bool result = true; @@ -180,14 +183,14 @@ bool entity_exists(EState *estate, Oid graph_oid, graphid id) * Extract the label id from the graph id and get the table name * the entity is part of. 
*/ - label = search_label_graph_id_cache(graph_oid, GET_LABEL_ID(id)); + label = search_label_graph_oid_cache(graph_oid, GET_LABEL_ID(id)); // Setup the scan key to be the graphid ScanKeyInit(&scan_keys[0], 1, BTEqualStrategyNumber, F_GRAPHIDEQ, GRAPHID_GET_DATUM(id)); - rel = heap_open(label->relation, RowExclusiveLock); - scan_desc = heap_beginscan(rel, estate->es_snapshot, 1, scan_keys); + rel = table_open(label->relation, RowExclusiveLock); + scan_desc = table_beginscan(rel, estate->es_snapshot, 1, scan_keys); tuple = heap_getnext(scan_desc, ForwardScanDirection); @@ -200,8 +203,8 @@ bool entity_exists(EState *estate, Oid graph_oid, graphid id) result = false; } - heap_endscan(scan_desc); - heap_close(rel, RowExclusiveLock); + table_endscan(scan_desc); + table_close(rel, RowExclusiveLock); return result; } @@ -235,7 +238,7 @@ HeapTuple insert_entity_tuple_cid(ResultRelInfo *resultRelInfo, HeapTuple tuple = NULL; ExecStoreVirtualTuple(elemTupleSlot); - tuple = ExecMaterializeSlot(elemTupleSlot); + tuple = ExecFetchSlotHeapTuple(elemTupleSlot, true, NULL); /* Check the constraints of the tuple */ tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); @@ -244,14 +247,14 @@ HeapTuple insert_entity_tuple_cid(ResultRelInfo *resultRelInfo, ExecConstraints(resultRelInfo, elemTupleSlot, estate); } - /* Insert the tuple using the passed in cid */ - heap_insert(resultRelInfo->ri_RelationDesc, tuple, cid, 0, NULL); + // Insert the tuple normally + table_tuple_insert(resultRelInfo->ri_RelationDesc, elemTupleSlot, + GetCurrentCommandId(true), 0, NULL); - /* Insert index entries for the tuple */ + // Insert index entries for the tuple if (resultRelInfo->ri_NumIndices > 0) { - ExecInsertIndexTuples(elemTupleSlot, &(tuple->t_self), estate, false, - NULL, NIL); + ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, NIL); } return tuple; diff --git a/src/backend/nodes/cypher_outfuncs.c b/src/backend/nodes/cypher_outfuncs.c index 05c0f7f50..1d84bc289 100644 --- a/src/backend/nodes/cypher_outfuncs.c +++ b/src/backend/nodes/cypher_outfuncs.c @@ -303,7 +303,7 @@ void out_cypher_create_target_nodes(StringInfo str, const ExtensibleNode *node) WRITE_NODE_FIELD(paths); WRITE_INT32_FIELD(flags); - WRITE_OID_FIELD(graph_oid); + WRITE_INT32_FIELD(graph_oid); } // serialization function for the cypher_create_path ExtensibleNode. @@ -370,7 +370,7 @@ void out_cypher_delete_information(StringInfo str, const ExtensibleNode *node) WRITE_NODE_FIELD(delete_items); WRITE_INT32_FIELD(flags); WRITE_STRING_FIELD(graph_name); - WRITE_OID_FIELD(graph_oid); + WRITE_INT32_FIELD(graph_oid); WRITE_BOOL_FIELD(detach); } @@ -389,7 +389,7 @@ void out_cypher_merge_information(StringInfo str, const ExtensibleNode *node) DEFINE_AG_NODE(cypher_merge_information); WRITE_INT32_FIELD(flags); - WRITE_OID_FIELD(graph_oid); + WRITE_INT32_FIELD(graph_oid); WRITE_INT32_FIELD(merge_function_attr); WRITE_NODE_FIELD(path); } diff --git a/src/backend/nodes/cypher_readfuncs.c b/src/backend/nodes/cypher_readfuncs.c index 3d3249d9a..02e5afba5 100644 --- a/src/backend/nodes/cypher_readfuncs.c +++ b/src/backend/nodes/cypher_readfuncs.c @@ -29,11 +29,19 @@ * * Macros for declaring appropriate local variables. 
*/ -// Declare the extensible node and local fields for the pg_strtok +/* A few guys need only local_node */ +#define READ_LOCALS_NO_FIELDS(nodeTypeName) \ + nodeTypeName *local_node = (nodeTypeName *) node + +/* And a few guys need only the pg_strtok support fields */ +#define READ_TEMP_LOCALS() \ + const char *token; \ + int length + +/* ... but most need both */ #define READ_LOCALS(nodeTypeName) \ - nodeTypeName *local_node = (nodeTypeName *)node; \ - char *token; \ - int length; + READ_LOCALS_NO_FIELDS(nodeTypeName); \ + READ_TEMP_LOCALS() /* * The READ_*_FIELD defines first skips the :fildname token (key) part of the string @@ -49,7 +57,7 @@ #define READ_INT_FIELD(fldname) \ token = pg_strtok(&length); \ token = pg_strtok(&length); \ - local_node->fldname = atoi(token) + local_node->fldname = strtol(token, 0, 10) // Read an unsigned integer field (anything written as ":fldname %u") #define READ_UINT_FIELD(fldname) \ @@ -85,7 +93,7 @@ #define READ_ENUM_FIELD(fldname, enumtype) \ token = pg_strtok(&length); \ token = pg_strtok(&length); \ - local_node->fldname = (enumtype) atoi(token) + local_node->fldname = (enumtype) strtol(token, 0, 10) // Read a float field #define READ_FLOAT_FIELD(fldname) \ @@ -179,7 +187,7 @@ void read_cypher_create_target_nodes(struct ExtensibleNode *node) READ_NODE_FIELD(paths); READ_INT_FIELD(flags); - READ_OID_FIELD(graph_oid); + READ_INT_FIELD(graph_oid); } /* @@ -261,7 +269,7 @@ void read_cypher_delete_information(struct ExtensibleNode *node) READ_NODE_FIELD(delete_items); READ_INT_FIELD(flags); READ_STRING_FIELD(graph_name); - READ_OID_FIELD(graph_oid); + READ_INT_FIELD(graph_oid); READ_BOOL_FIELD(detach); } @@ -286,7 +294,7 @@ void read_cypher_merge_information(struct ExtensibleNode *node) READ_LOCALS(cypher_merge_information); READ_INT_FIELD(flags); - READ_OID_FIELD(graph_oid); + READ_UINT_FIELD(graph_oid); READ_INT_FIELD(merge_function_attr); READ_NODE_FIELD(path); } diff --git a/src/backend/optimizer/cypher_createplan.c b/src/backend/optimizer/cypher_createplan.c index 9e0863423..c6480d154 100644 --- a/src/backend/optimizer/cypher_createplan.c +++ b/src/backend/optimizer/cypher_createplan.c @@ -19,13 +19,10 @@ #include "postgres.h" -#include "access/sysattr.h" -#include "catalog/pg_type_d.h" #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/pg_list.h" #include "nodes/plannodes.h" -#include "nodes/relation.h" #include "executor/cypher_executor.h" #include "optimizer/cypher_createplan.h" diff --git a/src/backend/optimizer/cypher_pathnode.c b/src/backend/optimizer/cypher_pathnode.c index 4e04b752c..cdd0b0635 100644 --- a/src/backend/optimizer/cypher_pathnode.c +++ b/src/backend/optimizer/cypher_pathnode.c @@ -22,7 +22,6 @@ #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/pg_list.h" -#include "nodes/relation.h" #include "optimizer/cypher_createplan.h" #include "optimizer/cypher_pathnode.h" diff --git a/src/backend/optimizer/cypher_paths.c b/src/backend/optimizer/cypher_paths.c index b1ac6938a..80d916127 100644 --- a/src/backend/optimizer/cypher_paths.c +++ b/src/backend/optimizer/cypher_paths.c @@ -19,11 +19,8 @@ #include "postgres.h" -#include "access/sysattr.h" -#include "catalog/pg_type_d.h" #include "nodes/parsenodes.h" #include "nodes/primnodes.h" -#include "nodes/relation.h" #include "optimizer/pathnode.h" #include "optimizer/paths.h" diff --git a/src/backend/parser/ag_scanner.l b/src/backend/parser/ag_scanner.l index 3bab06157..68b15a22c 100644 --- a/src/backend/parser/ag_scanner.l +++ 
b/src/backend/parser/ag_scanner.l @@ -316,6 +316,7 @@ static int _scan_errposition(const int location, const ag_yy_extra *extra); * and is the same with "ag_scanner_t". */ #define YY_DECL ag_token ag_scanner_next_token(yyscan_t yyscanner) +#define NDIGITS_PER_REMAINDER 9 %} %% @@ -902,7 +903,6 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb) */ const uint64 divisor = 1000000000; const int ndivisions = 3; - const int ndigits_per_remainder = 9; int ndigits; int nwords; @@ -1048,11 +1048,11 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb) // convert the collected remainders to a string, starting from the last one for (i = nremainders - 1; i >= 0; i--) { - char buf[ndigits_per_remainder]; + char buf[NDIGITS_PER_REMAINDER]; int buf_i; uint32 tmp; - buf_i = ndigits_per_remainder; + buf_i = NDIGITS_PER_REMAINDER; for (tmp = remainders[i]; tmp > 0; tmp /= 10) buf[--buf_i] = '0' + (char)(tmp % 10); @@ -1064,7 +1064,7 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb) buf[--buf_i] = '0'; } - strbuf_append_buf(sb, &buf[buf_i], ndigits_per_remainder - buf_i); + strbuf_append_buf(sb, &buf[buf_i], NDIGITS_PER_REMAINDER - buf_i); } pfree(remainders); diff --git a/src/backend/parser/cypher_analyze.c b/src/backend/parser/cypher_analyze.c index 15222ffa2..9ffb86342 100644 --- a/src/backend/parser/cypher_analyze.c +++ b/src/backend/parser/cypher_analyze.c @@ -32,7 +32,6 @@ #include "parser/parse_node.h" #include "parser/parse_relation.h" #include "parser/parse_target.h" -#include "parser/parsetree.h" #include "utils/builtins.h" #include "catalog/ag_graph.h" @@ -58,11 +57,11 @@ static const char *expr_get_const_cstring(Node *expr, const char *source_str); static int get_query_location(const int location, const char *source_str); static Query *analyze_cypher(List *stmt, ParseState *parent_pstate, const char *query_str, int query_loc, - char *graph_name, Oid graph_oid, Param *params); + char *graph_name, uint32 graph_oid, Param *params); static Query *analyze_cypher_and_coerce(List *stmt, RangeTblFunction *rtfunc, ParseState *parent_pstate, const char *query_str, int query_loc, - char *graph_name, Oid graph_oid, + char *graph_name, uint32 graph_oid, Param *params); void post_parse_analyze_init(void) @@ -175,7 +174,7 @@ static bool convert_cypher_walker(Node *node, ParseState *pstate) * QTW_IGNORE_JOINALIASES * We are not interested in this. 
*/ - flags = QTW_EXAMINE_RTES | QTW_IGNORE_RT_SUBQUERIES | + flags = QTW_EXAMINE_RTES_BEFORE | QTW_IGNORE_RT_SUBQUERIES | QTW_IGNORE_JOINALIASES; /* clear the global variable extra_node */ @@ -271,7 +270,7 @@ static void convert_cypher_to_subquery(RangeTblEntry *rte, ParseState *pstate) FuncExpr *funcexpr = (FuncExpr *)rtfunc->funcexpr; Node *arg; Name graph_name; - Oid graph_oid; + uint32 graph_oid; const char *query_str; int query_loc; Param *params; @@ -485,7 +484,7 @@ static int get_query_location(const int location, const char *source_str) static Query *analyze_cypher(List *stmt, ParseState *parent_pstate, const char *query_str, int query_loc, - char *graph_name, Oid graph_oid, Param *params) + char *graph_name, uint32 graph_oid, Param *params) { cypher_clause *clause; ListCell *lc; @@ -564,7 +563,7 @@ static Query *analyze_cypher(List *stmt, ParseState *parent_pstate, static Query *analyze_cypher_and_coerce(List *stmt, RangeTblFunction *rtfunc, ParseState *parent_pstate, const char *query_str, int query_loc, - char *graph_name, Oid graph_oid, + char *graph_name, uint32 graph_oid, Param *params) { ParseState *pstate; diff --git a/src/backend/parser/cypher_clause.c b/src/backend/parser/cypher_clause.c index 2269d3f3d..e07d07a83 100644 --- a/src/backend/parser/cypher_clause.c +++ b/src/backend/parser/cypher_clause.c @@ -25,6 +25,7 @@ #include "postgres.h" #include "access/sysattr.h" +#include "access/heapam.h" #include "catalog/pg_type_d.h" #include "miscadmin.h" #include "nodes/makefuncs.h" @@ -33,7 +34,7 @@ #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" -#include "optimizer/var.h" +#include "optimizer/optimizer.h" #include "parser/parse_clause.h" #include "parser/parse_coerce.h" #include "parser/parse_collate.h" @@ -85,6 +86,14 @@ #define AGE_VARNAME_ID AGE_DEFAULT_VARNAME_PREFIX"id" #define AGE_VARNAME_SET_CLAUSE AGE_DEFAULT_VARNAME_PREFIX"set_clause" +/* + * In the transformation stage, we need to track + * where a variable came from. When moving between + * clauses, Postgres parsestate and Query data structures + * are insufficient for some of the information we + * need. 
+ */ + /* * Rules to determine if a node must be included: * @@ -4352,7 +4361,7 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, List **target_list, rel->relid = RelationGetRelid(label_relation); rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, - NULL, false, false); + AccessShareLock, NULL, false, false); rte->requiredPerms = ACL_INSERT; // Build Id expression, always use the default logic @@ -4578,7 +4587,7 @@ transform_create_cypher_new_node(cypher_parsestate *cpstate, rel->relid = RelationGetRelid(label_relation); rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, - NULL, false, false); + AccessShareLock, NULL, false, false); rte->requiredPerms = ACL_INSERT; // id @@ -5366,7 +5375,7 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, List **target_list, rel->relid = RelationGetRelid(label_relation); rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, - NULL, false, false); + AccessShareLock, NULL, false, false); rte->requiredPerms = ACL_INSERT; // Build Id expression, always use the default logic @@ -5471,7 +5480,7 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, List **target_list, rel->relid = RelationGetRelid(label_relation); rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, - NULL, false, false); + AccessShareLock, NULL, false, false); rte->requiredPerms = ACL_INSERT; // id diff --git a/src/backend/parser/cypher_expr.c b/src/backend/parser/cypher_expr.c index cb437880b..f0627eba5 100644 --- a/src/backend/parser/cypher_expr.c +++ b/src/backend/parser/cypher_expr.c @@ -41,6 +41,7 @@ #include "parser/parse_oper.h" #include "parser/parse_relation.h" #include "utils/builtins.h" +#include "utils/float.h" #include "utils/int8.h" #include "utils/lsyscache.h" #include "utils/syscache.h" diff --git a/src/backend/parser/cypher_keywords.c b/src/backend/parser/cypher_keywords.c index 5ed120288..cd4082260 100644 --- a/src/backend/parser/cypher_keywords.c +++ b/src/backend/parser/cypher_keywords.c @@ -33,63 +33,27 @@ #include "funcapi.h" #include "parser/cypher_gram.h" +#include "parser/cypher_kwlist_d.h" /* * This list must be sorted by ASCII name, because binary search is used to * locate entries. 
*/ -const ScanKeyword cypher_keywords[] = { - {"all", ALL, RESERVED_KEYWORD}, - {"analyze", ANALYZE, RESERVED_KEYWORD}, - {"and", AND, RESERVED_KEYWORD}, - {"as", AS, RESERVED_KEYWORD}, - {"asc", ASC, RESERVED_KEYWORD}, - {"ascending", ASCENDING, RESERVED_KEYWORD}, - {"by", BY, RESERVED_KEYWORD}, - {"call", CALL, RESERVED_KEYWORD}, - {"case", CASE, RESERVED_KEYWORD}, - {"coalesce", COALESCE, RESERVED_KEYWORD}, - {"contains", CONTAINS, RESERVED_KEYWORD}, - {"create", CREATE, RESERVED_KEYWORD}, - {"delete", DELETE, RESERVED_KEYWORD}, - {"desc", DESC, RESERVED_KEYWORD}, - {"descending", DESCENDING, RESERVED_KEYWORD}, - {"detach", DETACH, RESERVED_KEYWORD}, - {"distinct", DISTINCT, RESERVED_KEYWORD}, - {"else", ELSE, RESERVED_KEYWORD}, - {"end", END_P, RESERVED_KEYWORD}, - {"ends", ENDS, RESERVED_KEYWORD}, - {"exists", EXISTS, RESERVED_KEYWORD}, - {"explain", EXPLAIN, RESERVED_KEYWORD}, - {"false", FALSE_P, RESERVED_KEYWORD}, - {"in", IN, RESERVED_KEYWORD}, - {"is", IS, RESERVED_KEYWORD}, - {"limit", LIMIT, RESERVED_KEYWORD}, - {"match", MATCH, RESERVED_KEYWORD}, - {"merge", MERGE, RESERVED_KEYWORD}, - {"not", NOT, RESERVED_KEYWORD}, - {"null", NULL_P, RESERVED_KEYWORD}, - {"optional", OPTIONAL, RESERVED_KEYWORD}, - {"or", OR, RESERVED_KEYWORD}, - {"order", ORDER, RESERVED_KEYWORD}, - {"remove", REMOVE, RESERVED_KEYWORD}, - {"return", RETURN, RESERVED_KEYWORD}, - {"set", SET, RESERVED_KEYWORD}, - {"skip", SKIP, RESERVED_KEYWORD}, - {"starts", STARTS, RESERVED_KEYWORD}, - {"then", THEN, RESERVED_KEYWORD}, - {"true", TRUE_P, RESERVED_KEYWORD}, - {"union", UNION, RESERVED_KEYWORD}, - {"unwind", UNWIND, RESERVED_KEYWORD}, - {"verbose", VERBOSE, RESERVED_KEYWORD}, - {"when", WHEN, RESERVED_KEYWORD}, - {"where", WHERE, RESERVED_KEYWORD}, - {"with", WITH, RESERVED_KEYWORD}, - {"xor", XOR, RESERVED_KEYWORD}, - {"yield", YIELD, RESERVED_KEYWORD} +#define PG_KEYWORD(kwname, value, category) value, + +const uint16 CypherKeywordTokens[] = { +#include "parser/cypher_kwlist.h" +}; + +#undef PG_KEYWORD + +#define PG_KEYWORD(kwname, value, category) category, + +const uint16 CypherKeywordCategories[] = { +#include "parser/cypher_kwlist.h" }; -const int num_cypher_keywords = lengthof(cypher_keywords); +#undef PG_KEYWORD PG_FUNCTION_INFO_V1(get_cypher_keywords); @@ -106,7 +70,7 @@ Datum get_cypher_keywords(PG_FUNCTION_ARGS) func_ctx = SRF_FIRSTCALL_INIT(); old_mem_ctx = MemoryContextSwitchTo(func_ctx->multi_call_memory_ctx); - tup_desc = CreateTemplateTupleDesc(3, false); + tup_desc = CreateTemplateTupleDesc(3); TupleDescInitEntry(tup_desc, (AttrNumber)1, "word", TEXTOID, -1, 0); TupleDescInitEntry(tup_desc, (AttrNumber)2, "catcode", CHAROID, -1, 0); TupleDescInitEntry(tup_desc, (AttrNumber)3, "catdesc", TEXTOID, -1, 0); @@ -118,15 +82,16 @@ Datum get_cypher_keywords(PG_FUNCTION_ARGS) func_ctx = SRF_PERCALL_SETUP(); - if (func_ctx->call_cntr < num_cypher_keywords) + if (func_ctx->call_cntr < CypherKeyword.num_keywords) { char *values[3]; HeapTuple tuple; // cast-away-const is ugly but alternatives aren't much better - values[0] = (char *)cypher_keywords[func_ctx->call_cntr].name; + values[0] = (char *) GetScanKeyword((int) func_ctx->call_cntr, + &CypherKeyword); - switch (cypher_keywords[func_ctx->call_cntr].category) + switch (CypherKeywordCategories[func_ctx->call_cntr]) { case UNRESERVED_KEYWORD: values[1] = "U"; diff --git a/src/backend/parser/cypher_parse_agg.c b/src/backend/parser/cypher_parse_agg.c index b5654e778..8fdb71d3e 100644 --- a/src/backend/parser/cypher_parse_agg.c +++ 
b/src/backend/parser/cypher_parse_agg.c @@ -27,7 +27,7 @@ #include "catalog/pg_constraint.h" #include "nodes/nodeFuncs.h" #include "optimizer/tlist.h" -#include "optimizer/var.h" +#include "optimizer/optimizer.h" #include "parser/cypher_parse_agg.h" #include "parser/parsetree.h" #include "rewrite/rewriteManip.h" @@ -192,7 +192,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) root->planner_cxt = CurrentMemoryContext; root->hasJoinRTEs = true; - groupClauses = (List *) flatten_join_alias_vars(root, + groupClauses = (List *) flatten_join_alias_vars((Query*)root, (Node *) groupClauses); } @@ -236,7 +236,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) finalize_grouping_exprs(clause, pstate, qry, groupClauses, root, have_non_var_grouping); if (hasJoinRTEs) - clause = flatten_join_alias_vars(root, clause); + clause = flatten_join_alias_vars((Query*)root, clause); check_ungrouped_columns(clause, pstate, qry, groupClauses, groupClauseCommonVars, have_non_var_grouping, &func_grouped_rels); @@ -245,7 +245,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) finalize_grouping_exprs(clause, pstate, qry, groupClauses, root, have_non_var_grouping); if (hasJoinRTEs) - clause = flatten_join_alias_vars(root, clause); + clause = flatten_join_alias_vars((Query*)root, clause); check_ungrouped_columns(clause, pstate, qry, groupClauses, groupClauseCommonVars, have_non_var_grouping, &func_grouped_rels); @@ -562,7 +562,7 @@ static bool finalize_grouping_exprs_walker(Node *node, Index ref = 0; if (context->root) - expr = flatten_join_alias_vars(context->root, expr); + expr = flatten_join_alias_vars((Query*)context->root, expr); /* * Each expression must match a grouping entry at the current diff --git a/src/backend/parser/cypher_parser.c b/src/backend/parser/cypher_parser.c index 8dd53df26..e12c7efdc 100644 --- a/src/backend/parser/cypher_parser.c +++ b/src/backend/parser/cypher_parser.c @@ -19,7 +19,6 @@ #include "postgres.h" -#include "common/keywords.h" #include "nodes/pg_list.h" #include "parser/scansup.h" @@ -67,20 +66,19 @@ int cypher_yylex(YYSTYPE *lvalp, YYLTYPE *llocp, ag_scanner_t scanner) break; case AG_TOKEN_IDENTIFIER: { - const ScanKeyword *keyword; + int kwnum; char *ident; - keyword = ScanKeywordLookup(token.value.s, cypher_keywords, - num_cypher_keywords); - if (keyword) + kwnum = ScanKeywordLookup(token.value.s, &CypherKeyword); + if (kwnum >= 0) { /* * use token.value.s instead of keyword->name to preserve * case sensitivity */ - lvalp->keyword = token.value.s; + lvalp->keyword = GetScanKeyword(kwnum, &CypherKeyword); *llocp = token.location; - return keyword->value; + return CypherKeywordTokens[kwnum]; } ident = pstrdup(token.value.s); diff --git a/src/backend/utils/adt/ag_float8_supp.c b/src/backend/utils/adt/ag_float8_supp.c index 286f074ef..450fdc07a 100644 --- a/src/backend/utils/adt/ag_float8_supp.c +++ b/src/backend/utils/adt/ag_float8_supp.c @@ -27,6 +27,7 @@ #include +#include "utils/float.h" #include "utils/builtins.h" #include "utils/ag_float8_supp.h" diff --git a/src/backend/utils/adt/age_global_graph.c b/src/backend/utils/adt/age_global_graph.c index 3da11b44c..b980bd642 100644 --- a/src/backend/utils/adt/age_global_graph.c +++ b/src/backend/utils/adt/age_global_graph.c @@ -19,14 +19,23 @@ #include "postgres.h" +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "access/table.h" +#include "access/tableam.h" #include "catalog/namespace.h" +#include "commands/label_commands.h" #include 
"utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/snapmgr.h" #include "commands/label_commands.h" +#include "catalog/ag_graph.h" +#include "catalog/ag_label.h" #include "utils/age_global_graph.h" +#include "utils/age_graphid_ds.h" #include "utils/agtype.h" #include "catalog/ag_graph.h" #include "catalog/ag_label.h" @@ -189,7 +198,7 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid, List *labels = NIL; ScanKeyData scan_keys[2]; Relation ag_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; TupleDesc tupdesc; @@ -203,8 +212,8 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid, F_CHAREQ, CharGetDatum(label_type)); /* setup the table to be scanned, ag_label in this case */ - ag_label = heap_open(ag_label_relation_id(), ShareLock); - scan_desc = heap_beginscan(ag_label, snapshot, 2, scan_keys); + ag_label = table_open(ag_label_relation_id(), ShareLock); + scan_desc = table_beginscan(ag_label, snapshot, 2, scan_keys); /* get the tupdesc - we don't need to release this one */ tupdesc = RelationGetDescr(ag_label); @@ -228,8 +237,8 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid, } /* close up scan */ - heap_endscan(scan_desc); - heap_close(ag_label, ShareLock); + table_endscan(scan_desc); + table_close(ag_label, ShareLock); return labels; } @@ -399,7 +408,7 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx) foreach (lc, vertex_label_names) { Relation graph_vertex_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; char *vertex_label_name; Oid vertex_label_table_oid; @@ -411,8 +420,8 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx) vertex_label_table_oid = get_relname_relid(vertex_label_name, graph_namespace_oid); /* open the relation (table) and begin the scan */ - graph_vertex_label = heap_open(vertex_label_table_oid, ShareLock); - scan_desc = heap_beginscan(graph_vertex_label, snapshot, 0, NULL); + graph_vertex_label = table_open(vertex_label_table_oid, ShareLock); + scan_desc = table_beginscan(graph_vertex_label, snapshot, 0, NULL); /* get the tupdesc - we don't need to release this one */ tupdesc = RelationGetDescr(graph_vertex_label); /* bail if the number of columns differs */ @@ -452,8 +461,8 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx) } /* end the scan and close the relation */ - heap_endscan(scan_desc); - heap_close(graph_vertex_label, ShareLock); + table_endscan(scan_desc); + table_close(graph_vertex_label, ShareLock); } } @@ -498,7 +507,7 @@ static void load_edge_hashtable(GRAPH_global_context *ggctx) foreach (lc, edge_label_names) { Relation graph_edge_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; char *edge_label_name; Oid edge_label_table_oid; @@ -510,8 +519,8 @@ static void load_edge_hashtable(GRAPH_global_context *ggctx) edge_label_table_oid = get_relname_relid(edge_label_name, graph_namespace_oid); /* open the relation (table) and begin the scan */ - graph_edge_label = heap_open(edge_label_table_oid, ShareLock); - scan_desc = heap_beginscan(graph_edge_label, snapshot, 0, NULL); + graph_edge_label = table_open(edge_label_table_oid, ShareLock); + scan_desc = table_beginscan(graph_edge_label, snapshot, 0, NULL); /* get the tupdesc - we don't need to release this one */ tupdesc = RelationGetDescr(graph_edge_label); /* bail if the number of columns differs */ @@ -573,8 +582,8 @@ static void load_edge_hashtable(GRAPH_global_context *ggctx) } /* end the scan 
and close the relation */ - heap_endscan(scan_desc); - heap_close(graph_edge_label, ShareLock); + table_endscan(scan_desc); + table_close(graph_edge_label, ShareLock); } } diff --git a/src/backend/utils/adt/age_vle.c b/src/backend/utils/adt/age_vle.c index 35d7e71d7..e3e6385d5 100644 --- a/src/backend/utils/adt/age_vle.c +++ b/src/backend/utils/adt/age_vle.c @@ -19,6 +19,8 @@ #include "postgres.h" +#include "access/heapam.h" +#include "catalog/namespace.h" #include "catalog/pg_type.h" #include "funcapi.h" #include "utils/lsyscache.h" diff --git a/src/backend/utils/adt/agtype.c b/src/backend/utils/adt/agtype.c index 908f38870..1c5ba86c6 100644 --- a/src/backend/utils/adt/agtype.c +++ b/src/backend/utils/adt/agtype.c @@ -32,8 +32,15 @@ #include +#include "access/genam.h" +#include "access/heapam.h" +#include "access/skey.h" +#include "access/table.h" +#include "access/tableam.h" #include "access/htup_details.h" #include "catalog/namespace.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_operator.h" #include "catalog/pg_type.h" #include "catalog/pg_aggregate_d.h" #include "catalog/pg_collation_d.h" @@ -45,6 +52,7 @@ #include "parser/parse_coerce.h" #include "nodes/pg_list.h" #include "utils/builtins.h" +#include "utils/float.h" #include "utils/fmgroids.h" #include "utils/int8.h" #include "utils/lsyscache.h" @@ -151,7 +159,7 @@ static bool is_array_path(agtype_value *agtv); /* graph entity retrieval */ static Datum get_vertex(const char *graph, const char *vertex_label, int64 graphid); -static char *get_label_name(const char *graph_name, int64 graph_id); +static char *get_label_name(const char *graph_name, int64 label_id); static float8 get_float_compatible_arg(Datum arg, Oid type, char *funcname, bool *is_null); static Numeric get_numeric_compatible_arg(Datum arg, Oid type, char *funcname, @@ -185,8 +193,9 @@ Oid get_AGTYPEOID(void) { if (g_AGTYPEOID == InvalidOid) { - g_AGTYPEOID = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum("agtype"), - ObjectIdGetDatum(ag_catalog_namespace_id())); + g_AGTYPEOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, + CStringGetDatum("agtype"), + ObjectIdGetDatum(ag_catalog_namespace_id())); } return g_AGTYPEOID; @@ -197,7 +206,7 @@ Oid get_AGTYPEARRAYOID(void) { if (g_AGTYPEARRAYOID == InvalidOid) { - g_AGTYPEARRAYOID = GetSysCacheOid2(TYPENAMENSP, + g_AGTYPEARRAYOID = GetSysCacheOid2(TYPENAMENSP,Anum_pg_type_oid, CStringGetDatum("_agtype"), ObjectIdGetDatum(ag_catalog_namespace_id())); } @@ -2144,7 +2153,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("id")); - if (fcinfo->argnull[0]) + if (fcinfo->args[0].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_vertex() graphid cannot be NULL"))); @@ -2157,7 +2166,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("label")); - if (fcinfo->argnull[1]) + if (fcinfo->args[1].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_vertex() label cannot be NULL"))); @@ -2170,7 +2179,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS) string_to_agtype_value("properties")); //if the properties object is null, push an empty object - if (fcinfo->argnull[2]) + if (fcinfo->args[2].isnull) { result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, NULL); @@ -2226,7 +2235,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, 
WAGT_KEY, string_to_agtype_value("id")); - if (fcinfo->argnull[0]) + if (fcinfo->args[0].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_edge() graphid cannot be NULL"))); @@ -2239,7 +2248,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("label")); - if (fcinfo->argnull[3]) + if (fcinfo->args[3].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_vertex() label cannot be NULL"))); @@ -2251,7 +2260,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("end_id")); - if (fcinfo->argnull[2]) + if (fcinfo->args[2].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_edge() endid cannot be NULL"))); @@ -2264,7 +2273,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("start_id")); - if (fcinfo->argnull[1]) + if (fcinfo->args[1].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_edge() startid cannot be NULL"))); @@ -2278,7 +2287,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) string_to_agtype_value("properties")); /* if the properties object is null, push an empty object */ - if (fcinfo->argnull[4]) + if (fcinfo->args[4].isnull) { result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, NULL); @@ -4490,7 +4499,7 @@ Datum column_get_datum(TupleDesc tupdesc, HeapTuple tuple, int column, * function returns a pointer to a duplicated string that needs to be freed * when you are finished using it. */ -static char *get_label_name(const char *graph_name, int64 graphid) +static char *get_label_name(const char *graph_name, int64 label_id) { ScanKeyData scan_keys[2]; Relation ag_label; @@ -4498,48 +4507,47 @@ static char *get_label_name(const char *graph_name, int64 graphid) HeapTuple tuple; TupleDesc tupdesc; char *result = NULL; + bool column_is_null; - Oid graphoid = get_graph_oid(graph_name); + Oid graph_id = get_graph_oid(graph_name); /* scankey for first match in ag_label, column 2, graphoid, BTEQ, OidEQ */ ScanKeyInit(&scan_keys[0], Anum_ag_label_graph, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(graphoid)); + F_OIDEQ, ObjectIdGetDatum(graph_id)); /* scankey for second match in ag_label, column 3, label id, BTEQ, Int4EQ */ ScanKeyInit(&scan_keys[1], Anum_ag_label_id, BTEqualStrategyNumber, - F_INT4EQ, Int32GetDatum(get_graphid_label_id(graphid))); + F_INT42EQ, Int32GetDatum(get_graphid_label_id(label_id))); - ag_label = heap_open(ag_relation_id("ag_label", "table"), ShareLock); - scan_desc = systable_beginscan(ag_label, - ag_relation_id("ag_label_graph_id_index", - "index"), true, NULL, 2, - scan_keys); + ag_label = table_open(ag_label_relation_id(), ShareLock); + scan_desc = systable_beginscan(ag_label, ag_label_graph_oid_index_id(), true, + NULL, 2, scan_keys); tuple = systable_getnext(scan_desc); if (!HeapTupleIsValid(tuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("graphid %lu does not exist", graphid))); + errmsg("graphid abc %lu does not exist", label_id))); } /* get the tupdesc - we don't need to release this one */ tupdesc = RelationGetDescr(ag_label); /* bail if the number of columns differs */ - if (tupdesc->natts != 5) + if (tupdesc->natts != Natts_ag_label) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("Invalid number of attributes for 
ag_catalog.ag_label"))); /* get the label name */ - result = NameStr(*DatumGetName(column_get_datum(tupdesc, tuple, 0, "name", - NAMEOID, true))); + result = NameStr(*DatumGetName( + heap_getattr(tuple, Anum_ag_label_name, tupdesc, &column_is_null))); /* duplicate it */ result = strdup(result); /* end the scan and close the relation */ systable_endscan(scan_desc); - heap_close(ag_label, ShareLock); + table_close(ag_label, ShareLock); return result; } @@ -4549,7 +4557,7 @@ static Datum get_vertex(const char *graph, const char *vertex_label, { ScanKeyData scan_keys[1]; Relation graph_vertex_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; TupleDesc tupdesc; Datum id, properties, result; @@ -4567,8 +4575,8 @@ static Datum get_vertex(const char *graph, const char *vertex_label, Int64GetDatum(graphid)); /* open the relation (table), begin the scan, and get the tuple */ - graph_vertex_label = heap_open(vertex_label_table_oid, ShareLock); - scan_desc = heap_beginscan(graph_vertex_label, snapshot, 1, scan_keys); + graph_vertex_label = table_open(vertex_label_table_oid, ShareLock); + scan_desc = table_beginscan(graph_vertex_label, snapshot, 1, scan_keys); tuple = heap_getnext(scan_desc, ForwardScanDirection); /* bail if the tuple isn't valid */ @@ -4576,7 +4584,7 @@ static Datum get_vertex(const char *graph, const char *vertex_label, { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("graphid %lu does not exist", graphid))); + errmsg("graphid cde %lu does not exist", graphid))); } /* get the tupdesc - we don't need to release this one */ @@ -4597,8 +4605,8 @@ static Datum get_vertex(const char *graph, const char *vertex_label, result = DirectFunctionCall3(_agtype_build_vertex, id, CStringGetDatum(vertex_label), properties); /* end the scan and close the relation */ - heap_endscan(scan_desc); - heap_close(graph_vertex_label, ShareLock); + table_endscan(scan_desc); + table_close(graph_vertex_label, ShareLock); /* return the vertex datum */ return result; } @@ -4612,7 +4620,7 @@ Datum age_startnode(PG_FUNCTION_ARGS) agtype_value *agtv_value = NULL; char *graph_name = NULL; char *label_name = NULL; - graphid graph_id; + graphid graph_oid; Datum result; /* we need the graph name */ @@ -4654,14 +4662,14 @@ Datum age_startnode(PG_FUNCTION_ARGS) /* it must not be null and must be an integer */ Assert(agtv_value != NULL); Assert(agtv_value->type = AGTV_INTEGER); - graph_id = agtv_value->val.int_value; + graph_oid = agtv_value->val.int_value; /* get the label */ - label_name = get_label_name(graph_name, graph_id); + label_name = get_label_name(graph_name, graph_oid); /* it must not be null and must be a string */ Assert(label_name != NULL); - result = get_vertex(graph_name, label_name, graph_id); + result = get_vertex(graph_name, label_name, graph_oid); free(label_name); @@ -4677,7 +4685,7 @@ Datum age_endnode(PG_FUNCTION_ARGS) agtype_value *agtv_value = NULL; char *graph_name = NULL; char *label_name = NULL; - graphid graph_id; + graphid graph_oid; Datum result; /* we need the graph name */ @@ -4719,14 +4727,14 @@ Datum age_endnode(PG_FUNCTION_ARGS) /* it must not be null and must be an integer */ Assert(agtv_value != NULL); Assert(agtv_value->type = AGTV_INTEGER); - graph_id = agtv_value->val.int_value; + graph_oid = agtv_value->val.int_value; /* get the label */ - label_name = get_label_name(graph_name, graph_id); + label_name = get_label_name(graph_name, graph_oid); /* it must not be null and must be a string */ Assert(label_name != NULL); - result = 
get_vertex(graph_name, label_name, graph_id); + result = get_vertex(graph_name, label_name, graph_oid); free(label_name); @@ -6931,10 +6939,9 @@ Datum age_replace(PG_FUNCTION_ARGS) * We need the strings as a text strings so that we can let PG deal with * multibyte characters in the string. */ - text_result = DatumGetTextPP(DirectFunctionCall3(replace_text, - PointerGetDatum(text_string), - PointerGetDatum(text_search), - PointerGetDatum(text_replace))); + text_result = DatumGetTextPP(DirectFunctionCall3Coll( + replace_text, C_COLLATION_OID, PointerGetDatum(text_string), + PointerGetDatum(text_search), PointerGetDatum(text_replace))); /* convert it back to a cstring */ string = text_to_cstring(text_result); diff --git a/src/backend/utils/adt/agtype_gin.c b/src/backend/utils/adt/agtype_gin.c index 669935c21..9a9adc9b9 100644 --- a/src/backend/utils/adt/agtype_gin.c +++ b/src/backend/utils/adt/agtype_gin.c @@ -33,6 +33,7 @@ #include "access/stratnum.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" +#include "utils/float.h" #include "utils/builtins.h" #include "utils/varlena.h" diff --git a/src/backend/utils/adt/graphid.c b/src/backend/utils/adt/graphid.c index bd65b957e..89bd8e528 100644 --- a/src/backend/utils/adt/graphid.c +++ b/src/backend/utils/adt/graphid.c @@ -36,8 +36,9 @@ Oid get_GRAPHIDOID(void) { if (g_GRAPHIDOID == InvalidOid) { - g_GRAPHIDOID = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum("graphid"), - ObjectIdGetDatum(ag_catalog_namespace_id())); + g_GRAPHIDOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, + CStringGetDatum("graphid"), + ObjectIdGetDatum(ag_catalog_namespace_id())); } return g_GRAPHIDOID; @@ -48,7 +49,7 @@ Oid get_GRAPHIDARRAYOID(void) { if (g_GRAPHIDARRAYOID == InvalidOid) { - g_GRAPHIDARRAYOID = GetSysCacheOid2(TYPENAMENSP, + g_GRAPHIDARRAYOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, CStringGetDatum("_graphid"), ObjectIdGetDatum(ag_catalog_namespace_id())); } diff --git a/src/backend/utils/ag_func.c b/src/backend/utils/ag_func.c index cbd2167e2..35a03464b 100644 --- a/src/backend/utils/ag_func.c +++ b/src/backend/utils/ag_func.c @@ -27,7 +27,6 @@ #include "access/htup.h" #include "access/htup_details.h" #include "catalog/pg_proc.h" -#include "fmgr.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -54,6 +53,7 @@ bool is_oid_ag_func(Oid func_oid, const char *func_name) ReleaseSysCache(proctup); return false; } + nspid = proc->pronamespace; ReleaseSysCache(proctup); @@ -81,7 +81,8 @@ Oid get_ag_func_oid(const char *func_name, const int nargs, ...) arg_types = buildoidvector(oids, nargs); - func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, CStringGetDatum(func_name), + func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, Anum_pg_proc_oid, + CStringGetDatum(func_name), PointerGetDatum(arg_types), ObjectIdGetDatum(ag_catalog_namespace_id())); if (!OidIsValid(func_oid)) @@ -111,7 +112,8 @@ Oid get_pg_func_oid(const char *func_name, const int nargs, ...) 
arg_types = buildoidvector(oids, nargs); - func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, CStringGetDatum(func_name), + func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, Anum_pg_proc_oid, + CStringGetDatum(func_name), PointerGetDatum(arg_types), ObjectIdGetDatum(pg_catalog_namespace_id())); if (!OidIsValid(func_oid)) diff --git a/src/backend/utils/cache/ag_cache.c b/src/backend/utils/cache/ag_cache.c index ec34355c6..09a5f51f4 100644 --- a/src/backend/utils/cache/ag_cache.c +++ b/src/backend/utils/cache/ag_cache.c @@ -26,8 +26,8 @@ #include "access/htup_details.h" #include "access/skey.h" #include "access/stratnum.h" -#include "access/sysattr.h" #include "access/tupdesc.h" +#include "catalog/pg_collation.h" #include "fmgr.h" #include "storage/lockdefs.h" #include "utils/builtins.h" @@ -69,17 +69,17 @@ typedef struct label_name_graph_cache_entry label_cache_data data; } label_name_graph_cache_entry; -typedef struct label_graph_id_cache_key +typedef struct label_graph_oid_cache_key { Oid graph; int32 id; -} label_graph_id_cache_key; +} label_graph_oid_cache_key; -typedef struct label_graph_id_cache_entry +typedef struct label_graph_oid_cache_entry { - label_graph_id_cache_key key; // hash key + label_graph_oid_cache_key key; // hash key label_cache_data data; -} label_graph_id_cache_entry; +} label_graph_oid_cache_entry; typedef struct label_relation_cache_entry { @@ -95,17 +95,13 @@ static ScanKeyData graph_name_scan_keys[1]; static HTAB *graph_namespace_cache_hash = NULL; static ScanKeyData graph_namespace_scan_keys[1]; -// ag_label.oid -static HTAB *label_oid_cache_hash = NULL; -static ScanKeyData label_oid_scan_keys[1]; - // ag_label.name, ag_label.graph static HTAB *label_name_graph_cache_hash = NULL; static ScanKeyData label_name_graph_scan_keys[2]; // ag_label.graph, ag_label.id -static HTAB *label_graph_id_cache_hash = NULL; -static ScanKeyData label_graph_id_scan_keys[2]; +static HTAB *label_graph_oid_cache_hash = NULL; +static ScanKeyData label_graph_oid_scan_keys[2]; // ag_label.relation static HTAB *label_relation_cache_hash = NULL; @@ -136,27 +132,23 @@ static void fill_graph_cache_data(graph_cache_data *cache_data, // ag_label static void initialize_label_caches(void); static void create_label_caches(void); -static void create_label_oid_cache(void); static void create_label_name_graph_cache(void); -static void create_label_graph_id_cache(void); +static void create_label_graph_oid_cache(void); static void create_label_relation_cache(void); static void invalidate_label_caches(Datum arg, Oid relid); -static void invalidate_label_oid_cache(Oid relid); -static void flush_label_oid_cache(void); static void invalidate_label_name_graph_cache(Oid relid); static void flush_label_name_graph_cache(void); -static void invalidate_label_graph_id_cache(Oid relid); -static void flush_label_graph_id_cache(void); +static void invalidate_label_graph_oid_cache(Oid relid); +static void flush_label_graph_oid_cache(void); static void invalidate_label_relation_cache(Oid relid); static void flush_label_relation_cache(void); -static label_cache_data *search_label_oid_cache_miss(Oid oid); static label_cache_data *search_label_name_graph_cache_miss(Name name, Oid graph); static void *label_name_graph_cache_hash_search(Name name, Oid graph, - HASHACTION action, - bool *found); -static label_cache_data *search_label_graph_id_cache_miss(Oid graph, int32 id); -static void *label_graph_id_cache_hash_search(Oid graph, int32 id, + HASHACTION action, bool *found); +static label_cache_data 
*search_label_graph_oid_cache_miss(Oid graph, + uint32 id); +static void *label_graph_oid_cache_hash_search(uint32 graph, int32 id, HASHACTION action, bool *found); static label_cache_data *search_label_relation_cache_miss(Oid relation); static void fill_label_cache_data(label_cache_data *cache_data, @@ -185,7 +177,7 @@ static void ag_cache_scan_key_init(ScanKey entry, AttrNumber attno, entry->sk_attno = attno; entry->sk_strategy = BTEqualStrategyNumber; entry->sk_subtype = InvalidOid; - entry->sk_collation = InvalidOid; + entry->sk_collation = C_COLLATION_OID; fmgr_info_cxt(func, &entry->sk_func, CacheMemoryContext); entry->sk_argument = (Datum)0; } @@ -353,11 +345,11 @@ static graph_cache_data *search_graph_name_cache_miss(Name name) scan_keys[0].sk_argument = NameGetDatum(name); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might flush the graph caches. This is OK because this function is called * when the desired entry is not in the cache. */ - ag_graph = heap_open(ag_graph_relation_id(), AccessShareLock); + ag_graph = table_open(ag_graph_relation_id(), AccessShareLock); scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true, NULL, 1, scan_keys); @@ -366,7 +358,7 @@ static graph_cache_data *search_graph_name_cache_miss(Name name) if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_graph, AccessShareLock); + table_close(ag_graph, AccessShareLock); return NULL; } @@ -379,7 +371,7 @@ static graph_cache_data *search_graph_name_cache_miss(Name name) fill_graph_cache_data(&entry->data, tuple, RelationGetDescr(ag_graph)); systable_endscan(scan_desc); - heap_close(ag_graph, AccessShareLock); + table_close(ag_graph, AccessShareLock); return &entry->data; } @@ -412,11 +404,11 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace) scan_keys[0].sk_argument = ObjectIdGetDatum(namespace); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might flush the graph caches. This is OK because this function is called * when the desired entry is not in the cache. 
*/ - ag_graph = heap_open(ag_graph_relation_id(), AccessShareLock); + ag_graph = table_open(ag_graph_relation_id(), AccessShareLock); scan_desc = systable_beginscan(ag_graph, ag_graph_namespace_index_id(), true, NULL, 1, scan_keys); @@ -426,7 +418,7 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace) if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_graph, AccessShareLock); + table_close(ag_graph, AccessShareLock); return NULL; } @@ -440,7 +432,7 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace) fill_graph_cache_data(&entry->data, tuple, RelationGetDescr(ag_graph)); systable_endscan(scan_desc); - heap_close(ag_graph, AccessShareLock); + table_close(ag_graph, AccessShareLock); return &entry->data; } @@ -451,8 +443,8 @@ static void fill_graph_cache_data(graph_cache_data *cache_data, bool is_null; Datum value; - // ag_graph.oid - value = heap_getattr(tuple, ObjectIdAttributeNumber, tuple_desc, &is_null); + // ag_graph.id + value = heap_getattr(tuple, Anum_ag_graph_oid, tuple_desc, &is_null); Assert(!is_null); cache_data->oid = DatumGetObjectId(value); // ag_graph.name @@ -467,20 +459,16 @@ static void fill_graph_cache_data(graph_cache_data *cache_data, static void initialize_label_caches(void) { - // ag_label.oid - ag_cache_scan_key_init(&label_oid_scan_keys[0], ObjectIdAttributeNumber, - F_OIDEQ); - // ag_label.name, ag_label.graph ag_cache_scan_key_init(&label_name_graph_scan_keys[0], Anum_ag_label_name, F_NAMEEQ); ag_cache_scan_key_init(&label_name_graph_scan_keys[1], Anum_ag_label_graph, - F_OIDEQ); + F_INT4EQ); // ag_label.graph, ag_label.id - ag_cache_scan_key_init(&label_graph_id_scan_keys[0], Anum_ag_label_graph, - F_OIDEQ); - ag_cache_scan_key_init(&label_graph_id_scan_keys[1], Anum_ag_label_id, + ag_cache_scan_key_init(&label_graph_oid_scan_keys[0], Anum_ag_label_graph, + F_INT4EQ); + ag_cache_scan_key_init(&label_graph_oid_scan_keys[1], Anum_ag_label_id, F_INT4EQ); // ag_label.relation @@ -502,32 +490,11 @@ static void create_label_caches(void) * All the hash tables are created using their dedicated memory contexts * which are under TopMemoryContext. */ - create_label_oid_cache(); create_label_name_graph_cache(); - create_label_graph_id_cache(); + create_label_graph_oid_cache(); create_label_relation_cache(); } -static void create_label_oid_cache(void) -{ - HASHCTL hash_ctl; - - /* - * Use label_cache_data itself since it has oid field as its first field - * that is the key for this hash. - */ - MemSet(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(label_cache_data); - - /* - * Please see the comment of hash_create() for the nelem value 16 here. - * HASH_BLOBS flag is set because the size of the key is sizeof(uint32). - */ - label_oid_cache_hash = hash_create("ag_label (oid) cache", 16, &hash_ctl, - HASH_ELEM | HASH_BLOBS); -} - static void create_label_name_graph_cache(void) { HASHCTL hash_ctl; @@ -545,19 +512,19 @@ static void create_label_name_graph_cache(void) HASH_ELEM | HASH_BLOBS); } -static void create_label_graph_id_cache(void) +static void create_label_graph_oid_cache(void) { HASHCTL hash_ctl; MemSet(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(label_graph_id_cache_key); - hash_ctl.entrysize = sizeof(label_graph_id_cache_entry); + hash_ctl.keysize = sizeof(label_graph_oid_cache_key); + hash_ctl.entrysize = sizeof(label_graph_oid_cache_entry); /* * Please see the comment of hash_create() for the nelem value 16 here. 
* HASH_BLOBS flag is set because the key for this hash is fixed-size. */ - label_graph_id_cache_hash = hash_create("ag_label (graph, id) cache", 16, + label_graph_oid_cache_hash = hash_create("ag_label (graph, id) cache", 16, &hash_ctl, HASH_ELEM | HASH_BLOBS); } @@ -584,75 +551,18 @@ static void invalidate_label_caches(Datum arg, Oid relid) if (OidIsValid(relid)) { - invalidate_label_oid_cache(relid); invalidate_label_name_graph_cache(relid); - invalidate_label_graph_id_cache(relid); + invalidate_label_graph_oid_cache(relid); invalidate_label_relation_cache(relid); } else { - flush_label_oid_cache(); flush_label_name_graph_cache(); - flush_label_graph_id_cache(); + flush_label_graph_oid_cache(); flush_label_relation_cache(); } } -static void invalidate_label_oid_cache(Oid relid) -{ - HASH_SEQ_STATUS hash_seq; - - hash_seq_init(&hash_seq, label_oid_cache_hash); - for (;;) - { - label_cache_data *entry; - void *removed; - - entry = hash_seq_search(&hash_seq); - if (!entry) - break; - - if (entry->relation != relid) - continue; - - removed = hash_search(label_oid_cache_hash, &entry->oid, HASH_REMOVE, - NULL); - hash_seq_term(&hash_seq); - - if (!removed) - { - ereport(ERROR, - (errmsg_internal("label (oid) cache corrupted"))); - } - - break; - } -} - -static void flush_label_oid_cache(void) -{ - HASH_SEQ_STATUS hash_seq; - - hash_seq_init(&hash_seq, label_name_graph_cache_hash); - for (;;) - { - label_cache_data *entry; - void *removed; - - entry = hash_seq_search(&hash_seq); - if (!entry) - break; - - removed = hash_search(label_oid_cache_hash, &entry->oid, HASH_REMOVE, - NULL); - if (!removed) - { - ereport(ERROR, - (errmsg_internal("label (oid) cache corrupted"))); - } - } -} - static void invalidate_label_name_graph_cache(Oid relid) { HASH_SEQ_STATUS hash_seq; @@ -708,14 +618,14 @@ static void flush_label_name_graph_cache(void) } } -static void invalidate_label_graph_id_cache(Oid relid) +static void invalidate_label_graph_oid_cache(Oid relid) { HASH_SEQ_STATUS hash_seq; - hash_seq_init(&hash_seq, label_graph_id_cache_hash); + hash_seq_init(&hash_seq, label_graph_oid_cache_hash); for (;;) { - label_graph_id_cache_entry *entry; + label_graph_oid_cache_entry *entry; void *removed; entry = hash_seq_search(&hash_seq); @@ -725,7 +635,7 @@ static void invalidate_label_graph_id_cache(Oid relid) if (entry->data.relation != relid) continue; - removed = hash_search(label_graph_id_cache_hash, &entry->key, + removed = hash_search(label_graph_oid_cache_hash, &entry->key, HASH_REMOVE, NULL); hash_seq_term(&hash_seq); @@ -739,21 +649,21 @@ static void invalidate_label_graph_id_cache(Oid relid) } } -static void flush_label_graph_id_cache(void) +static void flush_label_graph_oid_cache(void) { HASH_SEQ_STATUS hash_seq; - hash_seq_init(&hash_seq, label_graph_id_cache_hash); + hash_seq_init(&hash_seq, label_graph_oid_cache_hash); for (;;) { - label_graph_id_cache_entry *entry; + label_graph_oid_cache_entry *entry; void *removed; entry = hash_seq_search(&hash_seq); if (!entry) break; - removed = hash_search(label_graph_id_cache_hash, &entry->key, + removed = hash_search(label_graph_oid_cache_hash, &entry->key, HASH_REMOVE, NULL); if (!removed) { @@ -802,72 +712,12 @@ static void flush_label_relation_cache(void) } } -label_cache_data *search_label_oid_cache(Oid oid) -{ - label_cache_data *entry; - - initialize_caches(); - - entry = hash_search(label_oid_cache_hash, &oid, HASH_FIND, NULL); - if (entry) - return entry; - - return search_label_oid_cache_miss(oid); -} - -static label_cache_data 
*search_label_oid_cache_miss(Oid oid) -{ - ScanKeyData scan_keys[1]; - Relation ag_label; - SysScanDesc scan_desc; - HeapTuple tuple; - bool found; - label_cache_data *entry; - - memcpy(scan_keys, label_oid_scan_keys, sizeof(label_oid_scan_keys)); - scan_keys[0].sk_argument = ObjectIdGetDatum(oid); - - /* - * Calling heap_open() might call AcceptInvalidationMessage() and that - * might invalidate the label caches. This is OK because this function is - * called when the desired entry is not in the cache. - */ - ag_label = heap_open(ag_label_relation_id(), AccessShareLock); - scan_desc = systable_beginscan(ag_label, ag_label_oid_index_id(), true, - NULL, 1, scan_keys); - - // don't need to loop over scan_desc because ag_label_oid_index is UNIQUE - tuple = systable_getnext(scan_desc); - if (!HeapTupleIsValid(tuple)) - { - systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); - - return NULL; - } - - // get a new entry - entry = hash_search(label_oid_cache_hash, &oid, HASH_ENTER, &found); - Assert(!found); // no concurrent update on label_oid_cache_hash - - // fill the new entry with the retrieved tuple - fill_label_cache_data(entry, tuple, RelationGetDescr(ag_label)); - // make sure that the oid field is the same with the hash key(oid) - Assert(entry->oid == oid); - - systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); - - return entry; -} - label_cache_data *search_label_name_graph_cache(const char *name, Oid graph) { NameData name_key; label_name_graph_cache_entry *entry; AssertArg(name); - AssertArg(OidIsValid(graph)); initialize_caches(); @@ -896,11 +746,11 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name, scan_keys[1].sk_argument = ObjectIdGetDatum(graph); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might invalidate the label caches. This is OK because this function is * called when the desired entry is not in the cache. 
*/ - ag_label = heap_open(ag_label_relation_id(), AccessShareLock); + ag_label = table_open(ag_label_relation_id(), AccessShareLock); scan_desc = systable_beginscan(ag_label, ag_label_name_graph_index_id(), true, NULL, 2, scan_keys); @@ -912,7 +762,7 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name, if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return NULL; } @@ -926,7 +776,7 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name, fill_label_cache_data(&entry->data, tuple, RelationGetDescr(ag_label)); systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return &entry->data; } @@ -943,81 +793,80 @@ static void *label_name_graph_cache_hash_search(Name name, Oid graph, return hash_search(label_name_graph_cache_hash, &key, action, found); } -label_cache_data *search_label_graph_id_cache(Oid graph, int32 id) +label_cache_data *search_label_graph_oid_cache(uint32 graph_oid, int32 id) { - label_graph_id_cache_entry *entry; + label_graph_oid_cache_entry *entry; - AssertArg(OidIsValid(graph)); AssertArg(label_id_is_valid(id)); initialize_caches(); - entry = label_graph_id_cache_hash_search(graph, id, HASH_FIND, NULL); + entry = label_graph_oid_cache_hash_search(graph_oid, id, HASH_FIND, NULL); if (entry) return &entry->data; - return search_label_graph_id_cache_miss(graph, id); + return search_label_graph_oid_cache_miss(graph_oid, id); } -static label_cache_data *search_label_graph_id_cache_miss(Oid graph, int32 id) +static label_cache_data *search_label_graph_oid_cache_miss(Oid graph, uint32 id) { ScanKeyData scan_keys[2]; Relation ag_label; SysScanDesc scan_desc; HeapTuple tuple; bool found; - label_graph_id_cache_entry *entry; + label_graph_oid_cache_entry *entry; - memcpy(scan_keys, label_graph_id_scan_keys, - sizeof(label_graph_id_scan_keys)); + memcpy(scan_keys, label_graph_oid_scan_keys, + sizeof(label_graph_oid_scan_keys)); scan_keys[0].sk_argument = ObjectIdGetDatum(graph); scan_keys[1].sk_argument = Int32GetDatum(id); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might invalidate the label caches. This is OK because this function is * called when the desired entry is not in the cache. 
*/ - ag_label = heap_open(ag_label_relation_id(), AccessShareLock); - scan_desc = systable_beginscan(ag_label, ag_label_graph_id_index_id(), - true, NULL, 2, scan_keys); + ag_label = table_open(ag_label_relation_id(), AccessShareLock); + scan_desc = systable_beginscan(ag_label, ag_label_graph_oid_index_id(), true, + NULL, 2, scan_keys); /* - * don't need to loop over scan_desc because ag_label_graph_id_index is + * don't need to loop over scan_desc because ag_label_graph_oid_index is * UNIQUE */ tuple = systable_getnext(scan_desc); if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return NULL; } // get a new entry - entry = label_graph_id_cache_hash_search(graph, id, HASH_ENTER, &found); - Assert(!found); // no concurrent update on label_graph_id_cache_hash + entry = label_graph_oid_cache_hash_search(graph, id, HASH_ENTER, &found); + Assert(!found); // no concurrent update on label_graph_oid_cache_hash // fill the new entry with the retrieved tuple fill_label_cache_data(&entry->data, tuple, RelationGetDescr(ag_label)); systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return &entry->data; } -static void *label_graph_id_cache_hash_search(Oid graph, int32 id, +static void *label_graph_oid_cache_hash_search(uint32 graph, int32 id, HASHACTION action, bool *found) { - label_graph_id_cache_key key; + label_graph_oid_cache_key key; - // initialize the hash key for label_graph_id_cache_hash + // initialize the hash key for label_graph_oid_cache_hash key.graph = graph; key.id = id; - return hash_search(label_graph_id_cache_hash, &key, action, found); + return hash_search(label_graph_oid_cache_hash, &key, action, found); } label_cache_data *search_label_relation_cache(Oid relation) @@ -1047,13 +896,13 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation) scan_keys[0].sk_argument = ObjectIdGetDatum(relation); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might invalidate the label caches. This is OK because this function is * called when the desired entry is not in the cache. 
*/ - ag_label = heap_open(ag_label_relation_id(), AccessShareLock); - scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(), - true, NULL, 1, scan_keys); + ag_label = table_open(ag_label_relation_id(), AccessShareLock); + scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(), true, + NULL, 1, scan_keys); // don't need to loop over scan_desc because ag_label_relation_index is // UNIQUE @@ -1061,7 +910,7 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation) if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return NULL; } @@ -1075,7 +924,7 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation) fill_label_cache_data(entry, tuple, RelationGetDescr(ag_label)); systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return entry; } @@ -1086,10 +935,6 @@ static void fill_label_cache_data(label_cache_data *cache_data, bool is_null; Datum value; - // ag_label.oid - value = heap_getattr(tuple, ObjectIdAttributeNumber, tuple_desc, &is_null); - Assert(!is_null); - cache_data->oid = DatumGetObjectId(value); // ag_label.name value = heap_getattr(tuple, Anum_ag_label_name, tuple_desc, &is_null); Assert(!is_null); diff --git a/src/backend/utils/load/ag_load_edges.c b/src/backend/utils/load/ag_load_edges.c index 419f0097e..a7a88607b 100644 --- a/src/backend/utils/load/ag_load_edges.c +++ b/src/backend/utils/load/ag_load_edges.c @@ -17,16 +17,11 @@ * under the License. */ -#include -#include -#include -#include -#include +#include "postgres.h" -#include "utils/load/csv.h" #include "utils/load/ag_load_edges.h" #include "utils/load/age_load.h" - +#include "utils/load/csv.h" void edge_field_cb(void *field, size_t field_len, void *data) { @@ -66,14 +61,14 @@ void edge_row_cb(int delim __attribute__((unused)), void *data) size_t i, n_fields; int64 start_id_int; - graphid start_vertex_graph_id; + graphid start_vertex_graph_oid; int start_vertex_type_id; int64 end_id_int; - graphid end_vertex_graph_id; + graphid end_vertex_graph_oid; int end_vertex_type_id; - graphid object_graph_id; + graphid object_graph_oid; agtype* props = NULL; @@ -94,22 +89,22 @@ void edge_row_cb(int delim __attribute__((unused)), void *data) } else { - object_graph_id = make_graphid(cr->object_id, (int64)cr->row); + object_graph_oid = make_graphid(cr->object_id, (int64)cr->row); start_id_int = strtol(cr->fields[0], NULL, 10); - start_vertex_type_id = get_label_id(cr->fields[1], cr->graph_id); + start_vertex_type_id = get_label_id(cr->fields[1], cr->graph_oid); end_id_int = strtol(cr->fields[2], NULL, 10); - end_vertex_type_id = get_label_id(cr->fields[3], cr->graph_id); + end_vertex_type_id = get_label_id(cr->fields[3], cr->graph_oid); - start_vertex_graph_id = make_graphid(start_vertex_type_id, start_id_int); - end_vertex_graph_id = make_graphid(end_vertex_type_id, end_id_int); + start_vertex_graph_oid = make_graphid(start_vertex_type_id, start_id_int); + end_vertex_graph_oid = make_graphid(end_vertex_type_id, end_id_int); props = create_agtype_from_list_i(cr->header, cr->fields, n_fields, 3); - insert_edge_simple(cr->graph_id, cr->object_name, - object_graph_id, start_vertex_graph_id, - end_vertex_graph_id, props); + insert_edge_simple(cr->graph_oid, cr->object_name, + object_graph_oid, start_vertex_graph_oid, + end_vertex_graph_oid, props); } @@ -155,7 +150,7 @@ static int is_term(unsigned char c) int 
create_edges_from_csv_file(char *file_path, char *graph_name, - Oid graph_id, + Oid graph_oid, char *object_name, int object_id ) { @@ -191,7 +186,7 @@ int create_edges_from_csv_file(char *file_path, cr.header_row_length = 0; cr.curr_row_length = 0; cr.graph_name = graph_name; - cr.graph_id = graph_id; + cr.graph_oid = graph_oid; cr.object_name = object_name; cr.object_id = object_id; diff --git a/src/backend/utils/load/ag_load_labels.c b/src/backend/utils/load/ag_load_labels.c index c0ddf69bb..af7d20446 100644 --- a/src/backend/utils/load/ag_load_labels.c +++ b/src/backend/utils/load/ag_load_labels.c @@ -101,7 +101,7 @@ void vertex_row_cb(int delim __attribute__((unused)), void *data) csv_vertex_reader *cr = (csv_vertex_reader*)data; agtype *props = NULL; size_t i, n_fields; - graphid object_graph_id; + graphid object_graph_oid; int64 label_id_int; n_fields = cr->cur_field; @@ -131,12 +131,12 @@ void vertex_row_cb(int delim __attribute__((unused)), void *data) label_id_int = (int64)cr->row; } - object_graph_id = make_graphid(cr->object_id, label_id_int); + object_graph_oid = make_graphid(cr->object_id, label_id_int); props = create_agtype_from_list(cr->header, cr->fields, n_fields, label_id_int); - insert_vertex_simple(cr->graph_id, cr->object_name, - object_graph_id, props); + insert_vertex_simple(cr->graph_oid, cr->object_name, + object_graph_oid, props); } @@ -183,7 +183,7 @@ static int is_term(unsigned char c) int create_labels_from_csv_file(char *file_path, char *graph_name, - Oid graph_id, + Oid graph_oid, char *object_name, int object_id, bool id_field_exists) @@ -221,7 +221,7 @@ int create_labels_from_csv_file(char *file_path, cr.header_row_length = 0; cr.curr_row_length = 0; cr.graph_name = graph_name; - cr.graph_id = graph_id; + cr.graph_oid = graph_oid; cr.object_name = object_name; cr.object_id = object_id; cr.id_field_exists = id_field_exists; diff --git a/src/backend/utils/load/age_load.c b/src/backend/utils/load/age_load.c index b80e95086..28ee211dd 100644 --- a/src/backend/utils/load/age_load.c +++ b/src/backend/utils/load/age_load.c @@ -21,67 +21,45 @@ #include "access/heapam.h" #include "access/xact.h" -#include "catalog/dependency.h" -#include "catalog/namespace.h" -#include "catalog/objectaddress.h" -#include "catalog/pg_class_d.h" -#include "commands/defrem.h" -#include "commands/sequence.h" -#include "commands/tablecmds.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/nodes.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "nodes/plannodes.h" -#include "nodes/primnodes.h" -#include "nodes/value.h" #include "parser/parse_node.h" -#include "parser/parser.h" #include "storage/lockdefs.h" #include "tcop/dest.h" -#include "tcop/utility.h" -#include "utils/acl.h" #include "utils/builtins.h" -#include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "catalog/ag_graph.h" #include "catalog/ag_label.h" -#include "commands/label_commands.h" -#include "utils/ag_cache.h" #include "utils/agtype.h" #include "utils/graphid.h" -#include "utils/load/age_load.h" -#include "utils/load/ag_load_labels.h" #include "utils/load/ag_load_edges.h" +#include "utils/load/ag_load_labels.h" +#include "utils/load/age_load.h" -static agtype* create_empty_agtype(void) +static agtype *create_empty_agtype(void) { agtype_in_state result; memset(&result, 0, sizeof(agtype_in_state)); - result.res = push_agtype_value(&result.parse_state, - WAGT_BEGIN_OBJECT, NULL); - result.res = push_agtype_value(&result.parse_state, - 
WAGT_END_OBJECT, NULL); + result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, + NULL); + result.res = push_agtype_value(&result.parse_state, WAGT_END_OBJECT, NULL); return agtype_value_to_agtype(result.res); } -agtype* create_agtype_from_list(char **header, char **fields, - size_t fields_len, int64 vertex_id) +agtype *create_agtype_from_list(char **header, char **fields, size_t fields_len, + int64 vertex_id) { agtype_in_state result; int i; memset(&result, 0, sizeof(agtype_in_state)); - result.res = push_agtype_value(&result.parse_state, - WAGT_BEGIN_OBJECT, NULL); + result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, + NULL); result.res = push_agtype_value(&result.parse_state, WAGT_KEY, @@ -117,12 +95,13 @@ agtype* create_agtype_from_list_i(char **header, char **fields, { return create_empty_agtype(); } + memset(&result, 0, sizeof(agtype_in_state)); - result.res = push_agtype_value(&result.parse_state, - WAGT_BEGIN_OBJECT, NULL); + result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, + NULL); - for (i = start_index; iis_space = f; } - + void csv_set_term_func(struct csv_parser *p, int (*f)(unsigned char)) { @@ -259,7 +259,7 @@ csv_set_realloc_func(struct csv_parser *p, void *(*f)(void *, size_t)) /* Set the realloc function used to increase buffer size */ if (p && f) p->realloc_func = f; } - + void csv_set_free_func(struct csv_parser *p, void (*f)(void *)) { @@ -282,7 +282,7 @@ csv_get_buffer_size(const struct csv_parser *p) return p->entry_size; return 0; } - + static int csv_increase_buffer(struct csv_parser *p) { @@ -291,10 +291,10 @@ csv_increase_buffer(struct csv_parser *p) if (p == NULL) return 0; if (p->realloc_func == NULL) return 0; - - /* Increase the size of the entry buffer. Attempt to increase size by + + /* Increase the size of the entry buffer. Attempt to increase size by * p->blk_size, if this is larger than SIZE_MAX try to increase current - * buffer size to SIZE_MAX. If allocation fails, try to allocate halve + * buffer size to SIZE_MAX. If allocation fails, try to allocate halve * the size and try again until successful or increment size is zero. */ @@ -321,7 +321,7 @@ csv_increase_buffer(struct csv_parser *p) p->entry_size += to_add; return 0; } - + size_t csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, size_t, void *), void (*cb2)(int c, void *), void *data) { @@ -342,7 +342,7 @@ csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, s if (!p->entry_buf && pos < len) { /* Buffer hasn't been allocated yet and len > 0 */ - if (csv_increase_buffer(p) != 0) { + if (csv_increase_buffer(p) != 0) { p->quoted = quoted, p->pstate = pstate, p->spaces = spaces, p->entry_pos = entry_pos; return pos; } @@ -367,7 +367,7 @@ csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, s } else if (is_term ? 
is_term(c) : c == CSV_CR || c == CSV_LF) { /* Carriage Return or Line Feed */ if (pstate == FIELD_NOT_BEGUN) { SUBMIT_FIELD(p); - SUBMIT_ROW(p, c); + SUBMIT_ROW(p, c); } else { /* ROW_NOT_BEGUN */ /* Don't submit empty rows by default */ if (p->options & CSV_REPALL_NL) { diff --git a/src/include/catalog/ag_graph.h b/src/include/catalog/ag_graph.h index 98932023f..3704f9c01 100644 --- a/src/include/catalog/ag_graph.h +++ b/src/include/catalog/ag_graph.h @@ -24,21 +24,22 @@ #include "catalog/ag_catalog.h" -#define Anum_ag_graph_name 1 -#define Anum_ag_graph_namespace 2 +#define Anum_ag_graph_oid 1 +#define Anum_ag_graph_name 2 +#define Anum_ag_graph_namespace 3 -#define Natts_ag_graph 2 +#define Natts_ag_graph 3 #define ag_graph_relation_id() ag_relation_id("ag_graph", "table") #define ag_graph_name_index_id() ag_relation_id("ag_graph_name_index", "index") #define ag_graph_namespace_index_id() \ ag_relation_id("ag_graph_namespace_index", "index") -Oid insert_graph(const Name graph_name, const Oid nsp_id); +void insert_graph(const Name graph_name, const Oid nsp_id); void delete_graph(const Name graph_name); void update_graph_name(const Name graph_name, const Name new_name); -Oid get_graph_oid(const char *graph_name); +uint32 get_graph_oid(const char *graph_name); char *get_graph_namespace_name(const char *graph_name); List *get_graphnames(void); diff --git a/src/include/catalog/ag_label.h b/src/include/catalog/ag_label.h index d925b8d55..86dd829fc 100644 --- a/src/include/catalog/ag_label.h +++ b/src/include/catalog/ag_label.h @@ -53,11 +53,10 @@ #define Natts_ag_label 5 #define ag_label_relation_id() ag_relation_id("ag_label", "table") -#define ag_label_oid_index_id() ag_relation_id("ag_label_oid_index", "index") #define ag_label_name_graph_index_id() \ ag_relation_id("ag_label_name_graph_index", "index") -#define ag_label_graph_id_index_id() \ - ag_relation_id("ag_label_graph_id_index", "index") +#define ag_label_graph_oid_index_id() \ + ag_relation_id("ag_label_graph_oid_index", "index") #define ag_label_relation_index_id() \ ag_relation_id("ag_label_relation_index", "index") @@ -66,21 +65,21 @@ #define LABEL_KIND_VERTEX 'v' #define LABEL_KIND_EDGE 'e' -Oid insert_label(const char *label_name, Oid label_graph, int32 label_id, - char label_kind, Oid label_relation); +void insert_label(const char *label_name, Oid graph_oid, int32 label_id, + char label_kind, Oid label_relation); void delete_label(Oid relation); -Oid get_label_oid(const char *label_name, Oid label_graph); -int32 get_label_id(const char *label_name, Oid label_graph); -Oid get_label_relation(const char *label_name, Oid label_graph); -char *get_label_relation_name(const char *label_name, Oid label_graph); +int32 get_label_id(const char *label_name, Oid graph_oid); +Oid get_label_relation(const char *label_name, Oid graph_oid); +char *get_label_relation_name(const char *label_name, Oid graph_oid); -bool label_id_exists(Oid label_graph, int32 label_id); -RangeVar *get_label_range_var(char *graph_name, Oid graph_oid, char *label_name); +bool label_id_exists(Oid graph_oid, int32 label_id); +RangeVar *get_label_range_var(char *graph_name, Oid graph_oid, + char *label_name); List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid); #define label_exists(label_name, label_graph) \ - OidIsValid(get_label_oid(label_name, label_graph)) + OidIsValid(get_label_id(label_name, label_graph)) #endif diff --git a/src/include/commands/label_commands.h b/src/include/commands/label_commands.h index ad1041c4d..dcc03449a 100644 --- 
a/src/include/commands/label_commands.h +++ b/src/include/commands/label_commands.h @@ -54,7 +54,7 @@ #define IS_AG_DEFAULT_LABEL(x) \ (IS_DEFAULT_LABEL_EDGE(x) || IS_DEFAULT_LABEL_VERTEX(x)) -Oid create_label(char *graph_name, char *label_name, char label_type, - List *parents); +void create_label(char *graph_name, char *label_name, char label_type, + List *parents); #endif diff --git a/src/include/executor/cypher_utils.h b/src/include/executor/cypher_utils.h index 4d19937f6..603f00306 100644 --- a/src/include/executor/cypher_utils.h +++ b/src/include/executor/cypher_utils.h @@ -20,6 +20,9 @@ #ifndef AG_CYPHER_UTILS_H #define AG_CYPHER_UTILS_H +#include "access/heapam.h" +#include "access/table.h" +#include "access/tableam.h" #include "nodes/execnodes.h" #include "nodes/extensible.h" #include "nodes/nodes.h" diff --git a/src/include/nodes/cypher_nodes.h b/src/include/nodes/cypher_nodes.h index fe9f6bd70..533515da1 100644 --- a/src/include/nodes/cypher_nodes.h +++ b/src/include/nodes/cypher_nodes.h @@ -225,7 +225,7 @@ typedef struct cypher_create_target_nodes ExtensibleNode extensible; List *paths; uint32 flags; - Oid graph_oid; + uint32 graph_oid; } cypher_create_target_nodes; typedef struct cypher_create_path @@ -368,7 +368,7 @@ typedef struct cypher_delete_information List *delete_items; int flags; char *graph_name; - Oid graph_oid; + uint32 graph_oid; bool detach; } cypher_delete_information; @@ -383,7 +383,7 @@ typedef struct cypher_merge_information { ExtensibleNode extensible; int flags; - Oid graph_oid; + uint32 graph_oid; AttrNumber merge_function_attr; cypher_create_path *path; } cypher_merge_information; diff --git a/src/include/optimizer/cypher_createplan.h b/src/include/optimizer/cypher_createplan.h index 50a86e158..e42e20b71 100644 --- a/src/include/optimizer/cypher_createplan.h +++ b/src/include/optimizer/cypher_createplan.h @@ -22,7 +22,6 @@ #include "nodes/pg_list.h" #include "nodes/plannodes.h" -#include "nodes/relation.h" Plan *plan_cypher_create_path(PlannerInfo *root, RelOptInfo *rel, CustomPath *best_path, List *tlist, diff --git a/src/include/optimizer/cypher_pathnode.h b/src/include/optimizer/cypher_pathnode.h index 638268b7e..22404f259 100644 --- a/src/include/optimizer/cypher_pathnode.h +++ b/src/include/optimizer/cypher_pathnode.h @@ -21,7 +21,6 @@ #define AG_CYPHER_PATHNODE_H #include "nodes/pg_list.h" -#include "nodes/relation.h" #define CREATE_PATH_NAME "Cypher Create" #define SET_PATH_NAME "Cypher Set" diff --git a/src/include/parser/.gitignore b/src/include/parser/.gitignore index 891bb4cb1..a9f39a00c 100644 --- a/src/include/parser/.gitignore +++ b/src/include/parser/.gitignore @@ -1 +1,2 @@ cypher_gram_def.h +cypher_kwlist_d.h diff --git a/src/include/parser/cypher_keywords.h b/src/include/parser/cypher_keywords.h index 8d1bbb426..d578dba8d 100644 --- a/src/include/parser/cypher_keywords.h +++ b/src/include/parser/cypher_keywords.h @@ -20,9 +20,10 @@ #ifndef AG_KEYWORDS_H #define AG_KEYWORDS_H -#include "common/keywords.h" +#include "common/kwlookup.h" -extern const ScanKeyword cypher_keywords[]; -extern const int num_cypher_keywords; +extern const ScanKeywordList CypherKeyword; +extern const uint16 CypherKeywordTokens[]; +extern const uint16 CypherKeywordCategories[]; #endif diff --git a/src/include/parser/cypher_kwlist.h b/src/include/parser/cypher_kwlist.h new file mode 100644 index 000000000..d15a0e34a --- /dev/null +++ b/src/include/parser/cypher_kwlist.h @@ -0,0 +1,48 @@ +PG_KEYWORD("all", ALL, RESERVED_KEYWORD) +PG_KEYWORD("analyze", ANALYZE, 
RESERVED_KEYWORD) +PG_KEYWORD("and", AND, RESERVED_KEYWORD) +PG_KEYWORD("as", AS, RESERVED_KEYWORD) +PG_KEYWORD("asc", ASC, RESERVED_KEYWORD) +PG_KEYWORD("ascending", ASCENDING, RESERVED_KEYWORD) +PG_KEYWORD("by", BY, RESERVED_KEYWORD) +PG_KEYWORD("call", CALL, RESERVED_KEYWORD) +PG_KEYWORD("case", CASE, RESERVED_KEYWORD) +PG_KEYWORD("coalesce", COALESCE, RESERVED_KEYWORD) +PG_KEYWORD("contains", CONTAINS, RESERVED_KEYWORD) +PG_KEYWORD("create", CREATE, RESERVED_KEYWORD) +PG_KEYWORD("delete", DELETE, RESERVED_KEYWORD) +PG_KEYWORD("desc", DESC, RESERVED_KEYWORD) +PG_KEYWORD("descending", DESCENDING, RESERVED_KEYWORD) +PG_KEYWORD("detach", DETACH, RESERVED_KEYWORD) +PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD) +PG_KEYWORD("else", ELSE, RESERVED_KEYWORD) +PG_KEYWORD("end", END_P, RESERVED_KEYWORD) +PG_KEYWORD("ends", ENDS, RESERVED_KEYWORD) +PG_KEYWORD("exists", EXISTS, RESERVED_KEYWORD) +PG_KEYWORD("explain", EXPLAIN, RESERVED_KEYWORD) +PG_KEYWORD("false", FALSE_P, RESERVED_KEYWORD) +PG_KEYWORD("in", IN, RESERVED_KEYWORD) +PG_KEYWORD("is", IS, RESERVED_KEYWORD) +PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD) +PG_KEYWORD("match", MATCH, RESERVED_KEYWORD) +PG_KEYWORD("merge", MERGE, RESERVED_KEYWORD) +PG_KEYWORD("not", NOT, RESERVED_KEYWORD) +PG_KEYWORD("null", NULL_P, RESERVED_KEYWORD) +PG_KEYWORD("optional", OPTIONAL, RESERVED_KEYWORD) +PG_KEYWORD("or", OR, RESERVED_KEYWORD) +PG_KEYWORD("order", ORDER, RESERVED_KEYWORD) +PG_KEYWORD("remove", REMOVE, RESERVED_KEYWORD) +PG_KEYWORD("return", RETURN, RESERVED_KEYWORD) +PG_KEYWORD("set", SET, RESERVED_KEYWORD) +PG_KEYWORD("skip", SKIP, RESERVED_KEYWORD) +PG_KEYWORD("starts", STARTS, RESERVED_KEYWORD) +PG_KEYWORD("then", THEN, RESERVED_KEYWORD) +PG_KEYWORD("true", TRUE_P, RESERVED_KEYWORD) +PG_KEYWORD("union", UNION, RESERVED_KEYWORD) +PG_KEYWORD("unwind", UNWIND, RESERVED_KEYWORD) +PG_KEYWORD("verbose", VERBOSE, RESERVED_KEYWORD) +PG_KEYWORD("when", WHEN, RESERVED_KEYWORD) +PG_KEYWORD("where", WHERE, RESERVED_KEYWORD) +PG_KEYWORD("with", WITH, RESERVED_KEYWORD) +PG_KEYWORD("xor", XOR, RESERVED_KEYWORD) +PG_KEYWORD("yield", YIELD, RESERVED_KEYWORD) \ No newline at end of file diff --git a/src/include/parser/cypher_parse_node.h b/src/include/parser/cypher_parse_node.h index 5b6cc695a..268eb1abb 100644 --- a/src/include/parser/cypher_parse_node.h +++ b/src/include/parser/cypher_parse_node.h @@ -32,7 +32,7 @@ typedef struct cypher_parsestate { ParseState pstate; char *graph_name; - Oid graph_oid; + uint32 graph_oid; Param *params; int default_alias_num; List *entities; diff --git a/src/include/utils/ag_cache.h b/src/include/utils/ag_cache.h index deb50ef82..20da6e443 100644 --- a/src/include/utils/ag_cache.h +++ b/src/include/utils/ag_cache.h @@ -33,7 +33,6 @@ typedef struct graph_cache_data // label_cache_data contains the same fields that ag_label catalog table has typedef struct label_cache_data { - Oid oid; NameData name; Oid graph; int32 id; @@ -46,7 +45,7 @@ graph_cache_data *search_graph_name_cache(const char *name); graph_cache_data *search_graph_namespace_cache(Oid namespace); label_cache_data *search_label_oid_cache(Oid oid); label_cache_data *search_label_name_graph_cache(const char *name, Oid graph); -label_cache_data *search_label_graph_id_cache(Oid graph, int32 id); +label_cache_data *search_label_graph_oid_cache(Oid graph, int32 id); label_cache_data *search_label_relation_cache(Oid relation); #endif diff --git a/src/include/utils/age_graphid_ds.h b/src/include/utils/age_graphid_ds.h index cfacfc1db..ea9dabdc3 100644 --- 
a/src/include/utils/age_graphid_ds.h +++ b/src/include/utils/age_graphid_ds.h @@ -20,6 +20,8 @@ #ifndef AG_AGE_GRAPHID_DS_H #define AG_AGE_GRAPHID_DS_H +#include "utils/graphid.h" + #define IS_GRAPHID_STACK_EMPTY(stack) \ get_stack_size(stack) == 0 #define PEEK_GRAPHID_STACK(stack) \ diff --git a/src/include/utils/agtype.h b/src/include/utils/agtype.h index e5019d5df..c427f3cfe 100644 --- a/src/include/utils/agtype.h +++ b/src/include/utils/agtype.h @@ -31,8 +31,8 @@ #ifndef AG_AGTYPE_H #define AG_AGTYPE_H -#include "fmgr.h" #include "access/htup_details.h" +#include "fmgr.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "utils/array.h" @@ -40,6 +40,7 @@ #include "utils/syscache.h" #include "catalog/ag_namespace.h" +#include "catalog/pg_type.h" #include "utils/graphid.h" /* Tokens used when sequentially processing an agtype value */ diff --git a/src/include/utils/graphid.h b/src/include/utils/graphid.h index 103977785..999661cf2 100644 --- a/src/include/utils/graphid.h +++ b/src/include/utils/graphid.h @@ -27,6 +27,7 @@ #include "utils/syscache.h" #include "catalog/ag_namespace.h" +#include "catalog/pg_type.h" typedef int64 graphid; #define F_GRAPHIDEQ F_INT8EQ diff --git a/src/include/utils/load/ag_load_edges.h b/src/include/utils/load/ag_load_edges.h index eb5463ed8..3f4ffa8f9 100644 --- a/src/include/utils/load/ag_load_edges.h +++ b/src/include/utils/load/ag_load_edges.h @@ -20,11 +20,10 @@ #ifndef AG_LOAD_EDGES_H #define AG_LOAD_EDGES_H - -#include -#include #include +#include #include +#include #include @@ -80,7 +79,7 @@ typedef struct { size_t header_row_length; size_t curr_row_length; char *graph_name; - Oid graph_id; + Oid graph_oid; char *object_name; int object_id; char *start_vertex; @@ -92,7 +91,7 @@ typedef struct { void edge_field_cb(void *field, size_t field_len, void *data); void edge_row_cb(int delim __attribute__((unused)), void *data); -int create_edges_from_csv_file(char *file_path, char *graph_name, Oid graph_id, +int create_edges_from_csv_file(char *file_path, char *graph_name, Oid graph_oid, char *object_name, int object_id ); #endif //AG_LOAD_EDGES_H diff --git a/src/include/utils/load/ag_load_labels.h b/src/include/utils/load/ag_load_labels.h index 5689c23db..8bf24c246 100644 --- a/src/include/utils/load/ag_load_labels.h +++ b/src/include/utils/load/ag_load_labels.h @@ -21,11 +21,10 @@ #ifndef AG_LOAD_LABELS_H #define AG_LOAD_LABELS_H - -#include -#include #include +#include #include +#include #include #include "postgres.h" @@ -65,7 +64,6 @@ #include "utils/agtype.h" #include "utils/graphid.h" - #define AGE_VERTIX 1 #define AGE_EDGE 2 @@ -89,7 +87,7 @@ typedef struct { size_t header_row_length; size_t curr_row_length; char *graph_name; - Oid graph_id; + Oid graph_oid; char *object_name; int object_id; bool id_field_exists; @@ -99,7 +97,7 @@ typedef struct { void vertex_field_cb(void *field, size_t field_len, void *data); void vertex_row_cb(int delim __attribute__((unused)), void *data); -int create_labels_from_csv_file(char *file_path, char *graph_name, Oid graph_id, +int create_labels_from_csv_file(char *file_path, char *graph_name, Oid graph_oid, char *object_name, int object_id, bool id_field_exists); diff --git a/src/include/utils/load/age_load.h b/src/include/utils/load/age_load.h index 9eeca3614..d5fd19291 100644 --- a/src/include/utils/load/age_load.h +++ b/src/include/utils/load/age_load.h @@ -61,9 +61,9 @@ agtype* create_agtype_from_list(char **header, char **fields, size_t fields_len, int64 vertex_id); agtype* 
create_agtype_from_list_i(char **header, char **fields, size_t fields_len, size_t start_index); -void insert_vertex_simple(Oid graph_id, char* label_name, graphid vertex_id, - agtype* vertex_properties); -void insert_edge_simple(Oid graph_id, char* label_name, graphid edge_id, +void insert_vertex_simple(Oid graph_oid, char *label_name, graphid vertex_id, + agtype *vertex_properties); +void insert_edge_simple(Oid graph_oid, char *label_name, graphid edge_id, graphid start_id, graphid end_id, agtype* end_properties); diff --git a/tools/PerfectHash.pm b/tools/PerfectHash.pm new file mode 100644 index 000000000..5a04f7e95 --- /dev/null +++ b/tools/PerfectHash.pm @@ -0,0 +1,399 @@ +# +# For PostgreSQL Database Management System: +# (formerly known as Postgres, then as Postgres95) +# +# Portions Copyright (c) 1996-2010, The PostgreSQL Global Development Group +# +# Portions Copyright (c) 1994, The Regents of the University of California +# +# Permission to use, copy, modify, and distribute this software and its documentation for any purpose, +# without fee, and without a written agreement is hereby granted, provided that the above copyright notice +# and this paragraph and the following two paragraphs appear in all copies. +# +# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, +# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, +# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY +# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, +# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA +# HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +# +#---------------------------------------------------------------------- +# +# PerfectHash.pm +# Perl module that constructs minimal perfect hash functions +# +# This code constructs a minimal perfect hash function for the given +# set of keys, using an algorithm described in +# "An optimal algorithm for generating minimal perfect hash functions" +# by Czech, Havas and Majewski in Information Processing Letters, +# 43(5):256-264, October 1992. +# This implementation is loosely based on NetBSD's "nbperf", +# which was written by Joerg Sonnenberger. +# +# The resulting hash function is perfect in the sense that if the presented +# key is one of the original set, it will return the key's index in the set +# (in range 0..N-1). However, the caller must still verify the match, +# as false positives are possible. Also, the hash function may return +# values that are out of range (negative or >= N), due to summing unrelated +# hashtable entries. This indicates that the presented key is definitely +# not in the set. +# +# +# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/tools/PerfectHash.pm +# +#---------------------------------------------------------------------- + +package PerfectHash; + +use strict; +use warnings; + + +# At runtime, we'll compute two simple hash functions of the input key, +# and use them to index into a mapping table. The hash functions are just +# multiply-and-add in uint32 arithmetic, with different multipliers and +# initial seeds. 
All the complexity in this module is concerned with +# selecting hash parameters that will work and building the mapping table. + +# We support making case-insensitive hash functions, though this only +# works for a strict-ASCII interpretation of case insensitivity, +# ie, A-Z maps onto a-z and nothing else. +my $case_fold = 0; + + +# +# Construct a C function implementing a perfect hash for the given keys. +# The C function definition is returned as a string. +# +# The keys should be passed as an array reference. They can be any set +# of Perl strings; it is caller's responsibility that there not be any +# duplicates. (Note that the "strings" can be binary data, but hashing +# e.g. OIDs has endianness hazards that callers must overcome.) +# +# The name to use for the function is specified as the second argument. +# It will be a global function by default, but the caller may prepend +# "static " to the result string if it wants a static function. +# +# Additional options can be specified as keyword-style arguments: +# +# case_fold => bool +# If specified as true, the hash function is case-insensitive, for the +# limited idea of case-insensitivity explained above. +# +# fixed_key_length => N +# If specified, all keys are assumed to have length N bytes, and the +# hash function signature will be just "int f(const void *key)" +# rather than "int f(const void *key, size_t keylen)". +# +sub generate_hash_function +{ + my ($keys_ref, $funcname, %options) = @_; + + # It's not worth passing this around as a parameter; just use a global. + $case_fold = $options{case_fold} || 0; + + # Try different hash function parameters until we find a set that works + # for these keys. The multipliers are chosen to be primes that are cheap + # to calculate via shift-and-add, so don't change them without care. + # (Commonly, random seeds are tried, but we want reproducible results + # from this program so we don't do that.) + my $hash_mult1 = 31; + my $hash_mult2; + my $hash_seed1; + my $hash_seed2; + my @subresult; + FIND_PARAMS: + foreach (127, 257, 521, 1033, 2053) + { + $hash_mult2 = $_; # "foreach $hash_mult2" doesn't work + for ($hash_seed1 = 0; $hash_seed1 < 10; $hash_seed1++) + { + for ($hash_seed2 = 0; $hash_seed2 < 10; $hash_seed2++) + { + @subresult = _construct_hash_table( + $keys_ref, $hash_mult1, $hash_mult2, + $hash_seed1, $hash_seed2); + last FIND_PARAMS if @subresult; + } + } + } + + # Choke if we couldn't find a workable set of parameters. + die "failed to generate perfect hash" if !@subresult; + + # Extract info from _construct_hash_table's result array. + my $elemtype = $subresult[0]; + my @hashtab = @{ $subresult[1] }; + my $nhash = scalar(@hashtab); + + # OK, construct the hash function definition including the hash table. + my $f = ''; + $f .= sprintf "int\n"; + if (defined $options{fixed_key_length}) + { + $f .= sprintf "%s(const void *key)\n{\n", $funcname; + } + else + { + $f .= sprintf "%s(const void *key, size_t keylen)\n{\n", $funcname; + } + $f .= sprintf "\tstatic const %s h[%d] = {\n", $elemtype, $nhash; + for (my $i = 0; $i < $nhash; $i++) + { + $f .= sprintf "%s%6d,%s", + ($i % 8 == 0 ? "\t\t" : " "), + $hashtab[$i], + ($i % 8 == 7 ? 
"\n" : ""); + } + $f .= sprintf "\n" if ($nhash % 8 != 0); + $f .= sprintf "\t};\n\n"; + $f .= sprintf "\tconst unsigned char *k = (const unsigned char *) key;\n"; + $f .= sprintf "\tsize_t\t\tkeylen = %d;\n", $options{fixed_key_length} + if (defined $options{fixed_key_length}); + $f .= sprintf "\tuint32\t\ta = %d;\n", $hash_seed1; + $f .= sprintf "\tuint32\t\tb = %d;\n\n", $hash_seed2; + $f .= sprintf "\twhile (keylen--)\n\t{\n"; + $f .= sprintf "\t\tunsigned char c = *k++"; + $f .= sprintf " | 0x20" if $case_fold; # see comment below + $f .= sprintf ";\n\n"; + $f .= sprintf "\t\ta = a * %d + c;\n", $hash_mult1; + $f .= sprintf "\t\tb = b * %d + c;\n", $hash_mult2; + $f .= sprintf "\t}\n"; + $f .= sprintf "\treturn h[a %% %d] + h[b %% %d];\n", $nhash, $nhash; + $f .= sprintf "}\n"; + + return $f; +} + + +# Calculate a hash function as the run-time code will do. +# +# If we are making a case-insensitive hash function, we implement that +# by OR'ing 0x20 into each byte of the key. This correctly transforms +# upper-case ASCII into lower-case ASCII, while not changing digits or +# dollar signs. (It does change '_', as well as other characters not +# likely to appear in keywords; this has little effect on the hash's +# ability to discriminate keywords.) +sub _calc_hash +{ + my ($key, $mult, $seed) = @_; + + my $result = $seed; + for my $c (split //, $key) + { + my $cn = ord($c); + $cn |= 0x20 if $case_fold; + $result = ($result * $mult + $cn) % 4294967296; + } + return $result; +} + + +# Attempt to construct a mapping table for a minimal perfect hash function +# for the given keys, using the specified hash parameters. +# +# Returns an array containing the mapping table element type name as the +# first element, and a ref to an array of the table values as the second. +# +# Returns an empty array on failure; then caller should choose different +# hash parameter(s) and try again. +sub _construct_hash_table +{ + my ($keys_ref, $hash_mult1, $hash_mult2, $hash_seed1, $hash_seed2) = @_; + my @keys = @{$keys_ref}; + + # This algorithm is based on a graph whose edges correspond to the + # keys and whose vertices correspond to entries of the mapping table. + # A key's edge links the two vertices whose indexes are the outputs of + # the two hash functions for that key. For K keys, the mapping + # table must have at least 2*K+1 entries, guaranteeing that there's at + # least one unused entry. (In principle, larger mapping tables make it + # easier to find a workable hash and increase the number of inputs that + # can be rejected due to touching unused hashtable entries. In practice, + # neither effect seems strong enough to justify using a larger table.) + my $nedges = scalar @keys; # number of edges + my $nverts = 2 * $nedges + 1; # number of vertices + + # However, it would be very bad if $nverts were exactly equal to either + # $hash_mult1 or $hash_mult2: effectively, that hash function would be + # sensitive to only the last byte of each key. Cases where $nverts is a + # multiple of either multiplier likewise lose information. (But $nverts + # can't actually divide them, if they've been intelligently chosen as + # primes.) We can avoid such problems by adjusting the table size. + while ($nverts % $hash_mult1 == 0 + || $nverts % $hash_mult2 == 0) + { + $nverts++; + } + + # Initialize the array of edges. + my @E = (); + foreach my $kw (@keys) + { + # Calculate hashes for this key. + # The hashes are immediately reduced modulo the mapping table size. 
+ my $hash1 = _calc_hash($kw, $hash_mult1, $hash_seed1) % $nverts; + my $hash2 = _calc_hash($kw, $hash_mult2, $hash_seed2) % $nverts; + + # If the two hashes are the same for any key, we have to fail + # since this edge would itself form a cycle in the graph. + return () if $hash1 == $hash2; + + # Add the edge for this key. + push @E, { left => $hash1, right => $hash2 }; + } + + # Initialize the array of vertices, giving them all empty lists + # of associated edges. (The lists will be hashes of edge numbers.) + my @V = (); + for (my $v = 0; $v < $nverts; $v++) + { + push @V, { edges => {} }; + } + + # Insert each edge in the lists of edges connected to its vertices. + for (my $e = 0; $e < $nedges; $e++) + { + my $v = $E[$e]{left}; + $V[$v]{edges}->{$e} = 1; + + $v = $E[$e]{right}; + $V[$v]{edges}->{$e} = 1; + } + + # Now we attempt to prove the graph acyclic. + # A cycle-free graph is either empty or has some vertex of degree 1. + # Removing the edge attached to that vertex doesn't change this property, + # so doing that repeatedly will reduce the size of the graph. + # If the graph is empty at the end of the process, it was acyclic. + # We track the order of edge removal so that the next phase can process + # them in reverse order of removal. + my @output_order = (); + + # Consider each vertex as a possible starting point for edge-removal. + for (my $startv = 0; $startv < $nverts; $startv++) + { + my $v = $startv; + + # If vertex v is of degree 1 (i.e. exactly 1 edge connects to it), + # remove that edge, and then consider the edge's other vertex to see + # if it is now of degree 1. The inner loop repeats until reaching a + # vertex not of degree 1. + while (scalar(keys(%{ $V[$v]{edges} })) == 1) + { + # Unlink its only edge. + my $e = (keys(%{ $V[$v]{edges} }))[0]; + delete($V[$v]{edges}->{$e}); + + # Unlink the edge from its other vertex, too. + my $v2 = $E[$e]{left}; + $v2 = $E[$e]{right} if ($v2 == $v); + delete($V[$v2]{edges}->{$e}); + + # Push e onto the front of the output-order list. + unshift @output_order, $e; + + # Consider v2 on next iteration of inner loop. + $v = $v2; + } + } + + # We succeeded only if all edges were removed from the graph. + return () if (scalar(@output_order) != $nedges); + + # OK, build the hash table of size $nverts. + my @hashtab = (0) x $nverts; + # We need a "visited" flag array in this step, too. + my @visited = (0) x $nverts; + + # The goal is that for any key, the sum of the hash table entries for + # its first and second hash values is the desired output (i.e., the key + # number). By assigning hash table values in the selected edge order, + # we can guarantee that that's true. This works because the edge first + # removed from the graph (and hence last to be visited here) must have + # at least one vertex it shared with no other edge; hence it will have at + # least one vertex (hashtable entry) still unvisited when we reach it here, + # and we can assign that unvisited entry a value that makes the sum come + # out as we wish. By induction, the same holds for all the other edges. + foreach my $e (@output_order) + { + my $l = $E[$e]{left}; + my $r = $E[$e]{right}; + if (!$visited[$l]) + { + # $hashtab[$r] might be zero, or some previously assigned value. + $hashtab[$l] = $e - $hashtab[$r]; + } + else + { + die "oops, doubly used hashtab entry" if $visited[$r]; + # $hashtab[$l] might be zero, or some previously assigned value. + $hashtab[$r] = $e - $hashtab[$l]; + } + # Now freeze both of these hashtab entries. 
+ $visited[$l] = 1; + $visited[$r] = 1; + } + + # Detect range of values needed in hash table. + my $hmin = $nedges; + my $hmax = 0; + for (my $v = 0; $v < $nverts; $v++) + { + $hmin = $hashtab[$v] if $hashtab[$v] < $hmin; + $hmax = $hashtab[$v] if $hashtab[$v] > $hmax; + } + + # Choose width of hashtable entries. In addition to the actual values, + # we need to be able to store a flag for unused entries, and we wish to + # have the property that adding any other entry value to the flag gives + # an out-of-range result (>= $nedges). + my $elemtype; + my $unused_flag; + + if ( $hmin >= -0x7F + && $hmax <= 0x7F + && $hmin + 0x7F >= $nedges) + { + # int8 will work + $elemtype = 'int8'; + $unused_flag = 0x7F; + } + elsif ($hmin >= -0x7FFF + && $hmax <= 0x7FFF + && $hmin + 0x7FFF >= $nedges) + { + # int16 will work + $elemtype = 'int16'; + $unused_flag = 0x7FFF; + } + elsif ($hmin >= -0x7FFFFFFF + && $hmax <= 0x7FFFFFFF + && $hmin + 0x3FFFFFFF >= $nedges) + { + # int32 will work + $elemtype = 'int32'; + $unused_flag = 0x3FFFFFFF; + } + else + { + die "hash table values too wide"; + } + + # Set any unvisited hashtable entries to $unused_flag. + for (my $v = 0; $v < $nverts; $v++) + { + $hashtab[$v] = $unused_flag if !$visited[$v]; + } + + return ($elemtype, \@hashtab); +} + +1; diff --git a/tools/gen_keywordlist.pl b/tools/gen_keywordlist.pl new file mode 100755 index 000000000..499300433 --- /dev/null +++ b/tools/gen_keywordlist.pl @@ -0,0 +1,221 @@ +# +# For PostgreSQL Database Management System: +# (formerly known as Postgres, then as Postgres95) +# +# Portions Copyright (c) 1996-2010, The PostgreSQL Global Development Group +# +# Portions Copyright (c) 1994, The Regents of the University of California +# +# Permission to use, copy, modify, and distribute this software and its documentation for any purpose, +# without fee, and without a written agreement is hereby granted, provided that the above copyright notice +# and this paragraph and the following two paragraphs appear in all copies. +# +# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, +# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, +# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY +# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, +# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA +# HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +# +#---------------------------------------------------------------------- +# +# gen_keywordlist.pl +# Perl script that transforms a list of keywords into a ScanKeywordList +# data structure that can be passed to ScanKeywordLookup(). +# +# The input is a C header file containing a series of macro calls +# PG_KEYWORD("keyword", ...) +# Lines not starting with PG_KEYWORD are ignored. The keywords are +# implicitly numbered 0..N-1 in order of appearance in the header file. +# Currently, the keywords are required to appear in ASCII order. +# +# The output is a C header file that defines a "const ScanKeywordList" +# variable named according to the -v switch ("ScanKeywords" by default). +# The variable is marked "static" unless the -e switch is given. 
+#
+# ScanKeywordList uses hash-based lookup, so this script also selects
+# a minimal perfect hash function for the keyword set, and emits a
+# static hash function that is referenced in the ScanKeywordList struct.
+# The hash function is case-insensitive unless --no-case-fold is specified.
+# Note that case folding works correctly only for all-ASCII keywords!
+#
+#
+# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/tools/gen_keywordlist.pl
+#
+#----------------------------------------------------------------------
+
+
+use strict;
+use warnings;
+use Getopt::Long;
+
+use FindBin;
+use lib $FindBin::RealBin;
+
+use PerfectHash;
+
+my $output_path = '';
+my $extern = 0;
+my $case_fold = 1;
+my $varname = 'ScanKeywords';
+
+GetOptions(
+ 'output:s' => \$output_path,
+ 'extern' => \$extern,
+ 'case-fold!' => \$case_fold,
+ 'varname:s' => \$varname) || usage();
+
+my $kw_input_file = shift @ARGV || die "No input file.\n";
+
+# Make sure output_path ends in a slash if needed.
+if ($output_path ne '' && substr($output_path, -1) ne '/')
+{
+ $output_path .= '/';
+}
+
+$kw_input_file =~ /(\w+)\.h$/
+ || die "Input file must be named something.h.\n";
+my $base_filename = $1 . '_d';
+my $kw_def_file = $output_path . $base_filename . '.h';
+
+open(my $kif, '<', $kw_input_file) || die "$kw_input_file: $!\n";
+open(my $kwdef, '>', $kw_def_file) || die "$kw_def_file: $!\n";
+
+# Opening boilerplate for keyword definition header.
+printf $kwdef <<EOM, $base_filename, uc $base_filename, uc $base_filename;
+/*-------------------------------------------------------------------------
+ *
+ * %s.h
+ *    List of keywords represented as a ScanKeywordList.
+ *
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * NOTES
+ *  ******************************
+ *  *** DO NOT EDIT THIS FILE! ***
+ *  ******************************
+ *
+ *  It has been GENERATED by src/tools/gen_keywordlist.pl
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef %s_H
+#define %s_H
+
+EOM
+
+# Parse input file for keyword names.
+
+my @keywords;
+
+while (<$kif>)
+{
+ if (/^PG_KEYWORD\("(\w+)"/)
+ {
+ push @keywords, $1;
+ }
+}
+
+# When being case-insensitive, insist that the input be all-lower-case.
+if ($case_fold)
+{
+ foreach my $kw (@keywords)
+ {
+ die qq|The keyword "$kw" is not lower-case in $kw_input_file\n|
+ if ($kw ne lc $kw);
+ }
+}
+
+# Error out if the keyword names are not in ASCII order.
+#
+# While this isn't really necessary with hash-based lookup, it's still
+# helpful because it provides a cheap way to reject duplicate keywords.
+# Also, insisting on sorted order ensures that code that scans the keyword
+# table linearly will see the keywords in a canonical order.
+for my $i (0 .. $#keywords - 1)
+{
+ die
+ qq|The keyword "$keywords[$i + 1]" is out of order in $kw_input_file\n|
+ if ($keywords[$i] cmp $keywords[ $i + 1 ]) >= 0;
+}
+
+# Emit the string containing all the keywords.
+
+printf $kwdef qq|static const char %s_kw_string[] =\n\t"|, $varname;
+print $kwdef join qq|\\0"\n\t"|, @keywords;
+print $kwdef qq|";\n\n|;
+
+# Emit an array of numerical offsets which will be used to index into the
+# keyword string. Also determine max keyword length.
+
+printf $kwdef "static const uint16 %s_kw_offsets[] = {\n", $varname;
+
+my $offset = 0;
+my $max_len = 0;
+foreach my $name (@keywords)
+{
+ my $this_length = length($name);
+
+ print $kwdef "\t$offset,\n";
+
+ # Calculate the cumulative offset of the next keyword,
+ # taking into account the null terminator.
+ $offset += $this_length + 1;
+
+ # Update max keyword length.
+ $max_len = $this_length if $max_len < $this_length;
+}
+
+print $kwdef "};\n\n";
+
+# Emit a macro defining the number of keywords.
+# (In some places it's useful to have access to that as a constant.)
+
+printf $kwdef "#define %s_NUM_KEYWORDS %d\n\n", uc $varname, scalar @keywords;
+
+# Emit the definition of the hash function.
+
+my $funcname = $varname . "_hash_func";
"_hash_func"; + +my $f = PerfectHash::generate_hash_function(\@keywords, $funcname, + case_fold => $case_fold); + +printf $kwdef qq|static %s\n|, $f; + +# Emit the struct that wraps all this lookup info into one variable. + +printf $kwdef "static " if !$extern; +printf $kwdef "const ScanKeywordList %s = {\n", $varname; +printf $kwdef qq|\t%s_kw_string,\n|, $varname; +printf $kwdef qq|\t%s_kw_offsets,\n|, $varname; +printf $kwdef qq|\t%s,\n|, $funcname; +printf $kwdef qq|\t%s_NUM_KEYWORDS,\n|, uc $varname; +printf $kwdef qq|\t%d\n|, $max_len; +printf $kwdef "};\n\n"; + +printf $kwdef "#endif\t\t\t\t\t\t\t/* %s_H */\n", uc $base_filename; + + +sub usage +{ + die <] [--varname/-v ] [--extern/-e] [--[no-]case-fold] input_file + --output Output directory (default '.') + --varname Name for ScanKeywordList variable (default 'ScanKeywords') + --extern Allow the ScanKeywordList variable to be globally visible + --no-case-fold Keyword matching is to be case-sensitive + +gen_keywordlist.pl transforms a list of keywords into a ScanKeywordList. +The output filename is derived from the input file by inserting _d, +for example kwlist_d.h is produced from kwlist.h. +EOM +}