From aef5e1a6e382f1ab0203f067e4efbda566ba447a Mon Sep 17 00:00:00 2001 From: John Gemignani Date: Wed, 10 Aug 2022 12:14:51 -0700 Subject: [PATCH 01/18] Add upgrade script for 1.0.0 to 1.1.0 Added upgrade script for 1.0.0 to 1.1.0 --- age--1.0.0--1.1.0.sql | 247 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100644 age--1.0.0--1.1.0.sql diff --git a/age--1.0.0--1.1.0.sql b/age--1.0.0--1.1.0.sql new file mode 100644 index 000000000..13b205a47 --- /dev/null +++ b/age--1.0.0--1.1.0.sql @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "ALTER EXTENSION age UPDATE TO '1.1.0'" to load this file. \quit + +-- +-- agtype - access operators ( ->, ->> ) +-- + +CREATE FUNCTION ag_catalog.agtype_object_field(agtype, text) +RETURNS agtype +LANGUAGE c +IMMUTABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +-- get agtype object field +CREATE OPERATOR -> ( + LEFTARG = agtype, + RIGHTARG = text, + FUNCTION = ag_catalog.agtype_object_field +); + +CREATE FUNCTION ag_catalog.agtype_object_field_text(agtype, text) +RETURNS text +LANGUAGE c +IMMUTABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +-- get agtype object field as text +CREATE OPERATOR ->> ( + LEFTARG = agtype, + RIGHTARG = text, + FUNCTION = ag_catalog.agtype_object_field_text +); + +CREATE FUNCTION ag_catalog.agtype_array_element(agtype, int4) +RETURNS agtype +LANGUAGE c +IMMUTABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +-- get agtype array element +CREATE OPERATOR -> ( + LEFTARG = agtype, + RIGHTARG = int4, + FUNCTION = ag_catalog.agtype_array_element +); + +CREATE FUNCTION ag_catalog.agtype_array_element_text(agtype, int4) +RETURNS text +LANGUAGE c +IMMUTABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +-- get agtype array element as text +CREATE OPERATOR ->> ( + LEFTARG = agtype, + RIGHTARG = int4, + FUNCTION = ag_catalog.agtype_array_element_text +); + +-- +-- Contains operators @> <@ +-- +CREATE FUNCTION ag_catalog.agtype_contains(agtype, agtype) +RETURNS boolean +LANGUAGE c +STABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +CREATE OPERATOR @> ( + LEFTARG = agtype, + RIGHTARG = agtype, + FUNCTION = ag_catalog.agtype_contains, + COMMUTATOR = '<@', + RESTRICT = contsel, + JOIN = contjoinsel +); + +CREATE FUNCTION ag_catalog.agtype_contained_by(agtype, agtype) +RETURNS boolean +LANGUAGE c +STABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +CREATE OPERATOR <@ ( + LEFTARG = agtype, + RIGHTARG = agtype, + FUNCTION = ag_catalog.agtype_contained_by, + COMMUTATOR = '@>', + RESTRICT = contsel, + JOIN = 
contjoinsel +); + +-- +-- Key Existence Operators ? ?| ?& +-- +CREATE FUNCTION ag_catalog.agtype_exists(agtype, text) +RETURNS boolean +LANGUAGE c +IMMUTABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +CREATE OPERATOR ? ( + LEFTARG = agtype, + RIGHTARG = text, + FUNCTION = ag_catalog.agtype_exists, + COMMUTATOR = '?', + RESTRICT = contsel, + JOIN = contjoinsel +); + +CREATE FUNCTION ag_catalog.agtype_exists_any(agtype, text[]) +RETURNS boolean +LANGUAGE c +IMMUTABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +CREATE OPERATOR ?| ( + LEFTARG = agtype, + RIGHTARG = text[], + FUNCTION = ag_catalog.agtype_exists_any, + RESTRICT = contsel, + JOIN = contjoinsel +); + +CREATE FUNCTION ag_catalog.agtype_exists_all(agtype, text[]) +RETURNS boolean +LANGUAGE c +IMMUTABLE +RETURNS NULL ON NULL INPUT +PARALLEL SAFE +AS 'MODULE_PATHNAME'; + +CREATE OPERATOR ?& ( + LEFTARG = agtype, + RIGHTARG = text[], + FUNCTION = ag_catalog.agtype_exists_all, + RESTRICT = contsel, + JOIN = contjoinsel +); + +-- +-- agtype GIN support +-- +CREATE FUNCTION ag_catalog.gin_compare_agtype(text, text) +RETURNS int +AS 'MODULE_PATHNAME' +LANGUAGE C +IMMUTABLE +STRICT +PARALLEL SAFE; + +CREATE FUNCTION gin_extract_agtype(agtype, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C +IMMUTABLE +STRICT +PARALLEL SAFE; + +CREATE FUNCTION ag_catalog.gin_extract_agtype_query(agtype, internal, int2, + internal, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C +IMMUTABLE +STRICT +PARALLEL SAFE; + +CREATE FUNCTION ag_catalog.gin_consistent_agtype(internal, int2, agtype, int4, + internal, internal) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C +IMMUTABLE +STRICT +PARALLEL SAFE; + +CREATE FUNCTION ag_catalog.gin_triconsistent_agtype(internal, int2, agtype, int4, + internal, internal, internal) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C +IMMUTABLE +STRICT +PARALLEL SAFE; + +CREATE OPERATOR CLASS ag_catalog.gin_agtype_ops +DEFAULT FOR TYPE agtype USING gin AS + OPERATOR 7 @>, + OPERATOR 9 ?(agtype, text), + OPERATOR 10 ?|(agtype, text[]), + OPERATOR 11 ?&(agtype, text[]), + FUNCTION 1 ag_catalog.gin_compare_agtype(text,text), + FUNCTION 2 ag_catalog.gin_extract_agtype(agtype, internal), + FUNCTION 3 ag_catalog.gin_extract_agtype_query(agtype, internal, int2, + internal, internal), + FUNCTION 4 ag_catalog.gin_consistent_agtype(internal, int2, agtype, int4, + internal, internal), + FUNCTION 6 ag_catalog.gin_triconsistent_agtype(internal, int2, agtype, int4, + internal, internal, internal), +STORAGE text; + +-- +-- graph id conversion function +-- +ALTER FUNCTION ag_catalog.agtype_access_operator(VARIADIC agtype[]) IMMUTABLE; + +DROP FUNCTION IF EXISTS ag_catalog._property_constraint_check(agtype, agtype); + +-- +-- end +-- From 9b67e00864e72b7e4d2824544c05f58d1ec92777 Mon Sep 17 00:00:00 2001 From: Dehowe Feng Date: Wed, 10 Aug 2022 12:55:46 -0700 Subject: [PATCH 02/18] Upgrade AGE version from 1.0.0-->1.1.0 Upgrade AGE version to 1.1.0 --- Makefile | 2 +- README.md | 2 +- RELEASE | 40 ++++++++++++++++++-------------- age--1.0.0.sql => age--1.1.0.sql | 0 age.control | 2 +- 5 files changed, 26 insertions(+), 20 deletions(-) rename age--1.0.0.sql => age--1.1.0.sql (100%) diff --git a/Makefile b/Makefile index deb3b4d90..b8ed4f023 100644 --- a/Makefile +++ b/Makefile @@ -70,7 +70,7 @@ OBJS = src/backend/age.o \ EXTENSION = age -DATA = age--1.0.0.sql +DATA = age--1.1.0.sql # sorted in dependency order REGRESS = scan \ diff --git a/README.md b/README.md index 
7484e4d76..285496329 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Intelligent -- AGE allows you to perform graph queries that are the basis for ma ## Latest happenings -- Latest Apache AGE release, [Apache AGE 1.0.0 (https://github.com/apache/age/releases/tag/v1.0.0-rc1). +- Latest Apache AGE release, [Apache AGE 1.0.0 (https://github.com/apache/age/releases/tag/v1.1.0-rc1). - The latest Apache AGE documentation is now available at [here](https://age.apache.org/docs/master/index.html). - The roadmap has been updated, please check out the [Apache AGE website](http://age.apache.org/). - Send all your comments and inquiries to the user mailing list, users@age.apache.org. diff --git a/RELEASE b/RELEASE index 3e706219f..a8369970f 100644 --- a/RELEASE +++ b/RELEASE @@ -15,22 +15,28 @@ # specific language governing permissions and limitations # under the License. -Release Notes for Apache AGE release v1.0.0 +Release Notes for Apache AGE release v1.1.0 -Apache AGE 1.0.0 - Release Notes +Apache AGE 1.1.0 - Release Notes + + Support for Agtype containment ops and GIN Indices. + Add CALL [YIELD] grammar rules for the implementation of CALL procedures. + VLE path variable integration performance patch. + Improve WHERE clause performance and support index scans. + Allow global graph contexts to see currentCommandIds. + Cache Agtype and GRAPHID OIDs. + Allow lists and maps to be used in the SET clause. + Fix bug in aggregate function collect(). + Fix Bug in WHERE clause and property constraints. + Fix VLE local cache bug (crash). + Fix bug where integers were not being serialized correctly when stored in GIN indices. + Fix the VLE peek_stack_head routine to return a NULL if the stack is NULL. + Fix MERGE visibility in chained commands, SET specifically. + Fix github issue #212 - Add access operator (`->`, `->>`) to Agtype. + Fix github issue #220 - fix local cached contexts for static procedures. + Fix github issue #224 - fix regression tests to fix issues on mac with trigonometric functions. + Fix github issue #235 - when MERGE and SET were used together. + Fix github issue #240 - negative array bounds. + Fix github issue #240 - negative array bounds - addendum. + Updated README. - Add an upgrading SQL script file from 0.5.0 to 0.6.0 - Add upgrading file age--0.6.0--0.7.0.sql - Refactor function get_agtype_value_object_value - Age load issue (#188) - Refactor agtype_access_operator - Bugfix - Remove INLINE from function declaration - Rebase VLE code - Implement Merge Clause - Bugfix: chained union logic - Allow a path of one vertex - Created functions for load graph from CSV files - Add UNION into EXPLAIN grammar rule - Implement `UNWIND` clause(#173) - Bugfix:(nodejs) Corrects parsing for independence value(#177) - Feat: Implement `OPTIONAL MATCH` (#175) diff --git a/age--1.0.0.sql b/age--1.1.0.sql similarity index 100% rename from age--1.0.0.sql rename to age--1.1.0.sql diff --git a/age.control b/age.control index 4e320d024..b0fb1401d 100644 --- a/age.control +++ b/age.control @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. 
-default_version = '1.0.0' +default_version = '1.1.0' comment = 'AGE database extension' module_pathname = '$libdir/age' From 0c1f4e7f52a278d159a4ef0284f84544d58a61c3 Mon Sep 17 00:00:00 2001 From: John Gemignani Date: Wed, 10 Aug 2022 15:55:02 -0700 Subject: [PATCH 03/18] Update NOTICE file Updated NOTICE file to remove 'incubating' --- NOTICE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NOTICE b/NOTICE index 282a6fc1a..40ce5ef8a 100644 --- a/NOTICE +++ b/NOTICE @@ -1,4 +1,4 @@ -Apache AGE (incubating) +Apache AGE Copyright 2022 The Apache Software Foundation. This product includes software developed at The Apache Software Foundation (http://www.apache.org/). From 0b737b52549caaddd558574f78fe9481ebddeb6a Mon Sep 17 00:00:00 2001 From: Shoaib Date: Mon, 20 Jun 2022 10:37:15 +0900 Subject: [PATCH 04/18] cleaned version age-pg-1 --- .github/workflows/installcheck.yml | 40 +++ .gitignore | 3 +- Makefile | 14 +- age--1.1.0.sql | 18 +- regress/expected/age_load.out | 28 +- regress/expected/agtype.out | 1 + regress/expected/catalog.out | 12 +- regress/expected/expr.out | 7 +- regress/expected/scan.out | 1 + regress/sql/age_load.sql | 26 +- regress/sql/agtype.sql | 1 + regress/sql/catalog.sql | 8 +- regress/sql/expr.sql | 1 + regress/sql/scan.sql | 1 + src/backend/catalog/ag_graph.c | 28 +- src/backend/catalog/ag_label.c | 84 +++--- src/backend/commands/graph_commands.c | 20 +- src/backend/commands/label_commands.c | 14 +- src/backend/executor/cypher_create.c | 23 +- src/backend/executor/cypher_delete.c | 100 +++---- src/backend/executor/cypher_merge.c | 26 +- src/backend/executor/cypher_set.c | 57 ++-- src/backend/executor/cypher_utils.c | 31 ++- src/backend/nodes/ag_nodes.c | 6 +- src/backend/nodes/cypher_outfuncs.c | 6 +- src/backend/nodes/cypher_readfuncs.c | 28 +- src/backend/optimizer/cypher_createplan.c | 3 - src/backend/optimizer/cypher_pathnode.c | 1 - src/backend/optimizer/cypher_paths.c | 3 - src/backend/parser/ag_scanner.l | 8 +- src/backend/parser/cypher_analyze.c | 15 +- src/backend/parser/cypher_clause.c | 19 +- src/backend/parser/cypher_expr.c | 1 + src/backend/parser/cypher_keywords.c | 74 ++---- src/backend/parser/cypher_parse_agg.c | 10 +- src/backend/parser/cypher_parser.c | 12 +- src/backend/utils/adt/ag_float8_supp.c | 1 + src/backend/utils/adt/age_global_graph.c | 39 +-- src/backend/utils/adt/age_vle.c | 2 + src/backend/utils/adt/agtype.c | 92 ++++--- src/backend/utils/adt/agtype_gin.c | 1 + src/backend/utils/adt/graphid.c | 4 +- src/backend/utils/ag_func.c | 8 +- src/backend/utils/cache/ag_cache.c | 307 ++++++---------------- src/backend/utils/load/ag_load_edges.c | 35 ++- src/backend/utils/load/ag_load_labels.c | 12 +- src/backend/utils/load/age_load.c | 95 +++---- src/include/catalog/ag_graph.h | 11 +- src/include/catalog/ag_label.h | 23 +- src/include/commands/label_commands.h | 4 +- src/include/executor/cypher_utils.h | 3 + src/include/nodes/cypher_nodes.h | 6 +- src/include/optimizer/cypher_createplan.h | 1 - src/include/optimizer/cypher_pathnode.h | 1 - src/include/parser/.gitignore | 1 + src/include/parser/cypher_keywords.h | 7 +- src/include/parser/cypher_parse_node.h | 2 +- src/include/utils/ag_cache.h | 3 +- src/include/utils/age_graphid_ds.h | 2 + src/include/utils/agtype.h | 22 +- src/include/utils/graphid.h | 11 +- src/include/utils/load/ag_load_edges.h | 9 +- src/include/utils/load/ag_load_labels.h | 10 +- src/include/utils/load/age_load.h | 6 +- 64 files changed, 665 insertions(+), 783 deletions(-) create mode 100644 
.github/workflows/installcheck.yml diff --git a/.github/workflows/installcheck.yml b/.github/workflows/installcheck.yml new file mode 100644 index 000000000..76503fe06 --- /dev/null +++ b/.github/workflows/installcheck.yml @@ -0,0 +1,40 @@ +name: PG12 Regression + +on: + push: + branches: [ '*' ] + pull_request: + branches: [ '*' ] + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - name: Get latest commit id of PostgreSQL 12 + run: | + echo "PG_COMMIT_HASH=$(git ls-remote git://git.postgresql.org/git/postgresql.git refs/heads/REL_12_STABLE | awk '{print $1}')" >> $GITHUB_ENV + + - name: Cache PostgreSQL 12 + uses: actions/cache@v2 + id: pg12cache + with: + path: ~/pg12 + key: ${{ runner.os }}-v1-pg12-${{ env.PG_COMMIT_HASH }} + + - name: Install PostgreSQL 12 + if: steps.pg12cache.outputs.cache-hit != 'true' + run: | + git clone --depth 1 --branch REL_12_STABLE git://git.postgresql.org/git/postgresql.git ~/pg12source + cd ~/pg12source + ./configure --prefix=$HOME/pg12 CFLAGS="-std=gnu99 -ggdb -O0" --enable-cassert + make install -j$(nproc) > /dev/null + + - uses: actions/checkout@v2 + - name: Regression + run: | + export PG_CONFIG=$HOME/pg12/bin/pg_config + make -j$(nproc) + make install + make installcheck \ No newline at end of file diff --git a/.gitignore b/.gitignore index 78e991256..d5dea3281 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ *.o *.so -.gitignore build.sh .idea .deps -.DS_Store +.DS_Store \ No newline at end of file diff --git a/Makefile b/Makefile index b8ed4f023..366ba7402 100644 --- a/Makefile +++ b/Makefile @@ -99,15 +99,23 @@ ag_regress_dir = $(srcdir)/regress REGRESS_OPTS = --load-extension=age --inputdir=$(ag_regress_dir) --outputdir=$(ag_regress_dir) --temp-instance=$(ag_regress_dir)/instance --port=61958 --encoding=UTF-8 ag_regress_out = instance/ log/ results/ regression.* -EXTRA_CLEAN = $(addprefix $(ag_regress_dir)/, $(ag_regress_out)) src/backend/parser/cypher_gram.c src/include/parser/cypher_gram_def.h +EXTRA_CLEAN = $(addprefix $(ag_regress_dir)/, $(ag_regress_out)) src/backend/parser/cypher_gram.c src/include/parser/cypher_gram_def.h src/include/parser/cypher_kwlist_d.h + +GEN_KEYWORDLIST = $(PERL) -I ./tools/ ./tools/gen_keywordlist.pl +GEN_KEYWORDLIST_DEPS = ./tools/gen_keywordlist.pl tools/PerfectHash.pm ag_include_dir = $(srcdir)/src/include PG_CPPFLAGS = -I$(ag_include_dir) -I$(ag_include_dir)/parser -PG_CONFIG = pg_config +PG_CONFIG ?= pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) +src/backend/parser/cypher_keywords.o: src/include/parser/cypher_kwlist_d.h + +src/include/parser/cypher_kwlist_d.h: src/include/parser/cypher_kwlist.h $(GEN_KEYWORDLIST_DEPS) + $(GEN_KEYWORDLIST) --extern --varname CypherKeyword --output src/include/parser $< + src/include/parser/cypher_gram_def.h: src/backend/parser/cypher_gram.c src/backend/parser/cypher_gram.c: BISONFLAGS += --defines=src/include/parser/cypher_gram_def.h @@ -115,4 +123,4 @@ src/backend/parser/cypher_gram.c: BISONFLAGS += --defines=src/include/parser/cyp src/backend/parser/cypher_parser.o: src/backend/parser/cypher_gram.c src/backend/parser/cypher_keywords.o: src/backend/parser/cypher_gram.c -src/backend/parser/ag_scanner.c: FLEX_NO_BACKUP=yes +src/backend/parser/ag_scanner.c: FLEX_NO_BACKUP=yes \ No newline at end of file diff --git a/age--1.1.0.sql b/age--1.1.0.sql index e5c42192a..f6b555b3f 100644 --- a/age--1.1.0.sql +++ b/age--1.1.0.sql @@ -24,12 +24,14 @@ -- catalog tables -- + CREATE TABLE ag_graph ( + graphid oid NOT NULL, name name NOT NULL, namespace 
regnamespace NOT NULL -) WITH (OIDS); +); -CREATE UNIQUE INDEX ag_graph_oid_index ON ag_graph USING btree (oid); +CREATE UNIQUE INDEX ag_graph_graphid_index ON ag_graph USING btree (graphid); CREATE UNIQUE INDEX ag_graph_name_index ON ag_graph USING btree (name); @@ -43,20 +45,22 @@ CREATE DOMAIN label_id AS int NOT NULL CHECK (VALUE > 0 AND VALUE <= 65535); CREATE DOMAIN label_kind AS "char" NOT NULL CHECK (VALUE = 'v' OR VALUE = 'e'); CREATE TABLE ag_label ( + name name NOT NULL, graph oid NOT NULL, id label_id, kind label_kind, - relation regclass NOT NULL -) WITH (OIDS); - -CREATE UNIQUE INDEX ag_label_oid_index ON ag_label USING btree (oid); + relation regclass NOT NULL, + CONSTRAINT fk_graph_oid + FOREIGN KEY(graph) + REFERENCES ag_graph(graphid) +); CREATE UNIQUE INDEX ag_label_name_graph_index ON ag_label USING btree (name, graph); -CREATE UNIQUE INDEX ag_label_graph_id_index +CREATE UNIQUE INDEX ag_label_graph_oid_index ON ag_label USING btree (graph, id); diff --git a/regress/expected/age_load.out b/regress/expected/age_load.out index bae5924b3..6c83d7b31 100644 --- a/regress/expected/age_load.out +++ b/regress/expected/age_load.out @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ \! cp -r regress/age_load/data regress/instance/data/age_load LOAD 'age'; SET search_path TO ag_catalog; @@ -190,16 +208,6 @@ SELECT * FROM cypher('agload_test_graph', $$MATCH(n:Country2 {iso2 : 'AT'}) 1688849860263940 | "Austria" | "AT" (1 row) -SELECT * FROM cypher('agload_test_graph', $$ - MATCH (u:Country {region : "Europe"}) - WHERE u.name =~ 'Cro.*' - RETURN u.name, u.region -$$) AS (result_1 agtype, result_2 agtype); - result_1 | result_2 ------------+---------- - "Croatia" | "Europe" -(1 row) - SELECT drop_graph('agload_test_graph', true); NOTICE: drop cascades to 7 other objects DETAIL: drop cascades to table agload_test_graph._ag_label_vertex diff --git a/regress/expected/agtype.out b/regress/expected/agtype.out index 196747a7e..4b92a2932 100644 --- a/regress/expected/agtype.out +++ b/regress/expected/agtype.out @@ -23,6 +23,7 @@ -- Load extension and set path -- LOAD 'age'; +SET extra_float_digits = 0; SET search_path TO ag_catalog; -- -- Create a table using the AGTYPE type diff --git a/regress/expected/catalog.out b/regress/expected/catalog.out index 8a83f45a6..19ef801e7 100644 --- a/regress/expected/catalog.out +++ b/regress/expected/catalog.out @@ -28,7 +28,7 @@ NOTICE: graph "g" has been created (1 row) -SELECT * FROM ag_graph WHERE name = 'g'; +SELECT name, namespace FROM ag_graph WHERE name = 'g'; name | namespace ------+----------- g | g @@ -119,7 +119,7 @@ NOTICE: graph "GraphB" has been created (1 row) -- Show GraphA's construction to verify case is preserved. 
-SELECT * FROM ag_graph WHERE name = 'GraphA'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphA'; name | namespace --------+----------- GraphA | "GraphA" @@ -140,7 +140,7 @@ NOTICE: graph "GraphA" renamed to "GraphX" (1 row) -- Show GraphX's construction to verify case is preserved. -SELECT * FROM ag_graph WHERE name = 'GraphX'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphX'; name | namespace --------+----------- GraphX | "GraphX" @@ -153,14 +153,14 @@ SELECT nspname FROM pg_namespace WHERE nspname = 'GraphX'; (1 row) -- Verify there isn't a graph GraphA anymore. -SELECT * FROM ag_graph WHERE name = 'GraphA'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphA'; name | namespace ------+----------- (0 rows) SELECT * FROM pg_namespace WHERE nspname = 'GraphA'; - nspname | nspowner | nspacl ----------+----------+-------- + oid | nspname | nspowner | nspacl +-----+---------+----------+-------- (0 rows) -- Sanity check that graphx does not exist - should return 0. diff --git a/regress/expected/expr.out b/regress/expected/expr.out index 5f28e3642..ba1124686 100644 --- a/regress/expected/expr.out +++ b/regress/expected/expr.out @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +SET extra_float_digits = 0; LOAD 'age'; SET search_path TO ag_catalog; SELECT * FROM create_graph('expr'); @@ -1143,7 +1144,7 @@ $$) AS (i bigint); SELECT * FROM cypher('type_coercion', $$ RETURN '1.0' $$) AS (i bigint); -ERROR: invalid input syntax for integer: "1.0" +ERROR: invalid input syntax for type bigint: "1.0" -- Casting to ints that will cause overflow SELECT * FROM cypher('type_coercion', $$ RETURN 10000000000000000000 @@ -1312,11 +1313,11 @@ SELECT agtype_in('null::int'); SELECT * FROM cypher('expr', $$ RETURN '0.0'::int $$) AS r(result agtype); -ERROR: invalid input syntax for integer: "0.0" +ERROR: invalid input syntax for type bigint: "0.0" SELECT * FROM cypher('expr', $$ RETURN '1.5'::int $$) AS r(result agtype); -ERROR: invalid input syntax for integer: "1.5" +ERROR: invalid input syntax for type bigint: "1.5" SELECT * FROM cypher('graph_name', $$ RETURN "15555555555555555555555555555"::int $$) AS (string_result agtype); diff --git a/regress/expected/scan.out b/regress/expected/scan.out index af82dbf22..d96d80049 100644 --- a/regress/expected/scan.out +++ b/regress/expected/scan.out @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +SET extra_float_digits = 0; LOAD 'age'; SET search_path TO ag_catalog; SELECT create_graph('scan'); diff --git a/regress/sql/age_load.sql b/regress/sql/age_load.sql index 3516a170b..e5a7db034 100644 --- a/regress/sql/age_load.sql +++ b/regress/sql/age_load.sql @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + \! cp -r regress/age_load/data regress/instance/data/age_load LOAD 'age'; @@ -29,6 +48,7 @@ SELECT COUNT(*) FROM cypher('agload_test_graph', $$MATCH(n) RETURN n$$) as (n ag SELECT COUNT(*) FROM cypher('agload_test_graph', $$MATCH (a)-[e]->(b) RETURN e$$) as (n agtype); + SELECT create_vlabel('agload_test_graph','Country2'); SELECT load_labels_from_file('agload_test_graph', 'Country2', 'age_load/countries.csv', false); @@ -53,10 +73,4 @@ SELECT * FROM cypher('agload_test_graph', $$MATCH(n:Country {iso2 : 'AT'}) SELECT * FROM cypher('agload_test_graph', $$MATCH(n:Country2 {iso2 : 'AT'}) RETURN id(n), n.name, n.iso2 $$) as ("id(n)" agtype, "n.name" agtype, "n.iso2" agtype); -SELECT * FROM cypher('agload_test_graph', $$ - MATCH (u:Country {region : "Europe"}) - WHERE u.name =~ 'Cro.*' - RETURN u.name, u.region -$$) AS (result_1 agtype, result_2 agtype); - SELECT drop_graph('agload_test_graph', true); diff --git a/regress/sql/agtype.sql b/regress/sql/agtype.sql index 3a116cc4d..608cc8af5 100644 --- a/regress/sql/agtype.sql +++ b/regress/sql/agtype.sql @@ -25,6 +25,7 @@ -- Load extension and set path -- LOAD 'age'; +SET extra_float_digits = 0; SET search_path TO ag_catalog; -- diff --git a/regress/sql/catalog.sql b/regress/sql/catalog.sql index 6bc19814c..641ef2ecf 100644 --- a/regress/sql/catalog.sql +++ b/regress/sql/catalog.sql @@ -25,7 +25,7 @@ SET search_path TO ag_catalog; -- SELECT create_graph('g'); -SELECT * FROM ag_graph WHERE name = 'g'; +SELECT name, namespace FROM ag_graph WHERE name = 'g'; -- create a label to test drop_label() SELECT * FROM cypher('g', $$CREATE (:l)$$) AS r(a agtype); @@ -62,18 +62,18 @@ SELECT create_graph('GraphA'); SELECT create_graph('GraphB'); -- Show GraphA's construction to verify case is preserved. -SELECT * FROM ag_graph WHERE name = 'GraphA'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphA'; SELECT nspname FROM pg_namespace WHERE nspname = 'GraphA'; -- Rename GraphA to GraphX. SELECT alter_graph('GraphA', 'RENAME', 'GraphX'); -- Show GraphX's construction to verify case is preserved. -SELECT * FROM ag_graph WHERE name = 'GraphX'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphX'; SELECT nspname FROM pg_namespace WHERE nspname = 'GraphX'; -- Verify there isn't a graph GraphA anymore. -SELECT * FROM ag_graph WHERE name = 'GraphA'; +SELECT name, namespace FROM ag_graph WHERE name = 'GraphA'; SELECT * FROM pg_namespace WHERE nspname = 'GraphA'; -- Sanity check that graphx does not exist - should return 0. diff --git a/regress/sql/expr.sql b/regress/sql/expr.sql index 7fe904fdb..8553b3c3c 100644 --- a/regress/sql/expr.sql +++ b/regress/sql/expr.sql @@ -17,6 +17,7 @@ * under the License. */ +SET extra_float_digits = 0; LOAD 'age'; SET search_path TO ag_catalog; diff --git a/regress/sql/scan.sql b/regress/sql/scan.sql index 97804e5c7..840a822f2 100644 --- a/regress/sql/scan.sql +++ b/regress/sql/scan.sql @@ -17,6 +17,7 @@ * under the License. 
*/ +SET extra_float_digits = 0; LOAD 'age'; SET search_path TO ag_catalog; diff --git a/src/backend/catalog/ag_graph.c b/src/backend/catalog/ag_graph.c index c1e53d6ab..a344a6a37 100644 --- a/src/backend/catalog/ag_graph.c +++ b/src/backend/catalog/ag_graph.c @@ -26,9 +26,11 @@ #include "access/skey.h" #include "access/stratnum.h" #include "catalog/indexing.h" +#include "catalog/namespace.h" +#include "nodes/makefuncs.h" #include "storage/lockdefs.h" -#include "utils/builtins.h" #include "utils/fmgroids.h" +#include "utils/fmgrprotos.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relcache.h" @@ -39,36 +41,36 @@ static Oid get_graph_namespace(const char *graph_name); // INSERT INTO ag_catalog.ag_graph VALUES (graph_name, nsp_id) -Oid insert_graph(const Name graph_name, const Oid nsp_id) +void insert_graph(const Name graph_name, const Oid nsp_id) { Datum values[Natts_ag_graph]; bool nulls[Natts_ag_graph]; Relation ag_graph; HeapTuple tuple; - Oid graph_oid; + AssertArg(graph_name); AssertArg(OidIsValid(nsp_id)); + ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); + values[Anum_ag_graph_oid - 1] = ObjectIdGetDatum(nsp_id); + nulls[Anum_ag_graph_oid - 1] = false; + values[Anum_ag_graph_name - 1] = NameGetDatum(graph_name); nulls[Anum_ag_graph_name - 1] = false; values[Anum_ag_graph_namespace - 1] = ObjectIdGetDatum(nsp_id); nulls[Anum_ag_graph_namespace - 1] = false; - ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock); - tuple = heap_form_tuple(RelationGetDescr(ag_graph), values, nulls); /* * CatalogTupleInsert() is originally for PostgreSQL's catalog. However, * it is used at here for convenience. */ - graph_oid = CatalogTupleInsert(ag_graph, tuple); - - heap_close(ag_graph, RowExclusiveLock); + CatalogTupleInsert(ag_graph, tuple); - return graph_oid; + table_close(ag_graph, RowExclusiveLock); } // DELETE FROM ag_catalog.ag_graph WHERE name = graph_name @@ -82,7 +84,7 @@ void delete_graph(const Name graph_name) ScanKeyInit(&scan_keys[0], Anum_ag_graph_name, BTEqualStrategyNumber, F_NAMEEQ, NameGetDatum(graph_name)); - ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock); + ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true, NULL, 1, scan_keys); @@ -97,7 +99,7 @@ void delete_graph(const Name graph_name) CatalogTupleDelete(ag_graph, &tuple->t_self); systable_endscan(scan_desc); - heap_close(ag_graph, RowExclusiveLock); + table_close(ag_graph, RowExclusiveLock); } // Function updates graph name in ag_graph table. 
@@ -116,7 +118,7 @@ void update_graph_name(const Name graph_name, const Name new_name) ScanKeyInit(&scan_keys[0], Anum_ag_graph_name, BTEqualStrategyNumber, F_NAMEEQ, NameGetDatum(graph_name)); - ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock); + ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true, NULL, 1, scan_keys); @@ -146,7 +148,7 @@ void update_graph_name(const Name graph_name, const Name new_name) // end scan and close ag_graph systable_endscan(scan_desc); - heap_close(ag_graph, RowExclusiveLock); + table_close(ag_graph, RowExclusiveLock); } Oid get_graph_oid(const char *graph_name) diff --git a/src/backend/catalog/ag_label.c b/src/backend/catalog/ag_label.c index 8001f53c4..809487a3b 100644 --- a/src/backend/catalog/ag_label.c +++ b/src/backend/catalog/ag_label.c @@ -26,6 +26,7 @@ #include "access/skey.h" #include "access/stratnum.h" #include "catalog/indexing.h" +#include "catalog/namespace.h" #include "fmgr.h" #include "nodes/execnodes.h" #include "nodes/makefuncs.h" @@ -45,32 +46,32 @@ // INSERT INTO ag_catalog.ag_label // VALUES (label_name, label_graph, label_id, label_kind, label_relation) -Oid insert_label(const char *label_name, Oid label_graph, int32 label_id, - char label_kind, Oid label_relation) +void insert_label(const char *label_name, Oid graph_oid, int32 label_id, + char label_kind, Oid label_relation) { NameData label_name_data; Datum values[Natts_ag_label]; bool nulls[Natts_ag_label]; Relation ag_label; HeapTuple tuple; - Oid label_oid; /* * NOTE: Is it better to make use of label_id and label_kind domain types * than to use assert to check label_id and label_kind are valid? */ AssertArg(label_name); - AssertArg(OidIsValid(label_graph)); AssertArg(label_id_is_valid(label_id)); AssertArg(label_kind == LABEL_KIND_VERTEX || label_kind == LABEL_KIND_EDGE); AssertArg(OidIsValid(label_relation)); + ag_label = table_open(ag_label_relation_id(), RowExclusiveLock); + namestrcpy(&label_name_data, label_name); values[Anum_ag_label_name - 1] = NameGetDatum(&label_name_data); nulls[Anum_ag_label_name - 1] = false; - values[Anum_ag_label_graph - 1] = ObjectIdGetDatum(label_graph); + values[Anum_ag_label_graph - 1] = ObjectIdGetDatum(graph_oid); nulls[Anum_ag_label_graph - 1] = false; values[Anum_ag_label_id - 1] = Int32GetDatum(label_id); @@ -82,19 +83,15 @@ Oid insert_label(const char *label_name, Oid label_graph, int32 label_id, values[Anum_ag_label_relation - 1] = ObjectIdGetDatum(label_relation); nulls[Anum_ag_label_relation - 1] = false; - ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock); - tuple = heap_form_tuple(RelationGetDescr(ag_label), values, nulls); /* * CatalogTupleInsert() is originally for PostgreSQL's catalog. However, * it is used at here for convenience. 
*/ - label_oid = CatalogTupleInsert(ag_label, tuple); + CatalogTupleInsert(ag_label, tuple); - heap_close(ag_label, RowExclusiveLock); - - return label_oid; + table_close(ag_label, RowExclusiveLock); } // DELETE FROM ag_catalog.ag_label WHERE relation = relation @@ -108,7 +105,7 @@ void delete_label(Oid relation) ScanKeyInit(&scan_keys[0], Anum_ag_label_relation, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relation)); - ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock); + ag_label = table_open(ag_label_relation_id(), RowExclusiveLock); scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(), true, NULL, 1, scan_keys); @@ -123,45 +120,34 @@ void delete_label(Oid relation) CatalogTupleDelete(ag_label, &tuple->t_self); systable_endscan(scan_desc); - heap_close(ag_label, RowExclusiveLock); + table_close(ag_label, RowExclusiveLock); } -Oid get_label_oid(const char *label_name, Oid label_graph) +int32 get_label_id(const char *label_name, Oid graph_oid) { label_cache_data *cache_data; - cache_data = search_label_name_graph_cache(label_name, label_graph); - if (cache_data) - return cache_data->oid; - else - return InvalidOid; -} - -int32 get_label_id(const char *label_name, Oid label_graph) -{ - label_cache_data *cache_data; - - cache_data = search_label_name_graph_cache(label_name, label_graph); + cache_data = search_label_name_graph_cache(label_name, graph_oid); if (cache_data) return cache_data->id; else return INVALID_LABEL_ID; } -Oid get_label_relation(const char *label_name, Oid label_graph) +Oid get_label_relation(const char *label_name, Oid graph_oid) { label_cache_data *cache_data; - cache_data = search_label_name_graph_cache(label_name, label_graph); + cache_data = search_label_name_graph_cache(label_name, graph_oid); if (cache_data) return cache_data->relation; else return InvalidOid; } -char *get_label_relation_name(const char *label_name, Oid label_graph) +char *get_label_relation_name(const char *label_name, Oid graph_oid) { - return get_rel_name(get_label_relation(label_name, label_graph)); + return get_rel_name(get_label_relation(label_name, graph_oid)); } PG_FUNCTION_INFO_V1(_label_name); @@ -185,7 +171,7 @@ Datum _label_name(PG_FUNCTION_ARGS) label_id = (int32)(((uint64)AG_GETARG_GRAPHID(1)) >> ENTRY_ID_BITS); - label_cache = search_label_graph_id_cache(graph, label_id); + label_cache = search_label_graph_oid_cache(graph, label_id); label_name = NameStr(label_cache->name); @@ -222,23 +208,23 @@ PG_FUNCTION_INFO_V1(_extract_label_id); Datum _extract_label_id(PG_FUNCTION_ARGS) { - graphid graph_id; + graphid graph_oid; if (PG_ARGISNULL(0)) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), - errmsg("graph_id must not be null"))); + errmsg("graph_oid must not be null"))); } - graph_id = AG_GETARG_GRAPHID(0); + graph_oid = AG_GETARG_GRAPHID(0); - PG_RETURN_INT32(get_graphid_label_id(graph_id)); + PG_RETURN_INT32(get_graphid_label_id(graph_oid)); } -bool label_id_exists(Oid label_graph, int32 label_id) +bool label_id_exists(Oid graph_oid, int32 label_id) { label_cache_data *cache_data; - cache_data = search_label_graph_id_cache(label_graph, label_id); + cache_data = search_label_graph_oid_cache(graph_oid, label_id); if (cache_data) return true; else @@ -267,15 +253,16 @@ RangeVar *get_label_range_var(char *graph_name, Oid graph_oid, * XXX: We may want to use the cache system for this function, * however the cache system currently requires us to know the * name of the label we want. 
- */ + */ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) { List *labels = NIL; ScanKeyData scan_keys[2]; Relation ag_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; TupleTableSlot *slot; + ResultRelInfo *resultRelInfo; // setup scan keys to get all edges for the given graph oid ScanKeyInit(&scan_keys[1], Anum_ag_label_graph, BTEqualStrategyNumber, @@ -284,11 +271,15 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) F_CHAREQ, CharGetDatum(LABEL_TYPE_EDGE)); // setup the table to be scanned - ag_label = heap_open(ag_label_relation_id(), RowExclusiveLock); - scan_desc = heap_beginscan(ag_label, estate->es_snapshot, 2, scan_keys); + ag_label = table_open(ag_label_relation_id(), RowExclusiveLock); + scan_desc = table_beginscan(ag_label, estate->es_snapshot, 2, scan_keys); + + resultRelInfo = create_entity_result_rel_info(estate, "ag_catalog", + "ag_label"); - slot = ExecInitExtraTupleSlot(estate, - RelationGetDescr(ag_label)); + slot = ExecInitExtraTupleSlot( + estate, RelationGetDescr(resultRelInfo->ri_RelationDesc), + &TTSOpsHeapTuple); // scan through the results and get all the label names. while(true) @@ -303,7 +294,7 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) if (!HeapTupleIsValid(tuple)) break; - ExecStoreTuple(tuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, slot, false); datum = slot_getattr(slot, Anum_ag_label_name, &isNull); label = DatumGetName(datum); @@ -311,8 +302,9 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) labels = lappend(labels, label); } - heap_endscan(scan_desc); - heap_close(ag_label, RowExclusiveLock); + table_endscan(scan_desc); + table_close(ag_label, RowExclusiveLock); + table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); return labels; } diff --git a/src/backend/commands/graph_commands.c b/src/backend/commands/graph_commands.c index c298324c8..6ef0b3cb3 100644 --- a/src/backend/commands/graph_commands.c +++ b/src/backend/commands/graph_commands.c @@ -19,9 +19,9 @@ #include "postgres.h" -#include "access/xact.h" #include "access/genam.h" #include "access/heapam.h" +#include "access/xact.h" #include "catalog/dependency.h" #include "catalog/objectaddress.h" #include "commands/defrem.h" @@ -35,9 +35,8 @@ #include "nodes/pg_list.h" #include "nodes/value.h" #include "parser/parser.h" -#include "utils/fmgroids.h" -#include "utils/relcache.h" #include "utils/rel.h" +#include "utils/relcache.h" #include "catalog/ag_graph.h" #include "catalog/ag_label.h" @@ -166,9 +165,8 @@ Datum drop_graph(PG_FUNCTION_ARGS) graph_name_str = NameStr(*graph_name); if (!graph_exists(graph_name_str)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("graph \"%s\" does not exist", graph_name_str))); + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), + errmsg("graph \"%s\" does not exist", graph_name_str))); } drop_schema_for_graph(graph_name_str, cascade); @@ -345,11 +343,11 @@ List *get_graphnames(void) List *graphnames = NIL; char *str; - ag_graph = heap_open(ag_graph_relation_id(), RowExclusiveLock); + ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock); scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true, NULL, 0, NULL); - slot = MakeTupleTableSlot(RelationGetDescr(ag_graph)); + slot = MakeTupleTableSlot(RelationGetDescr(ag_graph), &TTSOpsHeapTuple); for (;;) { @@ -358,17 +356,17 @@ List *get_graphnames(void) break; ExecClearTuple(slot); - ExecStoreTuple(tuple, slot, InvalidBuffer, false); + 
ExecStoreHeapTuple(tuple, slot, false); slot_getallattrs(slot); - str = DatumGetCString(slot->tts_values[0]); + str = DatumGetCString(slot->tts_values[Anum_ag_graph_name - 1]); graphnames = lappend(graphnames, str); } ExecDropSingleTupleTableSlot(slot); systable_endscan(scan_desc); - heap_close(ag_graph, RowExclusiveLock); + table_close(ag_graph, RowExclusiveLock); return graphnames; } diff --git a/src/backend/commands/label_commands.c b/src/backend/commands/label_commands.c index 66b6818fe..c682d3988 100644 --- a/src/backend/commands/label_commands.c +++ b/src/backend/commands/label_commands.c @@ -259,8 +259,8 @@ Datum create_elabel(PG_FUNCTION_ARGS) * new table and sequence. Returns the oid from the new tuple in * ag_catalog.ag_label. */ -Oid create_label(char *graph_name, char *label_name, char label_type, - List *parents) +void create_label(char *graph_name, char *label_name, char label_type, + List *parents) { graph_cache_data *cache_data; Oid graph_oid; @@ -271,7 +271,6 @@ Oid create_label(char *graph_name, char *label_name, char label_type, RangeVar *seq_range_var; int32 label_id; Oid relation_id; - Oid label_oid; cache_data = search_graph_name_cache(graph_name); if (!cache_data) @@ -307,12 +306,9 @@ Oid create_label(char *graph_name, char *label_name, char label_type, // get a new "id" for the new label label_id = get_new_label_id(graph_oid, nsp_id); - label_oid = insert_label(label_name, graph_oid, label_id, label_type, - relation_id); + insert_label(label_name, graph_oid, label_id, label_type, relation_id); CommandCounterIncrement(); - - return label_oid; } // CREATE TABLE `schema_name`.`rel_name` ( @@ -677,10 +673,10 @@ static int32 get_new_label_id(Oid graph_oid, Oid nsp_id) for (cnt = LABEL_ID_MIN; cnt <= LABEL_ID_MAX; cnt++) { - int64 label_id; + int32 label_id; // the data type of the sequence is integer (int4) - label_id = nextval_internal(seq_id, true); + label_id = (int32) nextval_internal(seq_id, true); Assert(label_id_is_valid(label_id)); if (!label_id_exists(graph_oid, label_id)) return (int32)label_id; diff --git a/src/backend/executor/cypher_create.c b/src/backend/executor/cypher_create.c index 7a03efd3d..b4545e434 100644 --- a/src/backend/executor/cypher_create.c +++ b/src/backend/executor/cypher_create.c @@ -19,6 +19,7 @@ #include "postgres.h" +#include "access/heapam.h" #include "access/htup_details.h" #include "access/xact.h" #include "executor/tuptable.h" @@ -26,17 +27,14 @@ #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/plannodes.h" -#include "parser/parse_relation.h" #include "rewrite/rewriteHandler.h" #include "utils/rel.h" -#include "utils/tqual.h" #include "catalog/ag_label.h" #include "executor/cypher_executor.h" #include "executor/cypher_utils.h" #include "nodes/cypher_nodes.h" #include "utils/agtype.h" -#include "utils/ag_cache.h" #include "utils/graphid.h" static void begin_cypher_create(CustomScanState *node, EState *estate, @@ -85,7 +83,8 @@ static void begin_cypher_create(CustomScanState *node, EState *estate, ExecAssignExprContext(estate, &node->ss.ps); ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree)); + ExecGetResultType(node->ss.ps.lefttree), + &TTSOpsHeapTuple); if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags)) { @@ -108,7 +107,7 @@ static void begin_cypher_create(CustomScanState *node, EState *estate, continue; // Open relation and aquire a row exclusive lock. 
- rel = heap_open(cypher_node->relid, RowExclusiveLock); + rel = table_open(cypher_node->relid, RowExclusiveLock); // Initialize resultRelInfo for the vertex cypher_node->resultRelInfo = makeNode(ResultRelInfo); @@ -120,9 +119,12 @@ static void begin_cypher_create(CustomScanState *node, EState *estate, ExecOpenIndices(cypher_node->resultRelInfo, false); // Setup the relation's tuple slot - cypher_node->elemTupleSlot = ExecInitExtraTupleSlot( - estate, - RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc)); + cypher_node->elemTupleSlot = table_slot_create( + rel, &estate->es_tupleTable); + //cypher_node->elemTupleSlot = ExecInitExtraTupleSlot( + // estate, + // RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc), + // &TTSOpsHeapTuple); if (cypher_node->id_expr != NULL) { @@ -274,8 +276,8 @@ static void end_cypher_create(CustomScanState *node) ExecCloseIndices(cypher_node->resultRelInfo); // close the relation itself - heap_close(cypher_node->resultRelInfo->ri_RelationDesc, - RowExclusiveLock); + table_close(cypher_node->resultRelInfo->ri_RelationDesc, + RowExclusiveLock); } } } @@ -425,6 +427,7 @@ static void create_edge(cypher_create_custom_scan_state *css, prev_path = lappend(prev_path, DatumGetPointer(result)); css->path_values = list_concat(prev_path, css->path_values); } + if (CYPHER_TARGET_NODE_IS_VARIABLE(node->flags)) { scantuple->tts_values[node->tuple_position - 1] = result; diff --git a/src/backend/executor/cypher_delete.c b/src/backend/executor/cypher_delete.c index 3d9406bdc..a0c3c776e 100644 --- a/src/backend/executor/cypher_delete.c +++ b/src/backend/executor/cypher_delete.c @@ -19,27 +19,23 @@ #include "postgres.h" -#include "access/sysattr.h" +#include "access/heapam.h" #include "access/htup_details.h" #include "access/multixact.h" +#include "access/table.h" #include "access/xact.h" -#include "storage/bufmgr.h" #include "executor/tuptable.h" #include "nodes/execnodes.h" #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/plannodes.h" #include "parser/parsetree.h" -#include "parser/parse_relation.h" -#include "rewrite/rewriteHandler.h" +#include "storage/bufmgr.h" #include "utils/rel.h" -#include "utils/tqual.h" #include "catalog/ag_label.h" -#include "commands/label_commands.h" #include "executor/cypher_executor.h" #include "executor/cypher_utils.h" -#include "parser/cypher_parse_node.h" #include "nodes/cypher_nodes.h" #include "utils/agtype.h" #include "utils/graphid.h" @@ -99,7 +95,8 @@ static void begin_cypher_delete(CustomScanState *node, EState *estate, // setup scan tuple slot and projection info ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree)); + ExecGetResultType(node->ss.ps.lefttree), + &TTSOpsHeapTuple); if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags)) { @@ -284,9 +281,9 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo, { ResultRelInfo *saved_resultRelInfo; LockTupleMode lockmode; - HeapUpdateFailureData hufd; - HTSU_Result lock_result; - HTSU_Result delete_result; + TM_FailureData hufd; + TM_Result lock_result; + TM_Result delete_result; Buffer buffer; // Find the physical tuple, this variable is coming from @@ -303,11 +300,11 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo, * It is possible the entity may have already been deleted. If the tuple * can be deleted, the lock result will be HeapTupleMayBeUpdated. 
If the * tuple was already deleted by this DELETE clause, the result would be - * HeapTupleSelfUpdated, if the result was deleted by a previous delete - * clause, the result will HeapTupleInvisible. Throw an error if any + * TM_SelfModified, if the result was deleted by a previous delete + * clause, the result will TM_Invisible. Throw an error if any * other result was returned. */ - if (lock_result == HeapTupleMayBeUpdated) + if (lock_result == TM_Ok) { delete_result = heap_delete(resultRelInfo->ri_RelationDesc, &tuple->t_self, GetCurrentCommandId(true), @@ -320,30 +317,32 @@ static void delete_entity(EState *estate, ResultRelInfo *resultRelInfo, */ switch (delete_result) { - case HeapTupleMayBeUpdated: - break; - case HeapTupleSelfUpdated: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("deleting the same entity more than once cannot happen"))); - /* ereport never gets here */ - break; - case HeapTupleUpdated: - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); - /* ereport never gets here */ - break; - default: - elog(ERROR, "Entity failed to be update"); - /* elog never gets here */ - break; + case TM_Ok: + break; + case TM_SelfModified: + ereport( + ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg( + "deleting the same entity more than once cannot happen"))); + /* ereport never gets here */ + break; + case TM_Updated: + ereport( + ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + /* ereport never gets here */ + break; + default: + elog(ERROR, "Entity failed to be update"); + /* elog never gets here */ + break; } /* increment the command counter */ CommandCounterIncrement(); } - else if (lock_result != HeapTupleInvisible && - lock_result != HeapTupleSelfUpdated) + else if (lock_result != TM_Invisible && lock_result != TM_SelfModified) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), @@ -374,7 +373,7 @@ static void process_delete_list(CustomScanState *node) cypher_delete_item *item; agtype_value *original_entity_value, *id, *label; ScanKeyData scan_keys[1]; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; ResultRelInfo *resultRelInfo; HeapTuple heap_tuple; char *label_name; @@ -424,8 +423,8 @@ static void process_delete_list(CustomScanState *node) /* * Setup the scan description, with the correct snapshot and scan keys. */ - scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc, - estate->es_snapshot, 1, scan_keys); + scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc, + estate->es_snapshot, 1, scan_keys); /* Retrieve the tuple. */ heap_tuple = heap_getnext(scan_desc, ForwardScanDirection); @@ -437,7 +436,7 @@ static void process_delete_list(CustomScanState *node) */ if (!HeapTupleIsValid(heap_tuple)) { - heap_endscan(scan_desc); + table_endscan(scan_desc); destroy_entity_result_rel_info(resultRelInfo); continue; @@ -459,7 +458,7 @@ static void process_delete_list(CustomScanState *node) delete_entity(estate, resultRelInfo, heap_tuple); /* Close the scan and the relation. */ - heap_endscan(scan_desc); + table_endscan(scan_desc); destroy_entity_result_rel_info(resultRelInfo); } } @@ -488,25 +487,26 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, * improved. However, right now we have to scan every edge to see if * one has this vertex as a start or end vertex. 
*/ - foreach(lc, labels) + foreach (lc, labels) { char *label_name = lfirst(lc); ResultRelInfo *resultRelInfo; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; TupleTableSlot *slot; - resultRelInfo = create_entity_result_rel_info(estate, - graph_name, label_name); + resultRelInfo = create_entity_result_rel_info(estate, graph_name, + label_name); - scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc, - estate->es_snapshot, 0, NULL); + scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc, + estate->es_snapshot, 0, NULL); - slot = ExecInitExtraTupleSlot(estate, - RelationGetDescr(resultRelInfo->ri_RelationDesc)); + slot = ExecInitExtraTupleSlot( + estate, RelationGetDescr(resultRelInfo->ri_RelationDesc), + &TTSOpsHeapTuple); // scan the table - while(true) + while (true) { graphid startid, endid; bool isNull; @@ -517,7 +517,7 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, if (!HeapTupleIsValid(tuple)) break; - ExecStoreTuple(tuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, slot, false); startid = GRAPHID_GET_DATUM(slot_getattr(slot, Anum_ag_label_edge_table_start_id, &isNull)); endid = GRAPHID_GET_DATUM(slot_getattr(slot, Anum_ag_label_edge_table_end_id, &isNull)); @@ -540,7 +540,7 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, } } - heap_endscan(scan_desc); + table_endscan(scan_desc); destroy_entity_result_rel_info(resultRelInfo); } diff --git a/src/backend/executor/cypher_merge.c b/src/backend/executor/cypher_merge.c index c65abf2fe..49e690cd0 100644 --- a/src/backend/executor/cypher_merge.c +++ b/src/backend/executor/cypher_merge.c @@ -20,23 +20,20 @@ #include "postgres.h" #include "access/htup_details.h" +#include "access/table.h" #include "access/xact.h" #include "executor/tuptable.h" #include "nodes/execnodes.h" #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/plannodes.h" -#include "parser/parse_relation.h" -#include "rewrite/rewriteHandler.h" #include "utils/rel.h" -#include "utils/tqual.h" #include "catalog/ag_label.h" #include "executor/cypher_executor.h" #include "executor/cypher_utils.h" #include "nodes/cypher_nodes.h" #include "utils/agtype.h" -#include "utils/ag_cache.h" #include "utils/graphid.h" static void begin_cypher_merge(CustomScanState *node, EState *estate, @@ -90,7 +87,8 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, ExecAssignExprContext(estate, &node->ss.ps); ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree)); + ExecGetResultType(node->ss.ps.lefttree), + &TTSOpsVirtual); /* * When MERGE is not the last clause in a cypher query. Setup projection @@ -125,7 +123,7 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, } // Open relation and aquire a row exclusive lock. 
- rel = heap_open(cypher_node->relid, RowExclusiveLock); + rel = table_open(cypher_node->relid, RowExclusiveLock); // Initialize resultRelInfo for the vertex cypher_node->resultRelInfo = makeNode(ResultRelInfo); @@ -139,7 +137,8 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, // Setup the relation's tuple slot cypher_node->elemTupleSlot = ExecInitExtraTupleSlot( estate, - RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc)); + RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc), + &TTSOpsHeapTuple); if (cypher_node->id_expr != NULL) { @@ -162,7 +161,9 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, * that have modified the command id. */ if (estate->es_output_cid == 0) + { estate->es_output_cid = estate->es_snapshot->curcid; + } /* store the currentCommandId for this instance */ css->base_currentCommandId = GetCurrentCommandId(false); @@ -262,7 +263,6 @@ static void process_simple_merge(CustomScanState *node) /* setup the scantuple that the process_path needs */ econtext->ecxt_scantuple = node->ss.ps.lefttree->ps_ResultTupleSlot; - econtext->ecxt_scantuple->tts_isempty = false; process_path(css); } @@ -489,8 +489,8 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node) * it. */ ExecInitScanTupleSlot(estate, &sss->ss, - ExecGetResultType(sss->subplan)); - + ExecGetResultType(sss->subplan), + &TTSOpsVirtual); /* setup the scantuple that the process_path needs */ econtext->ecxt_scantuple = sss->ss.ss_ScanTupleSlot; @@ -514,7 +514,7 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node) econtext->ecxt_scantuple->tts_isnull); // store the heap tuble - ExecStoreTuple(heap_tuple, econtext->ecxt_scantuple, InvalidBuffer, false); + ExecStoreVirtualTuple(econtext->ecxt_scantuple); /* * make the subquery's projection scan slot be the tuple table we @@ -564,8 +564,8 @@ static void end_cypher_merge(CustomScanState *node) ExecCloseIndices(cypher_node->resultRelInfo); // close the relation itself - heap_close(cypher_node->resultRelInfo->ri_RelationDesc, - RowExclusiveLock); + table_close(cypher_node->resultRelInfo->ri_RelationDesc, + RowExclusiveLock); } } diff --git a/src/backend/executor/cypher_set.c b/src/backend/executor/cypher_set.c index 1bc74b764..63a016635 100644 --- a/src/backend/executor/cypher_set.c +++ b/src/backend/executor/cypher_set.c @@ -19,25 +19,20 @@ #include "postgres.h" -#include "access/sysattr.h" +#include "access/heapam.h" #include "access/htup_details.h" #include "access/xact.h" -#include "storage/bufmgr.h" #include "executor/tuptable.h" #include "nodes/execnodes.h" #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/plannodes.h" -#include "parser/parsetree.h" -#include "parser/parse_relation.h" #include "rewrite/rewriteHandler.h" +#include "storage/bufmgr.h" #include "utils/rel.h" -#include "catalog/ag_label.h" -#include "commands/label_commands.h" #include "executor/cypher_executor.h" #include "executor/cypher_utils.h" -#include "parser/cypher_parse_node.h" #include "nodes/cypher_nodes.h" #include "utils/agtype.h" #include "utils/graphid.h" @@ -82,8 +77,9 @@ static void begin_cypher_set(CustomScanState *node, EState *estate, ExecAssignExprContext(estate, &node->ss.ps); ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree)); - + ExecGetResultType(node->ss.ps.lefttree), + &TTSOpsHeapTuple); + if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags)) { TupleDesc tupdesc = node->ss.ss_ScanTupleSlot->tts_tupleDescriptor; @@ -112,12 +108,12 @@ static 
HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, { HeapTuple tuple = NULL; LockTupleMode lockmode; - HeapUpdateFailureData hufd; - HTSU_Result lock_result; - HTSU_Result update_result; + TM_FailureData hufd; + TM_Result lock_result; + TM_Result update_result; Buffer buffer; - ResultRelInfo *saved_resultRelInfo = saved_resultRelInfo;; + //ResultRelInfo *saved_resultRelInfo = saved_resultRelInfo;; estate->es_result_relation_info = resultRelInfo; lockmode = ExecUpdateLockMode(estate, resultRelInfo); @@ -126,10 +122,11 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, GetCurrentCommandId(false), lockmode, LockWaitBlock, false, &buffer, &hufd); - if (lock_result == HeapTupleMayBeUpdated) + if (lock_result == TM_Ok) { + //ExecOpenIndices(resultRelInfo, false); ExecStoreVirtualTuple(elemTupleSlot); - tuple = ExecMaterializeSlot(elemTupleSlot); + tuple = ExecFetchSlotHeapTuple(elemTupleSlot, true, NULL); tuple->t_self = old_tuple->t_self; // Check the constraints of the tuple @@ -146,7 +143,7 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, estate->es_crosscheck_snapshot, true, &hufd, &lockmode); - if (update_result != HeapTupleMayBeUpdated) + if (update_result != TM_Ok) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Entity failed to be updated: %i", update_result))); @@ -155,13 +152,14 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, // Insert index entries for the tuple if (resultRelInfo->ri_NumIndices > 0) { - ExecInsertIndexTuples(elemTupleSlot, &(tuple->t_self), estate, + ExecInsertIndexTuples(elemTupleSlot,estate, false, NULL, NIL); } + //ExecCloseIndices(resultRelInfo); } ReleaseBuffer(buffer); - estate->es_result_relation_info = saved_resultRelInfo; + //estate->es_result_relation_info = saved_resultRelInfo; return tuple; } @@ -209,10 +207,6 @@ static bool check_path(agtype_value *path, graphid updated_id) return false; } -/* - * Construct a new agtype path with the entity with updated_id - * replacing all of its intances in path with updated_entity - */ static agtype_value *replace_entity_in_path(agtype_value *path, graphid updated_id, agtype *updated_entity) @@ -376,7 +370,7 @@ static void process_update_list(CustomScanState *node) TupleTableSlot *slot; ResultRelInfo *resultRelInfo; ScanKeyData scan_keys[1]; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; bool remove_property; char *label_name; cypher_update_item *update_item; @@ -461,11 +455,12 @@ static void process_update_list(CustomScanState *node) new_property_value, remove_property); - resultRelInfo = create_entity_result_rel_info(estate, - css->set_list->graph_name, - label_name); + resultRelInfo = create_entity_result_rel_info( + estate, css->set_list->graph_name, label_name); - slot = ExecInitExtraTupleSlot(estate, RelationGetDescr(resultRelInfo->ri_RelationDesc)); + slot = ExecInitExtraTupleSlot( + estate, RelationGetDescr(resultRelInfo->ri_RelationDesc), + &TTSOpsHeapTuple); /* * Now that we have the updated properties, create a either a vertex or @@ -528,8 +523,8 @@ static void process_update_list(CustomScanState *node) * Setup the scan description, with the correct snapshot and scan * keys. */ - scan_desc = heap_beginscan(resultRelInfo->ri_RelationDesc, - estate->es_snapshot, 1, scan_keys); + scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc, + estate->es_snapshot, 1, scan_keys); /* Retrieve the tuple. 
*/ heap_tuple = heap_getnext(scan_desc, ForwardScanDirection); @@ -543,12 +538,12 @@ static void process_update_list(CustomScanState *node) heap_tuple); } /* close the ScanDescription */ - heap_endscan(scan_desc); + table_endscan(scan_desc); } /* close relation */ ExecCloseIndices(resultRelInfo); - heap_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); + table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); /* increment loop index */ lidx++; diff --git a/src/backend/executor/cypher_utils.c b/src/backend/executor/cypher_utils.c index 3558fe696..15430da30 100644 --- a/src/backend/executor/cypher_utils.c +++ b/src/backend/executor/cypher_utils.c @@ -27,7 +27,9 @@ #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" +#include "access/heapam.h" #include "access/multixact.h" +#include "access/xact.h" #include "nodes/extensible.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" @@ -44,6 +46,7 @@ #include "executor/cypher_utils.h" #include "utils/agtype.h" #include "utils/ag_cache.h" +#include "utils/agtype.h" #include "utils/graphid.h" /* @@ -92,7 +95,7 @@ void destroy_entity_result_rel_info(ResultRelInfo *result_rel_info) ExecCloseIndices(result_rel_info); // close the rel - heap_close(result_rel_info->ri_RelationDesc, RowExclusiveLock); + table_close(result_rel_info->ri_RelationDesc, RowExclusiveLock); } TupleTableSlot *populate_vertex_tts( @@ -167,11 +170,11 @@ TupleTableSlot *populate_edge_tts( * Find out if the entity still exists. This is for 'implicit' deletion * of an entity. */ -bool entity_exists(EState *estate, Oid graph_oid, graphid id) +bool entity_exists(EState *estate, uint32 graph_oid, graphid id) { label_cache_data *label; ScanKeyData scan_keys[1]; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; Relation rel; bool result = true; @@ -180,14 +183,14 @@ bool entity_exists(EState *estate, Oid graph_oid, graphid id) * Extract the label id from the graph id and get the table name * the entity is part of. 
*/ - label = search_label_graph_id_cache(graph_oid, GET_LABEL_ID(id)); + label = search_label_graph_oid_cache(graph_oid, GET_LABEL_ID(id)); // Setup the scan key to be the graphid ScanKeyInit(&scan_keys[0], 1, BTEqualStrategyNumber, F_GRAPHIDEQ, GRAPHID_GET_DATUM(id)); - rel = heap_open(label->relation, RowExclusiveLock); - scan_desc = heap_beginscan(rel, estate->es_snapshot, 1, scan_keys); + rel = table_open(label->relation, RowExclusiveLock); + scan_desc = table_beginscan(rel, estate->es_snapshot, 1, scan_keys); tuple = heap_getnext(scan_desc, ForwardScanDirection); @@ -200,8 +203,8 @@ bool entity_exists(EState *estate, Oid graph_oid, graphid id) result = false; } - heap_endscan(scan_desc); - heap_close(rel, RowExclusiveLock); + table_endscan(scan_desc); + table_close(rel, RowExclusiveLock); return result; } @@ -235,7 +238,7 @@ HeapTuple insert_entity_tuple_cid(ResultRelInfo *resultRelInfo, HeapTuple tuple = NULL; ExecStoreVirtualTuple(elemTupleSlot); - tuple = ExecMaterializeSlot(elemTupleSlot); + tuple = ExecFetchSlotHeapTuple(elemTupleSlot, true, NULL); /* Check the constraints of the tuple */ tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); @@ -244,14 +247,14 @@ HeapTuple insert_entity_tuple_cid(ResultRelInfo *resultRelInfo, ExecConstraints(resultRelInfo, elemTupleSlot, estate); } - /* Insert the tuple using the passed in cid */ - heap_insert(resultRelInfo->ri_RelationDesc, tuple, cid, 0, NULL); + // Insert the tuple normally + table_tuple_insert(resultRelInfo->ri_RelationDesc, elemTupleSlot, + GetCurrentCommandId(true), 0, NULL); - /* Insert index entries for the tuple */ + // Insert index entries for the tuple if (resultRelInfo->ri_NumIndices > 0) { - ExecInsertIndexTuples(elemTupleSlot, &(tuple->t_self), estate, false, - NULL, NIL); + ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, NIL); } return tuple; diff --git a/src/backend/nodes/ag_nodes.c b/src/backend/nodes/ag_nodes.c index d65bb5038..fc60e3aa7 100644 --- a/src/backend/nodes/ag_nodes.c +++ b/src/backend/nodes/ag_nodes.c @@ -23,9 +23,9 @@ #include "nodes/ag_nodes.h" #include "nodes/cypher_copyfuncs.h" +#include "nodes/cypher_nodes.h" #include "nodes/cypher_outfuncs.h" #include "nodes/cypher_readfuncs.h" -#include "nodes/cypher_nodes.h" static bool equal_ag_node(const ExtensibleNode *a, const ExtensibleNode *b); @@ -59,7 +59,7 @@ const char *node_names[] = { "cypher_update_item", "cypher_delete_information", "cypher_delete_item", - "cypher_merge_information" + "cypher_merge_information", }; /* @@ -121,7 +121,7 @@ const ExtensibleNodeMethods node_methods[] = { DEFINE_NODE_METHODS_EXTENDED(cypher_update_item), DEFINE_NODE_METHODS_EXTENDED(cypher_delete_information), DEFINE_NODE_METHODS_EXTENDED(cypher_delete_item), - DEFINE_NODE_METHODS_EXTENDED(cypher_merge_information) + DEFINE_NODE_METHODS_EXTENDED(cypher_merge_information), }; static bool equal_ag_node(const ExtensibleNode *a, const ExtensibleNode *b) diff --git a/src/backend/nodes/cypher_outfuncs.c b/src/backend/nodes/cypher_outfuncs.c index 05c0f7f50..1d84bc289 100644 --- a/src/backend/nodes/cypher_outfuncs.c +++ b/src/backend/nodes/cypher_outfuncs.c @@ -303,7 +303,7 @@ void out_cypher_create_target_nodes(StringInfo str, const ExtensibleNode *node) WRITE_NODE_FIELD(paths); WRITE_INT32_FIELD(flags); - WRITE_OID_FIELD(graph_oid); + WRITE_INT32_FIELD(graph_oid); } // serialization function for the cypher_create_path ExtensibleNode. 
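/*
 * Editor's illustration (not part of the patch): graph_oid is now carried as
 * a plain 32-bit integer rather than an Oid, so the serialized node text in
 * the surrounding out/read hunks switches from an OID field to an integer
 * field. The pair below sketches roughly what the WRITE_INT32_FIELD and
 * READ_UINT_FIELD macros used here reduce to; the real macro bodies live in
 * cypher_outfuncs.c and cypher_readfuncs.c, and write_graph_oid /
 * read_graph_oid are hypothetical helpers, not code added by this series.
 */
#include "postgres.h"

#include "lib/stringinfo.h"
#include "nodes/readfuncs.h"    /* pg_strtok() */

static void write_graph_oid(StringInfo str, uint32 graph_oid)
{
    /* WRITE_INT32_FIELD(graph_oid), roughly: emit ":fldname value" */
    appendStringInfo(str, " :graph_oid %d", (int) graph_oid);
}

static uint32 read_graph_oid(void)
{
    const char *token;
    int         length;

    token = pg_strtok(&length);     /* skip the ":graph_oid" key token */
    token = pg_strtok(&length);     /* the value token */

    /* READ_UINT_FIELD / READ_INT_FIELD both come down to a base-10 parse */
    return (uint32) strtol(token, NULL, 10);
}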
@@ -370,7 +370,7 @@ void out_cypher_delete_information(StringInfo str, const ExtensibleNode *node) WRITE_NODE_FIELD(delete_items); WRITE_INT32_FIELD(flags); WRITE_STRING_FIELD(graph_name); - WRITE_OID_FIELD(graph_oid); + WRITE_INT32_FIELD(graph_oid); WRITE_BOOL_FIELD(detach); } @@ -389,7 +389,7 @@ void out_cypher_merge_information(StringInfo str, const ExtensibleNode *node) DEFINE_AG_NODE(cypher_merge_information); WRITE_INT32_FIELD(flags); - WRITE_OID_FIELD(graph_oid); + WRITE_INT32_FIELD(graph_oid); WRITE_INT32_FIELD(merge_function_attr); WRITE_NODE_FIELD(path); } diff --git a/src/backend/nodes/cypher_readfuncs.c b/src/backend/nodes/cypher_readfuncs.c index 3d3249d9a..972e7211f 100644 --- a/src/backend/nodes/cypher_readfuncs.c +++ b/src/backend/nodes/cypher_readfuncs.c @@ -21,19 +21,27 @@ #include "nodes/readfuncs.h" -#include "nodes/cypher_readfuncs.h" #include "nodes/cypher_nodes.h" +#include "nodes/cypher_readfuncs.h" /* * Copied From Postgres * * Macros for declaring appropriate local variables. */ -// Declare the extensible node and local fields for the pg_strtok +/* A few guys need only local_node */ +#define READ_LOCALS_NO_FIELDS(nodeTypeName) \ + nodeTypeName *local_node = (nodeTypeName *) node + +/* And a few guys need only the pg_strtok support fields */ +#define READ_TEMP_LOCALS() \ + const char *token; \ + int length + +/* ... but most need both */ #define READ_LOCALS(nodeTypeName) \ - nodeTypeName *local_node = (nodeTypeName *)node; \ - char *token; \ - int length; + READ_LOCALS_NO_FIELDS(nodeTypeName); \ + READ_TEMP_LOCALS() /* * The READ_*_FIELD defines first skips the :fildname token (key) part of the string @@ -49,7 +57,7 @@ #define READ_INT_FIELD(fldname) \ token = pg_strtok(&length); \ token = pg_strtok(&length); \ - local_node->fldname = atoi(token) + local_node->fldname = strtol(token, 0, 10) // Read an unsigned integer field (anything written as ":fldname %u") #define READ_UINT_FIELD(fldname) \ @@ -85,7 +93,7 @@ #define READ_ENUM_FIELD(fldname, enumtype) \ token = pg_strtok(&length); \ token = pg_strtok(&length); \ - local_node->fldname = (enumtype) atoi(token) + local_node->fldname = (enumtype) strtol(token, 0, 10) // Read a float field #define READ_FLOAT_FIELD(fldname) \ @@ -179,7 +187,7 @@ void read_cypher_create_target_nodes(struct ExtensibleNode *node) READ_NODE_FIELD(paths); READ_INT_FIELD(flags); - READ_OID_FIELD(graph_oid); + READ_INT_FIELD(graph_oid); } /* @@ -261,7 +269,7 @@ void read_cypher_delete_information(struct ExtensibleNode *node) READ_NODE_FIELD(delete_items); READ_INT_FIELD(flags); READ_STRING_FIELD(graph_name); - READ_OID_FIELD(graph_oid); + READ_INT_FIELD(graph_oid); READ_BOOL_FIELD(detach); } @@ -286,7 +294,7 @@ void read_cypher_merge_information(struct ExtensibleNode *node) READ_LOCALS(cypher_merge_information); READ_INT_FIELD(flags); - READ_OID_FIELD(graph_oid); + READ_UINT_FIELD(graph_oid); READ_INT_FIELD(merge_function_attr); READ_NODE_FIELD(path); } diff --git a/src/backend/optimizer/cypher_createplan.c b/src/backend/optimizer/cypher_createplan.c index 9e0863423..c6480d154 100644 --- a/src/backend/optimizer/cypher_createplan.c +++ b/src/backend/optimizer/cypher_createplan.c @@ -19,13 +19,10 @@ #include "postgres.h" -#include "access/sysattr.h" -#include "catalog/pg_type_d.h" #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/pg_list.h" #include "nodes/plannodes.h" -#include "nodes/relation.h" #include "executor/cypher_executor.h" #include "optimizer/cypher_createplan.h" diff --git 
a/src/backend/optimizer/cypher_pathnode.c b/src/backend/optimizer/cypher_pathnode.c index 4e04b752c..cdd0b0635 100644 --- a/src/backend/optimizer/cypher_pathnode.c +++ b/src/backend/optimizer/cypher_pathnode.c @@ -22,7 +22,6 @@ #include "nodes/extensible.h" #include "nodes/nodes.h" #include "nodes/pg_list.h" -#include "nodes/relation.h" #include "optimizer/cypher_createplan.h" #include "optimizer/cypher_pathnode.h" diff --git a/src/backend/optimizer/cypher_paths.c b/src/backend/optimizer/cypher_paths.c index b1ac6938a..80d916127 100644 --- a/src/backend/optimizer/cypher_paths.c +++ b/src/backend/optimizer/cypher_paths.c @@ -19,11 +19,8 @@ #include "postgres.h" -#include "access/sysattr.h" -#include "catalog/pg_type_d.h" #include "nodes/parsenodes.h" #include "nodes/primnodes.h" -#include "nodes/relation.h" #include "optimizer/pathnode.h" #include "optimizer/paths.h" diff --git a/src/backend/parser/ag_scanner.l b/src/backend/parser/ag_scanner.l index 3bab06157..68b15a22c 100644 --- a/src/backend/parser/ag_scanner.l +++ b/src/backend/parser/ag_scanner.l @@ -316,6 +316,7 @@ static int _scan_errposition(const int location, const ag_yy_extra *extra); * and is the same with "ag_scanner_t". */ #define YY_DECL ag_token ag_scanner_next_token(yyscan_t yyscanner) +#define NDIGITS_PER_REMAINDER 9 %} %% @@ -902,7 +903,6 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb) */ const uint64 divisor = 1000000000; const int ndivisions = 3; - const int ndigits_per_remainder = 9; int ndigits; int nwords; @@ -1048,11 +1048,11 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb) // convert the collected remainders to a string, starting from the last one for (i = nremainders - 1; i >= 0; i--) { - char buf[ndigits_per_remainder]; + char buf[NDIGITS_PER_REMAINDER]; int buf_i; uint32 tmp; - buf_i = ndigits_per_remainder; + buf_i = NDIGITS_PER_REMAINDER; for (tmp = remainders[i]; tmp > 0; tmp /= 10) buf[--buf_i] = '0' + (char)(tmp % 10); @@ -1064,7 +1064,7 @@ static void _numstr_to_decimal(const char *numstr, const int base, strbuf *sb) buf[--buf_i] = '0'; } - strbuf_append_buf(sb, &buf[buf_i], ndigits_per_remainder - buf_i); + strbuf_append_buf(sb, &buf[buf_i], NDIGITS_PER_REMAINDER - buf_i); } pfree(remainders); diff --git a/src/backend/parser/cypher_analyze.c b/src/backend/parser/cypher_analyze.c index 15222ffa2..8b96dc23f 100644 --- a/src/backend/parser/cypher_analyze.c +++ b/src/backend/parser/cypher_analyze.c @@ -32,7 +32,6 @@ #include "parser/parse_node.h" #include "parser/parse_relation.h" #include "parser/parse_target.h" -#include "parser/parsetree.h" #include "utils/builtins.h" #include "catalog/ag_graph.h" @@ -58,11 +57,11 @@ static const char *expr_get_const_cstring(Node *expr, const char *source_str); static int get_query_location(const int location, const char *source_str); static Query *analyze_cypher(List *stmt, ParseState *parent_pstate, const char *query_str, int query_loc, - char *graph_name, Oid graph_oid, Param *params); + char *graph_name, uint32 graph_oid, Param *params); static Query *analyze_cypher_and_coerce(List *stmt, RangeTblFunction *rtfunc, ParseState *parent_pstate, const char *query_str, int query_loc, - char *graph_name, Oid graph_oid, + char *graph_name, uint32 graph_oid, Param *params); void post_parse_analyze_init(void) @@ -175,7 +174,7 @@ static bool convert_cypher_walker(Node *node, ParseState *pstate) * QTW_IGNORE_JOINALIASES * We are not interested in this. 
*/ - flags = QTW_EXAMINE_RTES | QTW_IGNORE_RT_SUBQUERIES | + flags = QTW_EXAMINE_RTES_BEFORE | QTW_IGNORE_RT_SUBQUERIES | QTW_IGNORE_JOINALIASES; /* clear the global variable extra_node */ @@ -184,7 +183,7 @@ static bool convert_cypher_walker(Node *node, ParseState *pstate) /* recurse on query */ result = query_tree_walker(query, convert_cypher_walker, pstate, flags); - /* check for EXPLAIN */ + /* todo: I don't understand why wrote this. but, can't event this */ if (extra_node != NULL && nodeTag(extra_node) == T_ExplainStmt) { ExplainStmt *estmt = NULL; @@ -271,7 +270,7 @@ static void convert_cypher_to_subquery(RangeTblEntry *rte, ParseState *pstate) FuncExpr *funcexpr = (FuncExpr *)rtfunc->funcexpr; Node *arg; Name graph_name; - Oid graph_oid; + uint32 graph_oid; const char *query_str; int query_loc; Param *params; @@ -485,7 +484,7 @@ static int get_query_location(const int location, const char *source_str) static Query *analyze_cypher(List *stmt, ParseState *parent_pstate, const char *query_str, int query_loc, - char *graph_name, Oid graph_oid, Param *params) + char *graph_name, uint32 graph_oid, Param *params) { cypher_clause *clause; ListCell *lc; @@ -564,7 +563,7 @@ static Query *analyze_cypher(List *stmt, ParseState *parent_pstate, static Query *analyze_cypher_and_coerce(List *stmt, RangeTblFunction *rtfunc, ParseState *parent_pstate, const char *query_str, int query_loc, - char *graph_name, Oid graph_oid, + char *graph_name, uint32 graph_oid, Param *params) { ParseState *pstate; diff --git a/src/backend/parser/cypher_clause.c b/src/backend/parser/cypher_clause.c index 2269d3f3d..e07d07a83 100644 --- a/src/backend/parser/cypher_clause.c +++ b/src/backend/parser/cypher_clause.c @@ -25,6 +25,7 @@ #include "postgres.h" #include "access/sysattr.h" +#include "access/heapam.h" #include "catalog/pg_type_d.h" #include "miscadmin.h" #include "nodes/makefuncs.h" @@ -33,7 +34,7 @@ #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" -#include "optimizer/var.h" +#include "optimizer/optimizer.h" #include "parser/parse_clause.h" #include "parser/parse_coerce.h" #include "parser/parse_collate.h" @@ -85,6 +86,14 @@ #define AGE_VARNAME_ID AGE_DEFAULT_VARNAME_PREFIX"id" #define AGE_VARNAME_SET_CLAUSE AGE_DEFAULT_VARNAME_PREFIX"set_clause" +/* + * In the transformation stage, we need to track + * where a variable came from. When moving between + * clauses, Postgres parsestate and Query data structures + * are insufficient for some of the information we + * need. 
+ */ + /* * Rules to determine if a node must be included: * @@ -4352,7 +4361,7 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, List **target_list, rel->relid = RelationGetRelid(label_relation); rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, - NULL, false, false); + AccessShareLock, NULL, false, false); rte->requiredPerms = ACL_INSERT; // Build Id expression, always use the default logic @@ -4578,7 +4587,7 @@ transform_create_cypher_new_node(cypher_parsestate *cpstate, rel->relid = RelationGetRelid(label_relation); rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, - NULL, false, false); + AccessShareLock, NULL, false, false); rte->requiredPerms = ACL_INSERT; // id @@ -5366,7 +5375,7 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, List **target_list, rel->relid = RelationGetRelid(label_relation); rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, - NULL, false, false); + AccessShareLock, NULL, false, false); rte->requiredPerms = ACL_INSERT; // Build Id expression, always use the default logic @@ -5471,7 +5480,7 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, List **target_list, rel->relid = RelationGetRelid(label_relation); rte = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation, - NULL, false, false); + AccessShareLock, NULL, false, false); rte->requiredPerms = ACL_INSERT; // id diff --git a/src/backend/parser/cypher_expr.c b/src/backend/parser/cypher_expr.c index cb437880b..f0627eba5 100644 --- a/src/backend/parser/cypher_expr.c +++ b/src/backend/parser/cypher_expr.c @@ -41,6 +41,7 @@ #include "parser/parse_oper.h" #include "parser/parse_relation.h" #include "utils/builtins.h" +#include "utils/float.h" #include "utils/int8.h" #include "utils/lsyscache.h" #include "utils/syscache.h" diff --git a/src/backend/parser/cypher_keywords.c b/src/backend/parser/cypher_keywords.c index 5ed120288..91df69d26 100644 --- a/src/backend/parser/cypher_keywords.c +++ b/src/backend/parser/cypher_keywords.c @@ -33,63 +33,27 @@ #include "funcapi.h" #include "parser/cypher_gram.h" +#include "parser/cypher_kwlist_d.h" /* * This list must be sorted by ASCII name, because binary search is used to * locate entries. 
*/ -const ScanKeyword cypher_keywords[] = { - {"all", ALL, RESERVED_KEYWORD}, - {"analyze", ANALYZE, RESERVED_KEYWORD}, - {"and", AND, RESERVED_KEYWORD}, - {"as", AS, RESERVED_KEYWORD}, - {"asc", ASC, RESERVED_KEYWORD}, - {"ascending", ASCENDING, RESERVED_KEYWORD}, - {"by", BY, RESERVED_KEYWORD}, - {"call", CALL, RESERVED_KEYWORD}, - {"case", CASE, RESERVED_KEYWORD}, - {"coalesce", COALESCE, RESERVED_KEYWORD}, - {"contains", CONTAINS, RESERVED_KEYWORD}, - {"create", CREATE, RESERVED_KEYWORD}, - {"delete", DELETE, RESERVED_KEYWORD}, - {"desc", DESC, RESERVED_KEYWORD}, - {"descending", DESCENDING, RESERVED_KEYWORD}, - {"detach", DETACH, RESERVED_KEYWORD}, - {"distinct", DISTINCT, RESERVED_KEYWORD}, - {"else", ELSE, RESERVED_KEYWORD}, - {"end", END_P, RESERVED_KEYWORD}, - {"ends", ENDS, RESERVED_KEYWORD}, - {"exists", EXISTS, RESERVED_KEYWORD}, - {"explain", EXPLAIN, RESERVED_KEYWORD}, - {"false", FALSE_P, RESERVED_KEYWORD}, - {"in", IN, RESERVED_KEYWORD}, - {"is", IS, RESERVED_KEYWORD}, - {"limit", LIMIT, RESERVED_KEYWORD}, - {"match", MATCH, RESERVED_KEYWORD}, - {"merge", MERGE, RESERVED_KEYWORD}, - {"not", NOT, RESERVED_KEYWORD}, - {"null", NULL_P, RESERVED_KEYWORD}, - {"optional", OPTIONAL, RESERVED_KEYWORD}, - {"or", OR, RESERVED_KEYWORD}, - {"order", ORDER, RESERVED_KEYWORD}, - {"remove", REMOVE, RESERVED_KEYWORD}, - {"return", RETURN, RESERVED_KEYWORD}, - {"set", SET, RESERVED_KEYWORD}, - {"skip", SKIP, RESERVED_KEYWORD}, - {"starts", STARTS, RESERVED_KEYWORD}, - {"then", THEN, RESERVED_KEYWORD}, - {"true", TRUE_P, RESERVED_KEYWORD}, - {"union", UNION, RESERVED_KEYWORD}, - {"unwind", UNWIND, RESERVED_KEYWORD}, - {"verbose", VERBOSE, RESERVED_KEYWORD}, - {"when", WHEN, RESERVED_KEYWORD}, - {"where", WHERE, RESERVED_KEYWORD}, - {"with", WITH, RESERVED_KEYWORD}, - {"xor", XOR, RESERVED_KEYWORD}, - {"yield", YIELD, RESERVED_KEYWORD} +#define PG_KEYWORD(kwname, value, category) value, + +const uint16 CypherKeywordTokens[] = { +#include "parser/cypher_kwlist.h" +}; + +#undef PG_KEYWORD + +#define PG_KEYWORD(kwname, value, category) category, + +const uint16 CypherKeywordCategories[] = { +#include "parser/cypher_kwlist.h" }; -const int num_cypher_keywords = lengthof(cypher_keywords); +#undef PG_KEYWORD PG_FUNCTION_INFO_V1(get_cypher_keywords); @@ -106,7 +70,7 @@ Datum get_cypher_keywords(PG_FUNCTION_ARGS) func_ctx = SRF_FIRSTCALL_INIT(); old_mem_ctx = MemoryContextSwitchTo(func_ctx->multi_call_memory_ctx); - tup_desc = CreateTemplateTupleDesc(3, false); + tup_desc = CreateTemplateTupleDesc(3); TupleDescInitEntry(tup_desc, (AttrNumber)1, "word", TEXTOID, -1, 0); TupleDescInitEntry(tup_desc, (AttrNumber)2, "catcode", CHAROID, -1, 0); TupleDescInitEntry(tup_desc, (AttrNumber)3, "catdesc", TEXTOID, -1, 0); @@ -118,15 +82,17 @@ Datum get_cypher_keywords(PG_FUNCTION_ARGS) func_ctx = SRF_PERCALL_SETUP(); - if (func_ctx->call_cntr < num_cypher_keywords) + if (func_ctx->call_cntr < CypherKeyword.num_keywords) { char *values[3]; HeapTuple tuple; // cast-away-const is ugly but alternatives aren't much better - values[0] = (char *)cypher_keywords[func_ctx->call_cntr].name; + //values[0] = (char *)cypher_keywords[func_ctx->call_cntr].name; + values[0] = (char *) GetScanKeyword((int) func_ctx->call_cntr, + &CypherKeyword); - switch (cypher_keywords[func_ctx->call_cntr].category) + switch (CypherKeywordCategories[func_ctx->call_cntr]) { case UNRESERVED_KEYWORD: values[1] = "U"; diff --git a/src/backend/parser/cypher_parse_agg.c b/src/backend/parser/cypher_parse_agg.c index b5654e778..8fdb71d3e 100644 --- 
a/src/backend/parser/cypher_parse_agg.c +++ b/src/backend/parser/cypher_parse_agg.c @@ -27,7 +27,7 @@ #include "catalog/pg_constraint.h" #include "nodes/nodeFuncs.h" #include "optimizer/tlist.h" -#include "optimizer/var.h" +#include "optimizer/optimizer.h" #include "parser/cypher_parse_agg.h" #include "parser/parsetree.h" #include "rewrite/rewriteManip.h" @@ -192,7 +192,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) root->planner_cxt = CurrentMemoryContext; root->hasJoinRTEs = true; - groupClauses = (List *) flatten_join_alias_vars(root, + groupClauses = (List *) flatten_join_alias_vars((Query*)root, (Node *) groupClauses); } @@ -236,7 +236,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) finalize_grouping_exprs(clause, pstate, qry, groupClauses, root, have_non_var_grouping); if (hasJoinRTEs) - clause = flatten_join_alias_vars(root, clause); + clause = flatten_join_alias_vars((Query*)root, clause); check_ungrouped_columns(clause, pstate, qry, groupClauses, groupClauseCommonVars, have_non_var_grouping, &func_grouped_rels); @@ -245,7 +245,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry) finalize_grouping_exprs(clause, pstate, qry, groupClauses, root, have_non_var_grouping); if (hasJoinRTEs) - clause = flatten_join_alias_vars(root, clause); + clause = flatten_join_alias_vars((Query*)root, clause); check_ungrouped_columns(clause, pstate, qry, groupClauses, groupClauseCommonVars, have_non_var_grouping, &func_grouped_rels); @@ -562,7 +562,7 @@ static bool finalize_grouping_exprs_walker(Node *node, Index ref = 0; if (context->root) - expr = flatten_join_alias_vars(context->root, expr); + expr = flatten_join_alias_vars((Query*)context->root, expr); /* * Each expression must match a grouping entry at the current diff --git a/src/backend/parser/cypher_parser.c b/src/backend/parser/cypher_parser.c index 8dd53df26..e12c7efdc 100644 --- a/src/backend/parser/cypher_parser.c +++ b/src/backend/parser/cypher_parser.c @@ -19,7 +19,6 @@ #include "postgres.h" -#include "common/keywords.h" #include "nodes/pg_list.h" #include "parser/scansup.h" @@ -67,20 +66,19 @@ int cypher_yylex(YYSTYPE *lvalp, YYLTYPE *llocp, ag_scanner_t scanner) break; case AG_TOKEN_IDENTIFIER: { - const ScanKeyword *keyword; + int kwnum; char *ident; - keyword = ScanKeywordLookup(token.value.s, cypher_keywords, - num_cypher_keywords); - if (keyword) + kwnum = ScanKeywordLookup(token.value.s, &CypherKeyword); + if (kwnum >= 0) { /* * use token.value.s instead of keyword->name to preserve * case sensitivity */ - lvalp->keyword = token.value.s; + lvalp->keyword = GetScanKeyword(kwnum, &CypherKeyword); *llocp = token.location; - return keyword->value; + return CypherKeywordTokens[kwnum]; } ident = pstrdup(token.value.s); diff --git a/src/backend/utils/adt/ag_float8_supp.c b/src/backend/utils/adt/ag_float8_supp.c index 286f074ef..450fdc07a 100644 --- a/src/backend/utils/adt/ag_float8_supp.c +++ b/src/backend/utils/adt/ag_float8_supp.c @@ -27,6 +27,7 @@ #include +#include "utils/float.h" #include "utils/builtins.h" #include "utils/ag_float8_supp.h" diff --git a/src/backend/utils/adt/age_global_graph.c b/src/backend/utils/adt/age_global_graph.c index 3da11b44c..b980bd642 100644 --- a/src/backend/utils/adt/age_global_graph.c +++ b/src/backend/utils/adt/age_global_graph.c @@ -19,14 +19,23 @@ #include "postgres.h" +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "access/table.h" +#include "access/tableam.h" #include "catalog/namespace.h" +#include 
"commands/label_commands.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/snapmgr.h" #include "commands/label_commands.h" +#include "catalog/ag_graph.h" +#include "catalog/ag_label.h" #include "utils/age_global_graph.h" +#include "utils/age_graphid_ds.h" #include "utils/agtype.h" #include "catalog/ag_graph.h" #include "catalog/ag_label.h" @@ -189,7 +198,7 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid, List *labels = NIL; ScanKeyData scan_keys[2]; Relation ag_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; TupleDesc tupdesc; @@ -203,8 +212,8 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid, F_CHAREQ, CharGetDatum(label_type)); /* setup the table to be scanned, ag_label in this case */ - ag_label = heap_open(ag_label_relation_id(), ShareLock); - scan_desc = heap_beginscan(ag_label, snapshot, 2, scan_keys); + ag_label = table_open(ag_label_relation_id(), ShareLock); + scan_desc = table_beginscan(ag_label, snapshot, 2, scan_keys); /* get the tupdesc - we don't need to release this one */ tupdesc = RelationGetDescr(ag_label); @@ -228,8 +237,8 @@ static List *get_ag_labels_names(Snapshot snapshot, Oid graph_oid, } /* close up scan */ - heap_endscan(scan_desc); - heap_close(ag_label, ShareLock); + table_endscan(scan_desc); + table_close(ag_label, ShareLock); return labels; } @@ -399,7 +408,7 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx) foreach (lc, vertex_label_names) { Relation graph_vertex_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; char *vertex_label_name; Oid vertex_label_table_oid; @@ -411,8 +420,8 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx) vertex_label_table_oid = get_relname_relid(vertex_label_name, graph_namespace_oid); /* open the relation (table) and begin the scan */ - graph_vertex_label = heap_open(vertex_label_table_oid, ShareLock); - scan_desc = heap_beginscan(graph_vertex_label, snapshot, 0, NULL); + graph_vertex_label = table_open(vertex_label_table_oid, ShareLock); + scan_desc = table_beginscan(graph_vertex_label, snapshot, 0, NULL); /* get the tupdesc - we don't need to release this one */ tupdesc = RelationGetDescr(graph_vertex_label); /* bail if the number of columns differs */ @@ -452,8 +461,8 @@ static void load_vertex_hashtable(GRAPH_global_context *ggctx) } /* end the scan and close the relation */ - heap_endscan(scan_desc); - heap_close(graph_vertex_label, ShareLock); + table_endscan(scan_desc); + table_close(graph_vertex_label, ShareLock); } } @@ -498,7 +507,7 @@ static void load_edge_hashtable(GRAPH_global_context *ggctx) foreach (lc, edge_label_names) { Relation graph_edge_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; char *edge_label_name; Oid edge_label_table_oid; @@ -510,8 +519,8 @@ static void load_edge_hashtable(GRAPH_global_context *ggctx) edge_label_table_oid = get_relname_relid(edge_label_name, graph_namespace_oid); /* open the relation (table) and begin the scan */ - graph_edge_label = heap_open(edge_label_table_oid, ShareLock); - scan_desc = heap_beginscan(graph_edge_label, snapshot, 0, NULL); + graph_edge_label = table_open(edge_label_table_oid, ShareLock); + scan_desc = table_beginscan(graph_edge_label, snapshot, 0, NULL); /* get the tupdesc - we don't need to release this one */ tupdesc = RelationGetDescr(graph_edge_label); /* bail if the number of columns differs */ @@ -573,8 +582,8 @@ static void 
load_edge_hashtable(GRAPH_global_context *ggctx) } /* end the scan and close the relation */ - heap_endscan(scan_desc); - heap_close(graph_edge_label, ShareLock); + table_endscan(scan_desc); + table_close(graph_edge_label, ShareLock); } } diff --git a/src/backend/utils/adt/age_vle.c b/src/backend/utils/adt/age_vle.c index 35d7e71d7..e3e6385d5 100644 --- a/src/backend/utils/adt/age_vle.c +++ b/src/backend/utils/adt/age_vle.c @@ -19,6 +19,8 @@ #include "postgres.h" +#include "access/heapam.h" +#include "catalog/namespace.h" #include "catalog/pg_type.h" #include "funcapi.h" #include "utils/lsyscache.h" diff --git a/src/backend/utils/adt/agtype.c b/src/backend/utils/adt/agtype.c index 908f38870..019dc98c7 100644 --- a/src/backend/utils/adt/agtype.c +++ b/src/backend/utils/adt/agtype.c @@ -32,8 +32,15 @@ #include +#include "access/genam.h" +#include "access/heapam.h" +#include "access/skey.h" +#include "access/table.h" +#include "access/tableam.h" #include "access/htup_details.h" #include "catalog/namespace.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_operator.h" #include "catalog/pg_type.h" #include "catalog/pg_aggregate_d.h" #include "catalog/pg_collation_d.h" @@ -45,6 +52,7 @@ #include "parser/parse_coerce.h" #include "nodes/pg_list.h" #include "utils/builtins.h" +#include "utils/float.h" #include "utils/fmgroids.h" #include "utils/int8.h" #include "utils/lsyscache.h" @@ -151,7 +159,7 @@ static bool is_array_path(agtype_value *agtv); /* graph entity retrieval */ static Datum get_vertex(const char *graph, const char *vertex_label, int64 graphid); -static char *get_label_name(const char *graph_name, int64 graph_id); +static char *get_label_name(const char *graph_name, int64 label_id); static float8 get_float_compatible_arg(Datum arg, Oid type, char *funcname, bool *is_null); static Numeric get_numeric_compatible_arg(Datum arg, Oid type, char *funcname, @@ -185,7 +193,7 @@ Oid get_AGTYPEOID(void) { if (g_AGTYPEOID == InvalidOid) { - g_AGTYPEOID = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum("agtype"), + g_AGTYPEOID = GetSysCacheOid1(TYPENAMENSP, CStringGetDatum("agtype"), ObjectIdGetDatum(ag_catalog_namespace_id())); } @@ -197,7 +205,7 @@ Oid get_AGTYPEARRAYOID(void) { if (g_AGTYPEARRAYOID == InvalidOid) { - g_AGTYPEARRAYOID = GetSysCacheOid2(TYPENAMENSP, + g_AGTYPEARRAYOID = GetSysCacheOid1(TYPENAMENSP, CStringGetDatum("_agtype"), ObjectIdGetDatum(ag_catalog_namespace_id())); } @@ -2144,7 +2152,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("id")); - if (fcinfo->argnull[0]) + if (fcinfo->args[0].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_vertex() graphid cannot be NULL"))); @@ -2157,7 +2165,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("label")); - if (fcinfo->argnull[1]) + if (fcinfo->args[1].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_vertex() label cannot be NULL"))); @@ -2170,7 +2178,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS) string_to_agtype_value("properties")); //if the properties object is null, push an empty object - if (fcinfo->argnull[2]) + if (fcinfo->args[2].isnull) { result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, NULL); @@ -2226,7 +2234,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, 
string_to_agtype_value("id")); - if (fcinfo->argnull[0]) + if (fcinfo->args[0].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_edge() graphid cannot be NULL"))); @@ -2239,7 +2247,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("label")); - if (fcinfo->argnull[3]) + if (fcinfo->args[3].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_vertex() label cannot be NULL"))); @@ -2251,7 +2259,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("end_id")); - if (fcinfo->argnull[2]) + if (fcinfo->args[2].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_edge() endid cannot be NULL"))); @@ -2264,7 +2272,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) result.res = push_agtype_value(&result.parse_state, WAGT_KEY, string_to_agtype_value("start_id")); - if (fcinfo->argnull[1]) + if (fcinfo->args[1].isnull) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("_agtype_build_edge() startid cannot be NULL"))); @@ -2278,7 +2286,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS) string_to_agtype_value("properties")); /* if the properties object is null, push an empty object */ - if (fcinfo->argnull[4]) + if (fcinfo->args[4].isnull) { result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, NULL); @@ -4490,7 +4498,7 @@ Datum column_get_datum(TupleDesc tupdesc, HeapTuple tuple, int column, * function returns a pointer to a duplicated string that needs to be freed * when you are finished using it. */ -static char *get_label_name(const char *graph_name, int64 graphid) +static char *get_label_name(const char *graph_name, int64 label_id) { ScanKeyData scan_keys[2]; Relation ag_label; @@ -4498,48 +4506,47 @@ static char *get_label_name(const char *graph_name, int64 graphid) HeapTuple tuple; TupleDesc tupdesc; char *result = NULL; + bool column_is_null; - Oid graphoid = get_graph_oid(graph_name); + Oid graph_id = get_graph_oid(graph_name); /* scankey for first match in ag_label, column 2, graphoid, BTEQ, OidEQ */ ScanKeyInit(&scan_keys[0], Anum_ag_label_graph, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(graphoid)); + F_OIDEQ, ObjectIdGetDatum(graph_id)); /* scankey for second match in ag_label, column 3, label id, BTEQ, Int4EQ */ ScanKeyInit(&scan_keys[1], Anum_ag_label_id, BTEqualStrategyNumber, - F_INT4EQ, Int32GetDatum(get_graphid_label_id(graphid))); + F_INT42EQ, Int32GetDatum(get_graphid_label_id(label_id))); - ag_label = heap_open(ag_relation_id("ag_label", "table"), ShareLock); - scan_desc = systable_beginscan(ag_label, - ag_relation_id("ag_label_graph_id_index", - "index"), true, NULL, 2, - scan_keys); + ag_label = table_open(ag_label_relation_id(), ShareLock); + scan_desc = systable_beginscan(ag_label, ag_label_graph_oid_index_id(), true, + NULL, 2, scan_keys); tuple = systable_getnext(scan_desc); if (!HeapTupleIsValid(tuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("graphid %lu does not exist", graphid))); + errmsg("graphid abc %lu does not exist", label_id))); } /* get the tupdesc - we don't need to release this one */ tupdesc = RelationGetDescr(ag_label); /* bail if the number of columns differs */ - if (tupdesc->natts != 5) + if (tupdesc->natts != Natts_ag_label) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("Invalid number of attributes for ag_catalog.ag_label"))); 
/* get the label name */ - result = NameStr(*DatumGetName(column_get_datum(tupdesc, tuple, 0, "name", - NAMEOID, true))); + result = NameStr(*DatumGetName( + heap_getattr(tuple, Anum_ag_label_name, tupdesc, &column_is_null))); /* duplicate it */ result = strdup(result); /* end the scan and close the relation */ systable_endscan(scan_desc); - heap_close(ag_label, ShareLock); + table_close(ag_label, ShareLock); return result; } @@ -4549,7 +4556,7 @@ static Datum get_vertex(const char *graph, const char *vertex_label, { ScanKeyData scan_keys[1]; Relation graph_vertex_label; - HeapScanDesc scan_desc; + TableScanDesc scan_desc; HeapTuple tuple; TupleDesc tupdesc; Datum id, properties, result; @@ -4567,8 +4574,8 @@ static Datum get_vertex(const char *graph, const char *vertex_label, Int64GetDatum(graphid)); /* open the relation (table), begin the scan, and get the tuple */ - graph_vertex_label = heap_open(vertex_label_table_oid, ShareLock); - scan_desc = heap_beginscan(graph_vertex_label, snapshot, 1, scan_keys); + graph_vertex_label = table_open(vertex_label_table_oid, ShareLock); + scan_desc = table_beginscan(graph_vertex_label, snapshot, 1, scan_keys); tuple = heap_getnext(scan_desc, ForwardScanDirection); /* bail if the tuple isn't valid */ @@ -4576,7 +4583,7 @@ static Datum get_vertex(const char *graph, const char *vertex_label, { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("graphid %lu does not exist", graphid))); + errmsg("graphid cde %lu does not exist", graphid))); } /* get the tupdesc - we don't need to release this one */ @@ -4597,8 +4604,8 @@ static Datum get_vertex(const char *graph, const char *vertex_label, result = DirectFunctionCall3(_agtype_build_vertex, id, CStringGetDatum(vertex_label), properties); /* end the scan and close the relation */ - heap_endscan(scan_desc); - heap_close(graph_vertex_label, ShareLock); + table_endscan(scan_desc); + table_close(graph_vertex_label, ShareLock); /* return the vertex datum */ return result; } @@ -4612,7 +4619,7 @@ Datum age_startnode(PG_FUNCTION_ARGS) agtype_value *agtv_value = NULL; char *graph_name = NULL; char *label_name = NULL; - graphid graph_id; + graphid graph_oid; Datum result; /* we need the graph name */ @@ -4654,14 +4661,14 @@ Datum age_startnode(PG_FUNCTION_ARGS) /* it must not be null and must be an integer */ Assert(agtv_value != NULL); Assert(agtv_value->type = AGTV_INTEGER); - graph_id = agtv_value->val.int_value; + graph_oid = agtv_value->val.int_value; /* get the label */ - label_name = get_label_name(graph_name, graph_id); + label_name = get_label_name(graph_name, graph_oid); /* it must not be null and must be a string */ Assert(label_name != NULL); - result = get_vertex(graph_name, label_name, graph_id); + result = get_vertex(graph_name, label_name, graph_oid); free(label_name); @@ -4677,7 +4684,7 @@ Datum age_endnode(PG_FUNCTION_ARGS) agtype_value *agtv_value = NULL; char *graph_name = NULL; char *label_name = NULL; - graphid graph_id; + graphid graph_oid; Datum result; /* we need the graph name */ @@ -4719,14 +4726,14 @@ Datum age_endnode(PG_FUNCTION_ARGS) /* it must not be null and must be an integer */ Assert(agtv_value != NULL); Assert(agtv_value->type = AGTV_INTEGER); - graph_id = agtv_value->val.int_value; + graph_oid = agtv_value->val.int_value; /* get the label */ - label_name = get_label_name(graph_name, graph_id); + label_name = get_label_name(graph_name, graph_oid); /* it must not be null and must be a string */ Assert(label_name != NULL); - result = get_vertex(graph_name, label_name, 
graph_id); + result = get_vertex(graph_name, label_name, graph_oid); free(label_name); @@ -6931,10 +6938,9 @@ Datum age_replace(PG_FUNCTION_ARGS) * We need the strings as a text strings so that we can let PG deal with * multibyte characters in the string. */ - text_result = DatumGetTextPP(DirectFunctionCall3(replace_text, - PointerGetDatum(text_string), - PointerGetDatum(text_search), - PointerGetDatum(text_replace))); + text_result = DatumGetTextPP(DirectFunctionCall3Coll( + replace_text, C_COLLATION_OID, PointerGetDatum(text_string), + PointerGetDatum(text_search), PointerGetDatum(text_replace))); /* convert it back to a cstring */ string = text_to_cstring(text_result); diff --git a/src/backend/utils/adt/agtype_gin.c b/src/backend/utils/adt/agtype_gin.c index 669935c21..9a9adc9b9 100644 --- a/src/backend/utils/adt/agtype_gin.c +++ b/src/backend/utils/adt/agtype_gin.c @@ -33,6 +33,7 @@ #include "access/stratnum.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" +#include "utils/float.h" #include "utils/builtins.h" #include "utils/varlena.h" diff --git a/src/backend/utils/adt/graphid.c b/src/backend/utils/adt/graphid.c index bd65b957e..e46d9c432 100644 --- a/src/backend/utils/adt/graphid.c +++ b/src/backend/utils/adt/graphid.c @@ -36,7 +36,7 @@ Oid get_GRAPHIDOID(void) { if (g_GRAPHIDOID == InvalidOid) { - g_GRAPHIDOID = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum("graphid"), + g_GRAPHIDOID = GetSysCacheOid1(TYPENAMENSP, CStringGetDatum("graphid"), ObjectIdGetDatum(ag_catalog_namespace_id())); } @@ -48,7 +48,7 @@ Oid get_GRAPHIDARRAYOID(void) { if (g_GRAPHIDARRAYOID == InvalidOid) { - g_GRAPHIDARRAYOID = GetSysCacheOid2(TYPENAMENSP, + g_GRAPHIDARRAYOID = GetSysCacheOid1(TYPENAMENSP, CStringGetDatum("_graphid"), ObjectIdGetDatum(ag_catalog_namespace_id())); } diff --git a/src/backend/utils/ag_func.c b/src/backend/utils/ag_func.c index cbd2167e2..35a03464b 100644 --- a/src/backend/utils/ag_func.c +++ b/src/backend/utils/ag_func.c @@ -27,7 +27,6 @@ #include "access/htup.h" #include "access/htup_details.h" #include "catalog/pg_proc.h" -#include "fmgr.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -54,6 +53,7 @@ bool is_oid_ag_func(Oid func_oid, const char *func_name) ReleaseSysCache(proctup); return false; } + nspid = proc->pronamespace; ReleaseSysCache(proctup); @@ -81,7 +81,8 @@ Oid get_ag_func_oid(const char *func_name, const int nargs, ...) arg_types = buildoidvector(oids, nargs); - func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, CStringGetDatum(func_name), + func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, Anum_pg_proc_oid, + CStringGetDatum(func_name), PointerGetDatum(arg_types), ObjectIdGetDatum(ag_catalog_namespace_id())); if (!OidIsValid(func_oid)) @@ -111,7 +112,8 @@ Oid get_pg_func_oid(const char *func_name, const int nargs, ...) 
arg_types = buildoidvector(oids, nargs); - func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, CStringGetDatum(func_name), + func_oid = GetSysCacheOid3(PROCNAMEARGSNSP, Anum_pg_proc_oid, + CStringGetDatum(func_name), PointerGetDatum(arg_types), ObjectIdGetDatum(pg_catalog_namespace_id())); if (!OidIsValid(func_oid)) diff --git a/src/backend/utils/cache/ag_cache.c b/src/backend/utils/cache/ag_cache.c index ec34355c6..09a5f51f4 100644 --- a/src/backend/utils/cache/ag_cache.c +++ b/src/backend/utils/cache/ag_cache.c @@ -26,8 +26,8 @@ #include "access/htup_details.h" #include "access/skey.h" #include "access/stratnum.h" -#include "access/sysattr.h" #include "access/tupdesc.h" +#include "catalog/pg_collation.h" #include "fmgr.h" #include "storage/lockdefs.h" #include "utils/builtins.h" @@ -69,17 +69,17 @@ typedef struct label_name_graph_cache_entry label_cache_data data; } label_name_graph_cache_entry; -typedef struct label_graph_id_cache_key +typedef struct label_graph_oid_cache_key { Oid graph; int32 id; -} label_graph_id_cache_key; +} label_graph_oid_cache_key; -typedef struct label_graph_id_cache_entry +typedef struct label_graph_oid_cache_entry { - label_graph_id_cache_key key; // hash key + label_graph_oid_cache_key key; // hash key label_cache_data data; -} label_graph_id_cache_entry; +} label_graph_oid_cache_entry; typedef struct label_relation_cache_entry { @@ -95,17 +95,13 @@ static ScanKeyData graph_name_scan_keys[1]; static HTAB *graph_namespace_cache_hash = NULL; static ScanKeyData graph_namespace_scan_keys[1]; -// ag_label.oid -static HTAB *label_oid_cache_hash = NULL; -static ScanKeyData label_oid_scan_keys[1]; - // ag_label.name, ag_label.graph static HTAB *label_name_graph_cache_hash = NULL; static ScanKeyData label_name_graph_scan_keys[2]; // ag_label.graph, ag_label.id -static HTAB *label_graph_id_cache_hash = NULL; -static ScanKeyData label_graph_id_scan_keys[2]; +static HTAB *label_graph_oid_cache_hash = NULL; +static ScanKeyData label_graph_oid_scan_keys[2]; // ag_label.relation static HTAB *label_relation_cache_hash = NULL; @@ -136,27 +132,23 @@ static void fill_graph_cache_data(graph_cache_data *cache_data, // ag_label static void initialize_label_caches(void); static void create_label_caches(void); -static void create_label_oid_cache(void); static void create_label_name_graph_cache(void); -static void create_label_graph_id_cache(void); +static void create_label_graph_oid_cache(void); static void create_label_relation_cache(void); static void invalidate_label_caches(Datum arg, Oid relid); -static void invalidate_label_oid_cache(Oid relid); -static void flush_label_oid_cache(void); static void invalidate_label_name_graph_cache(Oid relid); static void flush_label_name_graph_cache(void); -static void invalidate_label_graph_id_cache(Oid relid); -static void flush_label_graph_id_cache(void); +static void invalidate_label_graph_oid_cache(Oid relid); +static void flush_label_graph_oid_cache(void); static void invalidate_label_relation_cache(Oid relid); static void flush_label_relation_cache(void); -static label_cache_data *search_label_oid_cache_miss(Oid oid); static label_cache_data *search_label_name_graph_cache_miss(Name name, Oid graph); static void *label_name_graph_cache_hash_search(Name name, Oid graph, - HASHACTION action, - bool *found); -static label_cache_data *search_label_graph_id_cache_miss(Oid graph, int32 id); -static void *label_graph_id_cache_hash_search(Oid graph, int32 id, + HASHACTION action, bool *found); +static label_cache_data 
*search_label_graph_oid_cache_miss(Oid graph, + uint32 id); +static void *label_graph_oid_cache_hash_search(uint32 graph, int32 id, HASHACTION action, bool *found); static label_cache_data *search_label_relation_cache_miss(Oid relation); static void fill_label_cache_data(label_cache_data *cache_data, @@ -185,7 +177,7 @@ static void ag_cache_scan_key_init(ScanKey entry, AttrNumber attno, entry->sk_attno = attno; entry->sk_strategy = BTEqualStrategyNumber; entry->sk_subtype = InvalidOid; - entry->sk_collation = InvalidOid; + entry->sk_collation = C_COLLATION_OID; fmgr_info_cxt(func, &entry->sk_func, CacheMemoryContext); entry->sk_argument = (Datum)0; } @@ -353,11 +345,11 @@ static graph_cache_data *search_graph_name_cache_miss(Name name) scan_keys[0].sk_argument = NameGetDatum(name); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might flush the graph caches. This is OK because this function is called * when the desired entry is not in the cache. */ - ag_graph = heap_open(ag_graph_relation_id(), AccessShareLock); + ag_graph = table_open(ag_graph_relation_id(), AccessShareLock); scan_desc = systable_beginscan(ag_graph, ag_graph_name_index_id(), true, NULL, 1, scan_keys); @@ -366,7 +358,7 @@ static graph_cache_data *search_graph_name_cache_miss(Name name) if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_graph, AccessShareLock); + table_close(ag_graph, AccessShareLock); return NULL; } @@ -379,7 +371,7 @@ static graph_cache_data *search_graph_name_cache_miss(Name name) fill_graph_cache_data(&entry->data, tuple, RelationGetDescr(ag_graph)); systable_endscan(scan_desc); - heap_close(ag_graph, AccessShareLock); + table_close(ag_graph, AccessShareLock); return &entry->data; } @@ -412,11 +404,11 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace) scan_keys[0].sk_argument = ObjectIdGetDatum(namespace); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might flush the graph caches. This is OK because this function is called * when the desired entry is not in the cache. 
*/ - ag_graph = heap_open(ag_graph_relation_id(), AccessShareLock); + ag_graph = table_open(ag_graph_relation_id(), AccessShareLock); scan_desc = systable_beginscan(ag_graph, ag_graph_namespace_index_id(), true, NULL, 1, scan_keys); @@ -426,7 +418,7 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace) if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_graph, AccessShareLock); + table_close(ag_graph, AccessShareLock); return NULL; } @@ -440,7 +432,7 @@ static graph_cache_data *search_graph_namespace_cache_miss(Oid namespace) fill_graph_cache_data(&entry->data, tuple, RelationGetDescr(ag_graph)); systable_endscan(scan_desc); - heap_close(ag_graph, AccessShareLock); + table_close(ag_graph, AccessShareLock); return &entry->data; } @@ -451,8 +443,8 @@ static void fill_graph_cache_data(graph_cache_data *cache_data, bool is_null; Datum value; - // ag_graph.oid - value = heap_getattr(tuple, ObjectIdAttributeNumber, tuple_desc, &is_null); + // ag_graph.id + value = heap_getattr(tuple, Anum_ag_graph_oid, tuple_desc, &is_null); Assert(!is_null); cache_data->oid = DatumGetObjectId(value); // ag_graph.name @@ -467,20 +459,16 @@ static void fill_graph_cache_data(graph_cache_data *cache_data, static void initialize_label_caches(void) { - // ag_label.oid - ag_cache_scan_key_init(&label_oid_scan_keys[0], ObjectIdAttributeNumber, - F_OIDEQ); - // ag_label.name, ag_label.graph ag_cache_scan_key_init(&label_name_graph_scan_keys[0], Anum_ag_label_name, F_NAMEEQ); ag_cache_scan_key_init(&label_name_graph_scan_keys[1], Anum_ag_label_graph, - F_OIDEQ); + F_INT4EQ); // ag_label.graph, ag_label.id - ag_cache_scan_key_init(&label_graph_id_scan_keys[0], Anum_ag_label_graph, - F_OIDEQ); - ag_cache_scan_key_init(&label_graph_id_scan_keys[1], Anum_ag_label_id, + ag_cache_scan_key_init(&label_graph_oid_scan_keys[0], Anum_ag_label_graph, + F_INT4EQ); + ag_cache_scan_key_init(&label_graph_oid_scan_keys[1], Anum_ag_label_id, F_INT4EQ); // ag_label.relation @@ -502,32 +490,11 @@ static void create_label_caches(void) * All the hash tables are created using their dedicated memory contexts * which are under TopMemoryContext. */ - create_label_oid_cache(); create_label_name_graph_cache(); - create_label_graph_id_cache(); + create_label_graph_oid_cache(); create_label_relation_cache(); } -static void create_label_oid_cache(void) -{ - HASHCTL hash_ctl; - - /* - * Use label_cache_data itself since it has oid field as its first field - * that is the key for this hash. - */ - MemSet(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(label_cache_data); - - /* - * Please see the comment of hash_create() for the nelem value 16 here. - * HASH_BLOBS flag is set because the size of the key is sizeof(uint32). - */ - label_oid_cache_hash = hash_create("ag_label (oid) cache", 16, &hash_ctl, - HASH_ELEM | HASH_BLOBS); -} - static void create_label_name_graph_cache(void) { HASHCTL hash_ctl; @@ -545,19 +512,19 @@ static void create_label_name_graph_cache(void) HASH_ELEM | HASH_BLOBS); } -static void create_label_graph_id_cache(void) +static void create_label_graph_oid_cache(void) { HASHCTL hash_ctl; MemSet(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(label_graph_id_cache_key); - hash_ctl.entrysize = sizeof(label_graph_id_cache_entry); + hash_ctl.keysize = sizeof(label_graph_oid_cache_key); + hash_ctl.entrysize = sizeof(label_graph_oid_cache_entry); /* * Please see the comment of hash_create() for the nelem value 16 here. 
* HASH_BLOBS flag is set because the key for this hash is fixed-size. */ - label_graph_id_cache_hash = hash_create("ag_label (graph, id) cache", 16, + label_graph_oid_cache_hash = hash_create("ag_label (graph, id) cache", 16, &hash_ctl, HASH_ELEM | HASH_BLOBS); } @@ -584,75 +551,18 @@ static void invalidate_label_caches(Datum arg, Oid relid) if (OidIsValid(relid)) { - invalidate_label_oid_cache(relid); invalidate_label_name_graph_cache(relid); - invalidate_label_graph_id_cache(relid); + invalidate_label_graph_oid_cache(relid); invalidate_label_relation_cache(relid); } else { - flush_label_oid_cache(); flush_label_name_graph_cache(); - flush_label_graph_id_cache(); + flush_label_graph_oid_cache(); flush_label_relation_cache(); } } -static void invalidate_label_oid_cache(Oid relid) -{ - HASH_SEQ_STATUS hash_seq; - - hash_seq_init(&hash_seq, label_oid_cache_hash); - for (;;) - { - label_cache_data *entry; - void *removed; - - entry = hash_seq_search(&hash_seq); - if (!entry) - break; - - if (entry->relation != relid) - continue; - - removed = hash_search(label_oid_cache_hash, &entry->oid, HASH_REMOVE, - NULL); - hash_seq_term(&hash_seq); - - if (!removed) - { - ereport(ERROR, - (errmsg_internal("label (oid) cache corrupted"))); - } - - break; - } -} - -static void flush_label_oid_cache(void) -{ - HASH_SEQ_STATUS hash_seq; - - hash_seq_init(&hash_seq, label_name_graph_cache_hash); - for (;;) - { - label_cache_data *entry; - void *removed; - - entry = hash_seq_search(&hash_seq); - if (!entry) - break; - - removed = hash_search(label_oid_cache_hash, &entry->oid, HASH_REMOVE, - NULL); - if (!removed) - { - ereport(ERROR, - (errmsg_internal("label (oid) cache corrupted"))); - } - } -} - static void invalidate_label_name_graph_cache(Oid relid) { HASH_SEQ_STATUS hash_seq; @@ -708,14 +618,14 @@ static void flush_label_name_graph_cache(void) } } -static void invalidate_label_graph_id_cache(Oid relid) +static void invalidate_label_graph_oid_cache(Oid relid) { HASH_SEQ_STATUS hash_seq; - hash_seq_init(&hash_seq, label_graph_id_cache_hash); + hash_seq_init(&hash_seq, label_graph_oid_cache_hash); for (;;) { - label_graph_id_cache_entry *entry; + label_graph_oid_cache_entry *entry; void *removed; entry = hash_seq_search(&hash_seq); @@ -725,7 +635,7 @@ static void invalidate_label_graph_id_cache(Oid relid) if (entry->data.relation != relid) continue; - removed = hash_search(label_graph_id_cache_hash, &entry->key, + removed = hash_search(label_graph_oid_cache_hash, &entry->key, HASH_REMOVE, NULL); hash_seq_term(&hash_seq); @@ -739,21 +649,21 @@ static void invalidate_label_graph_id_cache(Oid relid) } } -static void flush_label_graph_id_cache(void) +static void flush_label_graph_oid_cache(void) { HASH_SEQ_STATUS hash_seq; - hash_seq_init(&hash_seq, label_graph_id_cache_hash); + hash_seq_init(&hash_seq, label_graph_oid_cache_hash); for (;;) { - label_graph_id_cache_entry *entry; + label_graph_oid_cache_entry *entry; void *removed; entry = hash_seq_search(&hash_seq); if (!entry) break; - removed = hash_search(label_graph_id_cache_hash, &entry->key, + removed = hash_search(label_graph_oid_cache_hash, &entry->key, HASH_REMOVE, NULL); if (!removed) { @@ -802,72 +712,12 @@ static void flush_label_relation_cache(void) } } -label_cache_data *search_label_oid_cache(Oid oid) -{ - label_cache_data *entry; - - initialize_caches(); - - entry = hash_search(label_oid_cache_hash, &oid, HASH_FIND, NULL); - if (entry) - return entry; - - return search_label_oid_cache_miss(oid); -} - -static label_cache_data 
*search_label_oid_cache_miss(Oid oid) -{ - ScanKeyData scan_keys[1]; - Relation ag_label; - SysScanDesc scan_desc; - HeapTuple tuple; - bool found; - label_cache_data *entry; - - memcpy(scan_keys, label_oid_scan_keys, sizeof(label_oid_scan_keys)); - scan_keys[0].sk_argument = ObjectIdGetDatum(oid); - - /* - * Calling heap_open() might call AcceptInvalidationMessage() and that - * might invalidate the label caches. This is OK because this function is - * called when the desired entry is not in the cache. - */ - ag_label = heap_open(ag_label_relation_id(), AccessShareLock); - scan_desc = systable_beginscan(ag_label, ag_label_oid_index_id(), true, - NULL, 1, scan_keys); - - // don't need to loop over scan_desc because ag_label_oid_index is UNIQUE - tuple = systable_getnext(scan_desc); - if (!HeapTupleIsValid(tuple)) - { - systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); - - return NULL; - } - - // get a new entry - entry = hash_search(label_oid_cache_hash, &oid, HASH_ENTER, &found); - Assert(!found); // no concurrent update on label_oid_cache_hash - - // fill the new entry with the retrieved tuple - fill_label_cache_data(entry, tuple, RelationGetDescr(ag_label)); - // make sure that the oid field is the same with the hash key(oid) - Assert(entry->oid == oid); - - systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); - - return entry; -} - label_cache_data *search_label_name_graph_cache(const char *name, Oid graph) { NameData name_key; label_name_graph_cache_entry *entry; AssertArg(name); - AssertArg(OidIsValid(graph)); initialize_caches(); @@ -896,11 +746,11 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name, scan_keys[1].sk_argument = ObjectIdGetDatum(graph); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might invalidate the label caches. This is OK because this function is * called when the desired entry is not in the cache. 
*/ - ag_label = heap_open(ag_label_relation_id(), AccessShareLock); + ag_label = table_open(ag_label_relation_id(), AccessShareLock); scan_desc = systable_beginscan(ag_label, ag_label_name_graph_index_id(), true, NULL, 2, scan_keys); @@ -912,7 +762,7 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name, if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return NULL; } @@ -926,7 +776,7 @@ static label_cache_data *search_label_name_graph_cache_miss(Name name, fill_label_cache_data(&entry->data, tuple, RelationGetDescr(ag_label)); systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return &entry->data; } @@ -943,81 +793,80 @@ static void *label_name_graph_cache_hash_search(Name name, Oid graph, return hash_search(label_name_graph_cache_hash, &key, action, found); } -label_cache_data *search_label_graph_id_cache(Oid graph, int32 id) +label_cache_data *search_label_graph_oid_cache(uint32 graph_oid, int32 id) { - label_graph_id_cache_entry *entry; + label_graph_oid_cache_entry *entry; - AssertArg(OidIsValid(graph)); AssertArg(label_id_is_valid(id)); initialize_caches(); - entry = label_graph_id_cache_hash_search(graph, id, HASH_FIND, NULL); + entry = label_graph_oid_cache_hash_search(graph_oid, id, HASH_FIND, NULL); if (entry) return &entry->data; - return search_label_graph_id_cache_miss(graph, id); + return search_label_graph_oid_cache_miss(graph_oid, id); } -static label_cache_data *search_label_graph_id_cache_miss(Oid graph, int32 id) +static label_cache_data *search_label_graph_oid_cache_miss(Oid graph, uint32 id) { ScanKeyData scan_keys[2]; Relation ag_label; SysScanDesc scan_desc; HeapTuple tuple; bool found; - label_graph_id_cache_entry *entry; + label_graph_oid_cache_entry *entry; - memcpy(scan_keys, label_graph_id_scan_keys, - sizeof(label_graph_id_scan_keys)); + memcpy(scan_keys, label_graph_oid_scan_keys, + sizeof(label_graph_oid_scan_keys)); scan_keys[0].sk_argument = ObjectIdGetDatum(graph); scan_keys[1].sk_argument = Int32GetDatum(id); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might invalidate the label caches. This is OK because this function is * called when the desired entry is not in the cache. 
*/ - ag_label = heap_open(ag_label_relation_id(), AccessShareLock); - scan_desc = systable_beginscan(ag_label, ag_label_graph_id_index_id(), - true, NULL, 2, scan_keys); + ag_label = table_open(ag_label_relation_id(), AccessShareLock); + scan_desc = systable_beginscan(ag_label, ag_label_graph_oid_index_id(), true, + NULL, 2, scan_keys); /* - * don't need to loop over scan_desc because ag_label_graph_id_index is + * don't need to loop over scan_desc because ag_label_graph_oid_index is * UNIQUE */ tuple = systable_getnext(scan_desc); if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return NULL; } // get a new entry - entry = label_graph_id_cache_hash_search(graph, id, HASH_ENTER, &found); - Assert(!found); // no concurrent update on label_graph_id_cache_hash + entry = label_graph_oid_cache_hash_search(graph, id, HASH_ENTER, &found); + Assert(!found); // no concurrent update on label_graph_oid_cache_hash // fill the new entry with the retrieved tuple fill_label_cache_data(&entry->data, tuple, RelationGetDescr(ag_label)); systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return &entry->data; } -static void *label_graph_id_cache_hash_search(Oid graph, int32 id, +static void *label_graph_oid_cache_hash_search(uint32 graph, int32 id, HASHACTION action, bool *found) { - label_graph_id_cache_key key; + label_graph_oid_cache_key key; - // initialize the hash key for label_graph_id_cache_hash + // initialize the hash key for label_graph_oid_cache_hash key.graph = graph; key.id = id; - return hash_search(label_graph_id_cache_hash, &key, action, found); + return hash_search(label_graph_oid_cache_hash, &key, action, found); } label_cache_data *search_label_relation_cache(Oid relation) @@ -1047,13 +896,13 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation) scan_keys[0].sk_argument = ObjectIdGetDatum(relation); /* - * Calling heap_open() might call AcceptInvalidationMessage() and that + * Calling table_open() might call AcceptInvalidationMessage() and that * might invalidate the label caches. This is OK because this function is * called when the desired entry is not in the cache. 
*/ - ag_label = heap_open(ag_label_relation_id(), AccessShareLock); - scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(), - true, NULL, 1, scan_keys); + ag_label = table_open(ag_label_relation_id(), AccessShareLock); + scan_desc = systable_beginscan(ag_label, ag_label_relation_index_id(), true, + NULL, 1, scan_keys); // don't need to loop over scan_desc because ag_label_relation_index is // UNIQUE @@ -1061,7 +910,7 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation) if (!HeapTupleIsValid(tuple)) { systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return NULL; } @@ -1075,7 +924,7 @@ static label_cache_data *search_label_relation_cache_miss(Oid relation) fill_label_cache_data(entry, tuple, RelationGetDescr(ag_label)); systable_endscan(scan_desc); - heap_close(ag_label, AccessShareLock); + table_close(ag_label, AccessShareLock); return entry; } @@ -1086,10 +935,6 @@ static void fill_label_cache_data(label_cache_data *cache_data, bool is_null; Datum value; - // ag_label.oid - value = heap_getattr(tuple, ObjectIdAttributeNumber, tuple_desc, &is_null); - Assert(!is_null); - cache_data->oid = DatumGetObjectId(value); // ag_label.name value = heap_getattr(tuple, Anum_ag_label_name, tuple_desc, &is_null); Assert(!is_null); diff --git a/src/backend/utils/load/ag_load_edges.c b/src/backend/utils/load/ag_load_edges.c index 419f0097e..a7a88607b 100644 --- a/src/backend/utils/load/ag_load_edges.c +++ b/src/backend/utils/load/ag_load_edges.c @@ -17,16 +17,11 @@ * under the License. */ -#include -#include -#include -#include -#include +#include "postgres.h" -#include "utils/load/csv.h" #include "utils/load/ag_load_edges.h" #include "utils/load/age_load.h" - +#include "utils/load/csv.h" void edge_field_cb(void *field, size_t field_len, void *data) { @@ -66,14 +61,14 @@ void edge_row_cb(int delim __attribute__((unused)), void *data) size_t i, n_fields; int64 start_id_int; - graphid start_vertex_graph_id; + graphid start_vertex_graph_oid; int start_vertex_type_id; int64 end_id_int; - graphid end_vertex_graph_id; + graphid end_vertex_graph_oid; int end_vertex_type_id; - graphid object_graph_id; + graphid object_graph_oid; agtype* props = NULL; @@ -94,22 +89,22 @@ void edge_row_cb(int delim __attribute__((unused)), void *data) } else { - object_graph_id = make_graphid(cr->object_id, (int64)cr->row); + object_graph_oid = make_graphid(cr->object_id, (int64)cr->row); start_id_int = strtol(cr->fields[0], NULL, 10); - start_vertex_type_id = get_label_id(cr->fields[1], cr->graph_id); + start_vertex_type_id = get_label_id(cr->fields[1], cr->graph_oid); end_id_int = strtol(cr->fields[2], NULL, 10); - end_vertex_type_id = get_label_id(cr->fields[3], cr->graph_id); + end_vertex_type_id = get_label_id(cr->fields[3], cr->graph_oid); - start_vertex_graph_id = make_graphid(start_vertex_type_id, start_id_int); - end_vertex_graph_id = make_graphid(end_vertex_type_id, end_id_int); + start_vertex_graph_oid = make_graphid(start_vertex_type_id, start_id_int); + end_vertex_graph_oid = make_graphid(end_vertex_type_id, end_id_int); props = create_agtype_from_list_i(cr->header, cr->fields, n_fields, 3); - insert_edge_simple(cr->graph_id, cr->object_name, - object_graph_id, start_vertex_graph_id, - end_vertex_graph_id, props); + insert_edge_simple(cr->graph_oid, cr->object_name, + object_graph_oid, start_vertex_graph_oid, + end_vertex_graph_oid, props); } @@ -155,7 +150,7 @@ static int is_term(unsigned char c) int 
create_edges_from_csv_file(char *file_path, char *graph_name, - Oid graph_id, + Oid graph_oid, char *object_name, int object_id ) { @@ -191,7 +186,7 @@ int create_edges_from_csv_file(char *file_path, cr.header_row_length = 0; cr.curr_row_length = 0; cr.graph_name = graph_name; - cr.graph_id = graph_id; + cr.graph_oid = graph_oid; cr.object_name = object_name; cr.object_id = object_id; diff --git a/src/backend/utils/load/ag_load_labels.c b/src/backend/utils/load/ag_load_labels.c index c0ddf69bb..af7d20446 100644 --- a/src/backend/utils/load/ag_load_labels.c +++ b/src/backend/utils/load/ag_load_labels.c @@ -101,7 +101,7 @@ void vertex_row_cb(int delim __attribute__((unused)), void *data) csv_vertex_reader *cr = (csv_vertex_reader*)data; agtype *props = NULL; size_t i, n_fields; - graphid object_graph_id; + graphid object_graph_oid; int64 label_id_int; n_fields = cr->cur_field; @@ -131,12 +131,12 @@ void vertex_row_cb(int delim __attribute__((unused)), void *data) label_id_int = (int64)cr->row; } - object_graph_id = make_graphid(cr->object_id, label_id_int); + object_graph_oid = make_graphid(cr->object_id, label_id_int); props = create_agtype_from_list(cr->header, cr->fields, n_fields, label_id_int); - insert_vertex_simple(cr->graph_id, cr->object_name, - object_graph_id, props); + insert_vertex_simple(cr->graph_oid, cr->object_name, + object_graph_oid, props); } @@ -183,7 +183,7 @@ static int is_term(unsigned char c) int create_labels_from_csv_file(char *file_path, char *graph_name, - Oid graph_id, + Oid graph_oid, char *object_name, int object_id, bool id_field_exists) @@ -221,7 +221,7 @@ int create_labels_from_csv_file(char *file_path, cr.header_row_length = 0; cr.curr_row_length = 0; cr.graph_name = graph_name; - cr.graph_id = graph_id; + cr.graph_oid = graph_oid; cr.object_name = object_name; cr.object_id = object_id; cr.id_field_exists = id_field_exists; diff --git a/src/backend/utils/load/age_load.c b/src/backend/utils/load/age_load.c index b80e95086..28ee211dd 100644 --- a/src/backend/utils/load/age_load.c +++ b/src/backend/utils/load/age_load.c @@ -21,67 +21,45 @@ #include "access/heapam.h" #include "access/xact.h" -#include "catalog/dependency.h" -#include "catalog/namespace.h" -#include "catalog/objectaddress.h" -#include "catalog/pg_class_d.h" -#include "commands/defrem.h" -#include "commands/sequence.h" -#include "commands/tablecmds.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/nodes.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "nodes/plannodes.h" -#include "nodes/primnodes.h" -#include "nodes/value.h" #include "parser/parse_node.h" -#include "parser/parser.h" #include "storage/lockdefs.h" #include "tcop/dest.h" -#include "tcop/utility.h" -#include "utils/acl.h" #include "utils/builtins.h" -#include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "catalog/ag_graph.h" #include "catalog/ag_label.h" -#include "commands/label_commands.h" -#include "utils/ag_cache.h" #include "utils/agtype.h" #include "utils/graphid.h" -#include "utils/load/age_load.h" -#include "utils/load/ag_load_labels.h" #include "utils/load/ag_load_edges.h" +#include "utils/load/ag_load_labels.h" +#include "utils/load/age_load.h" -static agtype* create_empty_agtype(void) +static agtype *create_empty_agtype(void) { agtype_in_state result; memset(&result, 0, sizeof(agtype_in_state)); - result.res = push_agtype_value(&result.parse_state, - WAGT_BEGIN_OBJECT, NULL); - result.res = push_agtype_value(&result.parse_state, - 
WAGT_END_OBJECT, NULL); + result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, + NULL); + result.res = push_agtype_value(&result.parse_state, WAGT_END_OBJECT, NULL); return agtype_value_to_agtype(result.res); } -agtype* create_agtype_from_list(char **header, char **fields, - size_t fields_len, int64 vertex_id) +agtype *create_agtype_from_list(char **header, char **fields, size_t fields_len, + int64 vertex_id) { agtype_in_state result; int i; memset(&result, 0, sizeof(agtype_in_state)); - result.res = push_agtype_value(&result.parse_state, - WAGT_BEGIN_OBJECT, NULL); + result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, + NULL); result.res = push_agtype_value(&result.parse_state, WAGT_KEY, @@ -117,12 +95,13 @@ agtype* create_agtype_from_list_i(char **header, char **fields, { return create_empty_agtype(); } + memset(&result, 0, sizeof(agtype_in_state)); - result.res = push_agtype_value(&result.parse_state, - WAGT_BEGIN_OBJECT, NULL); + result.res = push_agtype_value(&result.parse_state, WAGT_BEGIN_OBJECT, + NULL); - for (i = start_index; i> ENTRY_ID_BITS) @@ -62,8 +65,6 @@ typedef int64 graphid; graphid make_graphid(const int32 label_id, const int64 entry_id); int32 get_graphid_label_id(const graphid gid); int64 get_graphid_entry_id(const graphid gid); -Oid get_GRAPHIDOID(void); -Oid get_GRAPHIDARRAYOID(void); void clear_global_Oids_GRAPHID(void); #endif diff --git a/src/include/utils/load/ag_load_edges.h b/src/include/utils/load/ag_load_edges.h index eb5463ed8..3f4ffa8f9 100644 --- a/src/include/utils/load/ag_load_edges.h +++ b/src/include/utils/load/ag_load_edges.h @@ -20,11 +20,10 @@ #ifndef AG_LOAD_EDGES_H #define AG_LOAD_EDGES_H - -#include -#include #include +#include #include +#include #include @@ -80,7 +79,7 @@ typedef struct { size_t header_row_length; size_t curr_row_length; char *graph_name; - Oid graph_id; + Oid graph_oid; char *object_name; int object_id; char *start_vertex; @@ -92,7 +91,7 @@ typedef struct { void edge_field_cb(void *field, size_t field_len, void *data); void edge_row_cb(int delim __attribute__((unused)), void *data); -int create_edges_from_csv_file(char *file_path, char *graph_name, Oid graph_id, +int create_edges_from_csv_file(char *file_path, char *graph_name, Oid graph_oid, char *object_name, int object_id ); #endif //AG_LOAD_EDGES_H diff --git a/src/include/utils/load/ag_load_labels.h b/src/include/utils/load/ag_load_labels.h index 5689c23db..8bf24c246 100644 --- a/src/include/utils/load/ag_load_labels.h +++ b/src/include/utils/load/ag_load_labels.h @@ -21,11 +21,10 @@ #ifndef AG_LOAD_LABELS_H #define AG_LOAD_LABELS_H - -#include -#include #include +#include #include +#include #include #include "postgres.h" @@ -65,7 +64,6 @@ #include "utils/agtype.h" #include "utils/graphid.h" - #define AGE_VERTIX 1 #define AGE_EDGE 2 @@ -89,7 +87,7 @@ typedef struct { size_t header_row_length; size_t curr_row_length; char *graph_name; - Oid graph_id; + Oid graph_oid; char *object_name; int object_id; bool id_field_exists; @@ -99,7 +97,7 @@ typedef struct { void vertex_field_cb(void *field, size_t field_len, void *data); void vertex_row_cb(int delim __attribute__((unused)), void *data); -int create_labels_from_csv_file(char *file_path, char *graph_name, Oid graph_id, +int create_labels_from_csv_file(char *file_path, char *graph_name, Oid graph_oid, char *object_name, int object_id, bool id_field_exists); diff --git a/src/include/utils/load/age_load.h b/src/include/utils/load/age_load.h index 9eeca3614..d5fd19291 100644 --- 
a/src/include/utils/load/age_load.h +++ b/src/include/utils/load/age_load.h @@ -61,9 +61,9 @@ agtype* create_agtype_from_list(char **header, char **fields, size_t fields_len, int64 vertex_id); agtype* create_agtype_from_list_i(char **header, char **fields, size_t fields_len, size_t start_index); -void insert_vertex_simple(Oid graph_id, char* label_name, graphid vertex_id, - agtype* vertex_properties); -void insert_edge_simple(Oid graph_id, char* label_name, graphid edge_id, +void insert_vertex_simple(Oid graph_oid, char *label_name, graphid vertex_id, + agtype *vertex_properties); +void insert_edge_simple(Oid graph_oid, char *label_name, graphid edge_id, graphid start_id, graphid end_id, agtype* end_properties); From 1f5d611f7868c425ec90f67864fa63d94dfe078d Mon Sep 17 00:00:00 2001 From: Shoaib Date: Wed, 6 Jul 2022 10:04:48 +0200 Subject: [PATCH 05/18] updated cypher_kwlist.h --- src/backend/catalog/ag_label.c | 5 +- src/backend/executor/cypher_delete.c | 8 +- src/backend/executor/cypher_set.c | 35 ++- src/backend/executor/cypher_utils.c | 4 +- src/include/parser/cypher_kwlist.h | 48 ++++ tools/PerfectHash.pm | 376 +++++++++++++++++++++++++++ tools/gen_keywordlist.pl | 197 ++++++++++++++ 7 files changed, 657 insertions(+), 16 deletions(-) create mode 100644 src/include/parser/cypher_kwlist.h create mode 100644 tools/PerfectHash.pm create mode 100755 tools/gen_keywordlist.pl diff --git a/src/backend/catalog/ag_label.c b/src/backend/catalog/ag_label.c index 809487a3b..385517813 100644 --- a/src/backend/catalog/ag_label.c +++ b/src/backend/catalog/ag_label.c @@ -116,7 +116,7 @@ void delete_label(Oid relation) (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("label (relation=%u) does not exist", relation))); } - + CatalogTupleDelete(ag_label, &tuple->t_self); systable_endscan(scan_desc); @@ -303,7 +303,8 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) } table_endscan(scan_desc); - table_close(ag_label, RowExclusiveLock); + + destroy_entity_result_rel_info(resultRelInfo); table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); return labels; diff --git a/src/backend/executor/cypher_delete.c b/src/backend/executor/cypher_delete.c index a0c3c776e..7bb7c077d 100644 --- a/src/backend/executor/cypher_delete.c +++ b/src/backend/executor/cypher_delete.c @@ -487,7 +487,7 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, * improved. However, right now we have to scan every edge to see if * one has this vertex as a start or end vertex. 
*/ - foreach (lc, labels) + foreach(lc, labels) { char *label_name = lfirst(lc); ResultRelInfo *resultRelInfo; @@ -495,8 +495,8 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, HeapTuple tuple; TupleTableSlot *slot; - resultRelInfo = create_entity_result_rel_info(estate, graph_name, - label_name); + resultRelInfo = create_entity_result_rel_info(estate, + graph_name, label_name); scan_desc = table_beginscan(resultRelInfo->ri_RelationDesc, estate->es_snapshot, 0, NULL); @@ -506,7 +506,7 @@ static void find_connected_edges(CustomScanState *node, char *graph_name, &TTSOpsHeapTuple); // scan the table - while (true) + while(true) { graphid startid, endid; bool isNull; diff --git a/src/backend/executor/cypher_set.c b/src/backend/executor/cypher_set.c index 63a016635..16d94f118 100644 --- a/src/backend/executor/cypher_set.c +++ b/src/backend/executor/cypher_set.c @@ -112,8 +112,9 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, TM_Result lock_result; TM_Result update_result; Buffer buffer; + bool update_indexes; - //ResultRelInfo *saved_resultRelInfo = saved_resultRelInfo;; + //ResultRelInfo *saved_resultRelInfo; estate->es_result_relation_info = resultRelInfo; lockmode = ExecUpdateLockMode(estate, resultRelInfo); @@ -135,7 +136,16 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, { ExecConstraints(resultRelInfo, elemTupleSlot, estate); } - + /* + simple_table_tuple_update(resultRelInfo->ri_RelationDesc, + GetCurrentCommandId(true), + elemTupleSlot, estate->es_snapshot, + &update_indexes); + + if (resultRelInfo->ri_NumIndices > 0 && update_indexes) + //ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, + // NIL); + */ // Insert the tuple normally update_result = heap_update(resultRelInfo->ri_RelationDesc, &(tuple->t_self), tuple, @@ -148,14 +158,15 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Entity failed to be updated: %i", update_result))); } - + + &update_indexes = update_result == TM_Ok && !HeapTupleIsHeapOnly(tuple); + // Insert index entries for the tuple - if (resultRelInfo->ri_NumIndices > 0) + if (resultRelInfo->ri_NumIndices > 0 && update_indexes) { - ExecInsertIndexTuples(elemTupleSlot,estate, - false, NULL, NIL); + //ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, NIL); } - //ExecCloseIndices(resultRelInfo); + } ReleaseBuffer(buffer); @@ -377,6 +388,8 @@ static void process_update_list(CustomScanState *node) Datum new_entity; HeapTuple heap_tuple; char *clause_name = css->set_list->clause_name; + Oid relid; + Relation rel; update_item = (cypher_update_item *)lfirst(lc); @@ -412,8 +425,8 @@ static void process_update_list(CustomScanState *node) /* get the id and label for later */ id = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "id"); label = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "label"); + label_name = pnstrdup(label->val.string.val, label->val.string.len); - /* get the properties we need to update */ original_properties = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "properties"); @@ -454,9 +467,13 @@ static void process_update_list(CustomScanState *node) update_item->prop_name, new_property_value, remove_property); - + resultRelInfo = create_entity_result_rel_info( estate, css->set_list->graph_name, label_name); + //relid = RelationGetRelid(resultRelInfo->ri_RelationDesc); + //rel = table_open(relid, RowExclusiveLock); + + //ExecOpenIndices(resultRelInfo, false); slot = 
ExecInitExtraTupleSlot( estate, RelationGetDescr(resultRelInfo->ri_RelationDesc), diff --git a/src/backend/executor/cypher_utils.c b/src/backend/executor/cypher_utils.c index 15430da30..2d45f1812 100644 --- a/src/backend/executor/cypher_utils.c +++ b/src/backend/executor/cypher_utils.c @@ -59,6 +59,8 @@ ResultRelInfo *create_entity_result_rel_info(EState *estate, char *graph_name, RangeVar *rv; Relation label_relation; ResultRelInfo *resultRelInfo; + Oid relid; + Relation rel; ParseState *pstate = make_parsestate(NULL); @@ -170,7 +172,7 @@ TupleTableSlot *populate_edge_tts( * Find out if the entity still exists. This is for 'implicit' deletion * of an entity. */ -bool entity_exists(EState *estate, uint32 graph_oid, graphid id) +bool entity_exists(EState *estate, Oid graph_oid, graphid id) { label_cache_data *label; ScanKeyData scan_keys[1]; diff --git a/src/include/parser/cypher_kwlist.h b/src/include/parser/cypher_kwlist.h new file mode 100644 index 000000000..d15a0e34a --- /dev/null +++ b/src/include/parser/cypher_kwlist.h @@ -0,0 +1,48 @@ +PG_KEYWORD("all", ALL, RESERVED_KEYWORD) +PG_KEYWORD("analyze", ANALYZE, RESERVED_KEYWORD) +PG_KEYWORD("and", AND, RESERVED_KEYWORD) +PG_KEYWORD("as", AS, RESERVED_KEYWORD) +PG_KEYWORD("asc", ASC, RESERVED_KEYWORD) +PG_KEYWORD("ascending", ASCENDING, RESERVED_KEYWORD) +PG_KEYWORD("by", BY, RESERVED_KEYWORD) +PG_KEYWORD("call", CALL, RESERVED_KEYWORD) +PG_KEYWORD("case", CASE, RESERVED_KEYWORD) +PG_KEYWORD("coalesce", COALESCE, RESERVED_KEYWORD) +PG_KEYWORD("contains", CONTAINS, RESERVED_KEYWORD) +PG_KEYWORD("create", CREATE, RESERVED_KEYWORD) +PG_KEYWORD("delete", DELETE, RESERVED_KEYWORD) +PG_KEYWORD("desc", DESC, RESERVED_KEYWORD) +PG_KEYWORD("descending", DESCENDING, RESERVED_KEYWORD) +PG_KEYWORD("detach", DETACH, RESERVED_KEYWORD) +PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD) +PG_KEYWORD("else", ELSE, RESERVED_KEYWORD) +PG_KEYWORD("end", END_P, RESERVED_KEYWORD) +PG_KEYWORD("ends", ENDS, RESERVED_KEYWORD) +PG_KEYWORD("exists", EXISTS, RESERVED_KEYWORD) +PG_KEYWORD("explain", EXPLAIN, RESERVED_KEYWORD) +PG_KEYWORD("false", FALSE_P, RESERVED_KEYWORD) +PG_KEYWORD("in", IN, RESERVED_KEYWORD) +PG_KEYWORD("is", IS, RESERVED_KEYWORD) +PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD) +PG_KEYWORD("match", MATCH, RESERVED_KEYWORD) +PG_KEYWORD("merge", MERGE, RESERVED_KEYWORD) +PG_KEYWORD("not", NOT, RESERVED_KEYWORD) +PG_KEYWORD("null", NULL_P, RESERVED_KEYWORD) +PG_KEYWORD("optional", OPTIONAL, RESERVED_KEYWORD) +PG_KEYWORD("or", OR, RESERVED_KEYWORD) +PG_KEYWORD("order", ORDER, RESERVED_KEYWORD) +PG_KEYWORD("remove", REMOVE, RESERVED_KEYWORD) +PG_KEYWORD("return", RETURN, RESERVED_KEYWORD) +PG_KEYWORD("set", SET, RESERVED_KEYWORD) +PG_KEYWORD("skip", SKIP, RESERVED_KEYWORD) +PG_KEYWORD("starts", STARTS, RESERVED_KEYWORD) +PG_KEYWORD("then", THEN, RESERVED_KEYWORD) +PG_KEYWORD("true", TRUE_P, RESERVED_KEYWORD) +PG_KEYWORD("union", UNION, RESERVED_KEYWORD) +PG_KEYWORD("unwind", UNWIND, RESERVED_KEYWORD) +PG_KEYWORD("verbose", VERBOSE, RESERVED_KEYWORD) +PG_KEYWORD("when", WHEN, RESERVED_KEYWORD) +PG_KEYWORD("where", WHERE, RESERVED_KEYWORD) +PG_KEYWORD("with", WITH, RESERVED_KEYWORD) +PG_KEYWORD("xor", XOR, RESERVED_KEYWORD) +PG_KEYWORD("yield", YIELD, RESERVED_KEYWORD) \ No newline at end of file diff --git a/tools/PerfectHash.pm b/tools/PerfectHash.pm new file mode 100644 index 000000000..54f5d4e99 --- /dev/null +++ b/tools/PerfectHash.pm @@ -0,0 +1,376 @@ +#---------------------------------------------------------------------- +# +# PerfectHash.pm +# 
Perl module that constructs minimal perfect hash functions +# +# This code constructs a minimal perfect hash function for the given +# set of keys, using an algorithm described in +# "An optimal algorithm for generating minimal perfect hash functions" +# by Czech, Havas and Majewski in Information Processing Letters, +# 43(5):256-264, October 1992. +# This implementation is loosely based on NetBSD's "nbperf", +# which was written by Joerg Sonnenberger. +# +# The resulting hash function is perfect in the sense that if the presented +# key is one of the original set, it will return the key's index in the set +# (in range 0..N-1). However, the caller must still verify the match, +# as false positives are possible. Also, the hash function may return +# values that are out of range (negative or >= N), due to summing unrelated +# hashtable entries. This indicates that the presented key is definitely +# not in the set. +# +# +# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/tools/PerfectHash.pm +# +#---------------------------------------------------------------------- + +package PerfectHash; + +use strict; +use warnings; + + +# At runtime, we'll compute two simple hash functions of the input key, +# and use them to index into a mapping table. The hash functions are just +# multiply-and-add in uint32 arithmetic, with different multipliers and +# initial seeds. All the complexity in this module is concerned with +# selecting hash parameters that will work and building the mapping table. + +# We support making case-insensitive hash functions, though this only +# works for a strict-ASCII interpretation of case insensitivity, +# ie, A-Z maps onto a-z and nothing else. +my $case_fold = 0; + + +# +# Construct a C function implementing a perfect hash for the given keys. +# The C function definition is returned as a string. +# +# The keys should be passed as an array reference. They can be any set +# of Perl strings; it is caller's responsibility that there not be any +# duplicates. (Note that the "strings" can be binary data, but hashing +# e.g. OIDs has endianness hazards that callers must overcome.) +# +# The name to use for the function is specified as the second argument. +# It will be a global function by default, but the caller may prepend +# "static " to the result string if it wants a static function. +# +# Additional options can be specified as keyword-style arguments: +# +# case_fold => bool +# If specified as true, the hash function is case-insensitive, for the +# limited idea of case-insensitivity explained above. +# +# fixed_key_length => N +# If specified, all keys are assumed to have length N bytes, and the +# hash function signature will be just "int f(const void *key)" +# rather than "int f(const void *key, size_t keylen)". +# +sub generate_hash_function +{ + my ($keys_ref, $funcname, %options) = @_; + + # It's not worth passing this around as a parameter; just use a global. + $case_fold = $options{case_fold} || 0; + + # Try different hash function parameters until we find a set that works + # for these keys. The multipliers are chosen to be primes that are cheap + # to calculate via shift-and-add, so don't change them without care. + # (Commonly, random seeds are tried, but we want reproducible results + # from this program so we don't do that.) 
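For orientation, the C function assembled by the sprintf calls below has roughly the following shape. This is only an illustrative sketch: the function name, the seeds, the second multiplier (127), the table size, and the table contents are made-up placeholder values, not output from any actual keyword set, and int16_t/uint32_t stand in for the PostgreSQL int16/uint32 typedefs used in the real emitted code.

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Sketch of an emitted minimal-perfect-hash function.  The two
     * multiply-and-add hashes 'a' and 'b' index the mapping table h[],
     * and the sum of the two entries is the candidate keyword index.
     * All numeric values here are placeholders.
     */
    int
    example_kw_hash_func(const void *key, size_t keylen)
    {
        static const int16_t h[11] = {
            0, 2, -1, 0, 3, 0, 1, 0,
            0, 4, 0,
        };
        const unsigned char *k = (const unsigned char *) key;
        uint32_t    a = 0;      /* placeholder seed 1 */
        uint32_t    b = 1;      /* placeholder seed 2 */

        while (keylen--)
        {
            unsigned char c = *k++ | 0x20;  /* ASCII case folding */

            a = a * 31 + c;
            b = b * 127 + c;
        }
        return h[a % 11] + h[b % 11];
    }

The caller must still compare the key against the keyword at the returned index, since out-of-set inputs can hash to any value.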
+ my $hash_mult1 = 31; + my $hash_mult2; + my $hash_seed1; + my $hash_seed2; + my @subresult; + FIND_PARAMS: + foreach (127, 257, 521, 1033, 2053) + { + $hash_mult2 = $_; # "foreach $hash_mult2" doesn't work + for ($hash_seed1 = 0; $hash_seed1 < 10; $hash_seed1++) + { + for ($hash_seed2 = 0; $hash_seed2 < 10; $hash_seed2++) + { + @subresult = _construct_hash_table( + $keys_ref, $hash_mult1, $hash_mult2, + $hash_seed1, $hash_seed2); + last FIND_PARAMS if @subresult; + } + } + } + + # Choke if we couldn't find a workable set of parameters. + die "failed to generate perfect hash" if !@subresult; + + # Extract info from _construct_hash_table's result array. + my $elemtype = $subresult[0]; + my @hashtab = @{ $subresult[1] }; + my $nhash = scalar(@hashtab); + + # OK, construct the hash function definition including the hash table. + my $f = ''; + $f .= sprintf "int\n"; + if (defined $options{fixed_key_length}) + { + $f .= sprintf "%s(const void *key)\n{\n", $funcname; + } + else + { + $f .= sprintf "%s(const void *key, size_t keylen)\n{\n", $funcname; + } + $f .= sprintf "\tstatic const %s h[%d] = {\n", $elemtype, $nhash; + for (my $i = 0; $i < $nhash; $i++) + { + $f .= sprintf "%s%6d,%s", + ($i % 8 == 0 ? "\t\t" : " "), + $hashtab[$i], + ($i % 8 == 7 ? "\n" : ""); + } + $f .= sprintf "\n" if ($nhash % 8 != 0); + $f .= sprintf "\t};\n\n"; + $f .= sprintf "\tconst unsigned char *k = (const unsigned char *) key;\n"; + $f .= sprintf "\tsize_t\t\tkeylen = %d;\n", $options{fixed_key_length} + if (defined $options{fixed_key_length}); + $f .= sprintf "\tuint32\t\ta = %d;\n", $hash_seed1; + $f .= sprintf "\tuint32\t\tb = %d;\n\n", $hash_seed2; + $f .= sprintf "\twhile (keylen--)\n\t{\n"; + $f .= sprintf "\t\tunsigned char c = *k++"; + $f .= sprintf " | 0x20" if $case_fold; # see comment below + $f .= sprintf ";\n\n"; + $f .= sprintf "\t\ta = a * %d + c;\n", $hash_mult1; + $f .= sprintf "\t\tb = b * %d + c;\n", $hash_mult2; + $f .= sprintf "\t}\n"; + $f .= sprintf "\treturn h[a %% %d] + h[b %% %d];\n", $nhash, $nhash; + $f .= sprintf "}\n"; + + return $f; +} + + +# Calculate a hash function as the run-time code will do. +# +# If we are making a case-insensitive hash function, we implement that +# by OR'ing 0x20 into each byte of the key. This correctly transforms +# upper-case ASCII into lower-case ASCII, while not changing digits or +# dollar signs. (It does change '_', as well as other characters not +# likely to appear in keywords; this has little effect on the hash's +# ability to discriminate keywords.) +sub _calc_hash +{ + my ($key, $mult, $seed) = @_; + + my $result = $seed; + for my $c (split //, $key) + { + my $cn = ord($c); + $cn |= 0x20 if $case_fold; + $result = ($result * $mult + $cn) % 4294967296; + } + return $result; +} + + +# Attempt to construct a mapping table for a minimal perfect hash function +# for the given keys, using the specified hash parameters. +# +# Returns an array containing the mapping table element type name as the +# first element, and a ref to an array of the table values as the second. +# +# Returns an empty array on failure; then caller should choose different +# hash parameter(s) and try again. +sub _construct_hash_table +{ + my ($keys_ref, $hash_mult1, $hash_mult2, $hash_seed1, $hash_seed2) = @_; + my @keys = @{$keys_ref}; + + # This algorithm is based on a graph whose edges correspond to the + # keys and whose vertices correspond to entries of the mapping table. 
+ # A key's edge links the two vertices whose indexes are the outputs of + # the two hash functions for that key. For K keys, the mapping + # table must have at least 2*K+1 entries, guaranteeing that there's at + # least one unused entry. (In principle, larger mapping tables make it + # easier to find a workable hash and increase the number of inputs that + # can be rejected due to touching unused hashtable entries. In practice, + # neither effect seems strong enough to justify using a larger table.) + my $nedges = scalar @keys; # number of edges + my $nverts = 2 * $nedges + 1; # number of vertices + + # However, it would be very bad if $nverts were exactly equal to either + # $hash_mult1 or $hash_mult2: effectively, that hash function would be + # sensitive to only the last byte of each key. Cases where $nverts is a + # multiple of either multiplier likewise lose information. (But $nverts + # can't actually divide them, if they've been intelligently chosen as + # primes.) We can avoid such problems by adjusting the table size. + while ($nverts % $hash_mult1 == 0 + || $nverts % $hash_mult2 == 0) + { + $nverts++; + } + + # Initialize the array of edges. + my @E = (); + foreach my $kw (@keys) + { + # Calculate hashes for this key. + # The hashes are immediately reduced modulo the mapping table size. + my $hash1 = _calc_hash($kw, $hash_mult1, $hash_seed1) % $nverts; + my $hash2 = _calc_hash($kw, $hash_mult2, $hash_seed2) % $nverts; + + # If the two hashes are the same for any key, we have to fail + # since this edge would itself form a cycle in the graph. + return () if $hash1 == $hash2; + + # Add the edge for this key. + push @E, { left => $hash1, right => $hash2 }; + } + + # Initialize the array of vertices, giving them all empty lists + # of associated edges. (The lists will be hashes of edge numbers.) + my @V = (); + for (my $v = 0; $v < $nverts; $v++) + { + push @V, { edges => {} }; + } + + # Insert each edge in the lists of edges connected to its vertices. + for (my $e = 0; $e < $nedges; $e++) + { + my $v = $E[$e]{left}; + $V[$v]{edges}->{$e} = 1; + + $v = $E[$e]{right}; + $V[$v]{edges}->{$e} = 1; + } + + # Now we attempt to prove the graph acyclic. + # A cycle-free graph is either empty or has some vertex of degree 1. + # Removing the edge attached to that vertex doesn't change this property, + # so doing that repeatedly will reduce the size of the graph. + # If the graph is empty at the end of the process, it was acyclic. + # We track the order of edge removal so that the next phase can process + # them in reverse order of removal. + my @output_order = (); + + # Consider each vertex as a possible starting point for edge-removal. + for (my $startv = 0; $startv < $nverts; $startv++) + { + my $v = $startv; + + # If vertex v is of degree 1 (i.e. exactly 1 edge connects to it), + # remove that edge, and then consider the edge's other vertex to see + # if it is now of degree 1. The inner loop repeats until reaching a + # vertex not of degree 1. + while (scalar(keys(%{ $V[$v]{edges} })) == 1) + { + # Unlink its only edge. + my $e = (keys(%{ $V[$v]{edges} }))[0]; + delete($V[$v]{edges}->{$e}); + + # Unlink the edge from its other vertex, too. + my $v2 = $E[$e]{left}; + $v2 = $E[$e]{right} if ($v2 == $v); + delete($V[$v2]{edges}->{$e}); + + # Push e onto the front of the output-order list. + unshift @output_order, $e; + + # Consider v2 on next iteration of inner loop. + $v = $v2; + } + } + + # We succeeded only if all edges were removed from the graph. 
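As a side note, the degree-1 peeling test described above can be restated as the following standalone C sketch on a small hypothetical graph; it only answers the acyclicity question, whereas the Perl code above additionally records the removal order (@output_order) for the assignment phase that follows.

    #include <stdbool.h>

    /* hypothetical 5-vertex, 2-edge graph used purely for illustration */
    #define NVERTS 5
    #define NEDGES 2

    static const int edge_left[NEDGES]  = {0, 1};
    static const int edge_right[NEDGES] = {2, 2};

    /* returns true iff every edge can be peeled away, i.e. no cycle exists */
    static bool
    graph_is_acyclic(void)
    {
        int     degree[NVERTS] = {0};
        bool    removed[NEDGES] = {false};
        int     nremoved = 0;
        bool    progress = true;

        for (int e = 0; e < NEDGES; e++)
        {
            degree[edge_left[e]]++;
            degree[edge_right[e]]++;
        }

        /* repeatedly strip the single edge hanging off any degree-1 vertex */
        while (progress)
        {
            progress = false;
            for (int v = 0; v < NVERTS; v++)
            {
                if (degree[v] != 1)
                    continue;
                for (int e = 0; e < NEDGES; e++)
                {
                    if (removed[e] ||
                        (edge_left[e] != v && edge_right[e] != v))
                        continue;
                    removed[e] = true;
                    nremoved++;
                    degree[edge_left[e]]--;
                    degree[edge_right[e]]--;
                    progress = true;
                    break;
                }
            }
        }
        return nremoved == NEDGES;  /* success only if all edges removed */
    }
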
+ return () if (scalar(@output_order) != $nedges); + + # OK, build the hash table of size $nverts. + my @hashtab = (0) x $nverts; + # We need a "visited" flag array in this step, too. + my @visited = (0) x $nverts; + + # The goal is that for any key, the sum of the hash table entries for + # its first and second hash values is the desired output (i.e., the key + # number). By assigning hash table values in the selected edge order, + # we can guarantee that that's true. This works because the edge first + # removed from the graph (and hence last to be visited here) must have + # at least one vertex it shared with no other edge; hence it will have at + # least one vertex (hashtable entry) still unvisited when we reach it here, + # and we can assign that unvisited entry a value that makes the sum come + # out as we wish. By induction, the same holds for all the other edges. + foreach my $e (@output_order) + { + my $l = $E[$e]{left}; + my $r = $E[$e]{right}; + if (!$visited[$l]) + { + # $hashtab[$r] might be zero, or some previously assigned value. + $hashtab[$l] = $e - $hashtab[$r]; + } + else + { + die "oops, doubly used hashtab entry" if $visited[$r]; + # $hashtab[$l] might be zero, or some previously assigned value. + $hashtab[$r] = $e - $hashtab[$l]; + } + # Now freeze both of these hashtab entries. + $visited[$l] = 1; + $visited[$r] = 1; + } + + # Detect range of values needed in hash table. + my $hmin = $nedges; + my $hmax = 0; + for (my $v = 0; $v < $nverts; $v++) + { + $hmin = $hashtab[$v] if $hashtab[$v] < $hmin; + $hmax = $hashtab[$v] if $hashtab[$v] > $hmax; + } + + # Choose width of hashtable entries. In addition to the actual values, + # we need to be able to store a flag for unused entries, and we wish to + # have the property that adding any other entry value to the flag gives + # an out-of-range result (>= $nedges). + my $elemtype; + my $unused_flag; + + if ( $hmin >= -0x7F + && $hmax <= 0x7F + && $hmin + 0x7F >= $nedges) + { + # int8 will work + $elemtype = 'int8'; + $unused_flag = 0x7F; + } + elsif ($hmin >= -0x7FFF + && $hmax <= 0x7FFF + && $hmin + 0x7FFF >= $nedges) + { + # int16 will work + $elemtype = 'int16'; + $unused_flag = 0x7FFF; + } + elsif ($hmin >= -0x7FFFFFFF + && $hmax <= 0x7FFFFFFF + && $hmin + 0x3FFFFFFF >= $nedges) + { + # int32 will work + $elemtype = 'int32'; + $unused_flag = 0x3FFFFFFF; + } + else + { + die "hash table values too wide"; + } + + # Set any unvisited hashtable entries to $unused_flag. + for (my $v = 0; $v < $nverts; $v++) + { + $hashtab[$v] = $unused_flag if !$visited[$v]; + } + + return ($elemtype, \@hashtab); +} + +1; diff --git a/tools/gen_keywordlist.pl b/tools/gen_keywordlist.pl new file mode 100755 index 000000000..1623c8678 --- /dev/null +++ b/tools/gen_keywordlist.pl @@ -0,0 +1,197 @@ +#---------------------------------------------------------------------- +# +# gen_keywordlist.pl +# Perl script that transforms a list of keywords into a ScanKeywordList +# data structure that can be passed to ScanKeywordLookup(). +# +# The input is a C header file containing a series of macro calls +# PG_KEYWORD("keyword", ...) +# Lines not starting with PG_KEYWORD are ignored. The keywords are +# implicitly numbered 0..N-1 in order of appearance in the header file. +# Currently, the keywords are required to appear in ASCII order. +# +# The output is a C header file that defines a "const ScanKeywordList" +# variable named according to the -v switch ("ScanKeywords" by default). +# The variable is marked "static" unless the -e switch is given. 
+# +# ScanKeywordList uses hash-based lookup, so this script also selects +# a minimal perfect hash function for the keyword set, and emits a +# static hash function that is referenced in the ScanKeywordList struct. +# The hash function is case-insensitive unless --no-case-fold is specified. +# Note that case folding works correctly only for all-ASCII keywords! +# +# +# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/tools/gen_keywordlist.pl +# +#---------------------------------------------------------------------- + +use strict; +use warnings; +use Getopt::Long; + +use FindBin; +use lib $FindBin::RealBin; + +use PerfectHash; + +my $output_path = ''; +my $extern = 0; +my $case_fold = 1; +my $varname = 'ScanKeywords'; + +GetOptions( + 'output:s' => \$output_path, + 'extern' => \$extern, + 'case-fold!' => \$case_fold, + 'varname:s' => \$varname) || usage(); + +my $kw_input_file = shift @ARGV || die "No input file.\n"; + +# Make sure output_path ends in a slash if needed. +if ($output_path ne '' && substr($output_path, -1) ne '/') +{ + $output_path .= '/'; +} + +$kw_input_file =~ /(\w+)\.h$/ + || die "Input file must be named something.h.\n"; +my $base_filename = $1 . '_d'; +my $kw_def_file = $output_path . $base_filename . '.h'; + +open(my $kif, '<', $kw_input_file) || die "$kw_input_file: $!\n"; +open(my $kwdef, '>', $kw_def_file) || die "$kw_def_file: $!\n"; + +# Opening boilerplate for keyword definition header. +printf $kwdef <) +{ + if (/^PG_KEYWORD\("(\w+)"/) + { + push @keywords, $1; + } +} + +# When being case-insensitive, insist that the input be all-lower-case. +if ($case_fold) +{ + foreach my $kw (@keywords) + { + die qq|The keyword "$kw" is not lower-case in $kw_input_file\n| + if ($kw ne lc $kw); + } +} + +# Error out if the keyword names are not in ASCII order. +# +# While this isn't really necessary with hash-based lookup, it's still +# helpful because it provides a cheap way to reject duplicate keywords. +# Also, insisting on sorted order ensures that code that scans the keyword +# table linearly will see the keywords in a canonical order. +for my $i (0 .. $#keywords - 1) +{ + die + qq|The keyword "$keywords[$i + 1]" is out of order in $kw_input_file\n| + if ($keywords[$i] cmp $keywords[ $i + 1 ]) >= 0; +} + +# Emit the string containing all the keywords. + +printf $kwdef qq|static const char %s_kw_string[] =\n\t"|, $varname; +print $kwdef join qq|\\0"\n\t"|, @keywords; +print $kwdef qq|";\n\n|; + +# Emit an array of numerical offsets which will be used to index into the +# keyword string. Also determine max keyword length. + +printf $kwdef "static const uint16 %s_kw_offsets[] = {\n", $varname; + +my $offset = 0; +my $max_len = 0; +foreach my $name (@keywords) +{ + my $this_length = length($name); + + print $kwdef "\t$offset,\n"; + + # Calculate the cumulative offset of the next keyword, + # taking into account the null terminator. + $offset += $this_length + 1; + + # Update max keyword length. + $max_len = $this_length if $max_len < $this_length; +} + +print $kwdef "};\n\n"; + +# Emit a macro defining the number of keywords. +# (In some places it's useful to have access to that as a constant.) + +printf $kwdef "#define %s_NUM_KEYWORDS %d\n\n", uc $varname, scalar @keywords; + +# Emit the definition of the hash function. + +my $funcname = $varname . 
"_hash_func"; + +my $f = PerfectHash::generate_hash_function(\@keywords, $funcname, + case_fold => $case_fold); + +printf $kwdef qq|static %s\n|, $f; + +# Emit the struct that wraps all this lookup info into one variable. + +printf $kwdef "static " if !$extern; +printf $kwdef "const ScanKeywordList %s = {\n", $varname; +printf $kwdef qq|\t%s_kw_string,\n|, $varname; +printf $kwdef qq|\t%s_kw_offsets,\n|, $varname; +printf $kwdef qq|\t%s,\n|, $funcname; +printf $kwdef qq|\t%s_NUM_KEYWORDS,\n|, uc $varname; +printf $kwdef qq|\t%d\n|, $max_len; +printf $kwdef "};\n\n"; + +printf $kwdef "#endif\t\t\t\t\t\t\t/* %s_H */\n", uc $base_filename; + + +sub usage +{ + die <] [--varname/-v ] [--extern/-e] [--[no-]case-fold] input_file + --output Output directory (default '.') + --varname Name for ScanKeywordList variable (default 'ScanKeywords') + --extern Allow the ScanKeywordList variable to be globally visible + --no-case-fold Keyword matching is to be case-sensitive + +gen_keywordlist.pl transforms a list of keywords into a ScanKeywordList. +The output filename is derived from the input file by inserting _d, +for example kwlist_d.h is produced from kwlist.h. +EOM +} From 7cd1d159cd43d3e5a657ca88db6986a7230e782f Mon Sep 17 00:00:00 2001 From: Shoaib Date: Thu, 7 Jul 2022 09:35:36 +0200 Subject: [PATCH 06/18] fix index error by Josh --- regress/expected/cypher_set.out | 30 +++++----- src/backend/executor/cypher_set.c | 93 +++++++++++++++++++------------ 2 files changed, 72 insertions(+), 51 deletions(-) diff --git a/regress/expected/cypher_set.out b/regress/expected/cypher_set.out index 5cd42ee97..ce55c722c 100644 --- a/regress/expected/cypher_set.out +++ b/regress/expected/cypher_set.out @@ -246,9 +246,9 @@ EXECUTE p_1; {"id": 281474976710659, "label": "", "properties": {"i": 3, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 3, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 3, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 3, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 3, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex @@ -261,9 +261,9 @@ EXECUTE p_1; {"id": 281474976710659, "label": "", "properties": {"i": 3, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 3, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 3, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 3, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 3, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "k": 
10}}::vertex @@ -277,9 +277,9 @@ EXECUTE p_2('{"var_name": 4}'); {"id": 281474976710659, "label": "", "properties": {"i": 4, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 4, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 4, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 4, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 4, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 4, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 4, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 4, "k": 10}}::vertex @@ -292,9 +292,9 @@ EXECUTE p_2('{"var_name": 6}'); {"id": 281474976710659, "label": "", "properties": {"i": 6, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 6, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 6, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 6, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 6, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 6, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 6, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 6, "k": 10}}::vertex @@ -316,9 +316,9 @@ SELECT set_test(); {"id": 281474976710659, "label": "", "properties": {"i": 7, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 7, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 7, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 7, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 7, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex @@ -331,9 +331,9 @@ SELECT set_test(); {"id": 281474976710659, "label": "", "properties": {"i": 7, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 7, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 7, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 7, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 7, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 7, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", 
"properties": {"i": 7, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 7, "k": 10}}::vertex @@ -349,9 +349,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = 3, n.j = 5 RETURN n $$) {"id": 281474976710659, "label": "", "properties": {"i": 3, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": 3, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": 3, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": 3, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": 3, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": 3, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": 3, "j": 5, "k": 10}}::vertex @@ -500,9 +500,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = [3, 'test', [1, 2, 3], {"id": 281474976710659, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex @@ -518,9 +518,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype); {"id": 281474976710659, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": [3, 
"test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": [3, "test", [1, 2, 3], {"id": 1}, 1.0, 1::numeric], "j": 5, "k": 10}}::vertex @@ -550,9 +550,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = [] RETURN n$$) AS (a ag {"id": 281474976710659, "label": "", "properties": {"i": [], "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": [], "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": [], "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex @@ -568,9 +568,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype); {"id": 281474976710659, "label": "", "properties": {"i": [], "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": [], "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": [], "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": [], "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": [], "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": [], "j": 5, "k": 10}}::vertex @@ -587,9 +587,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = {prop1: 3, prop2:'test' {"id": 281474976710659, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, 
"prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex @@ -605,9 +605,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype); {"id": 281474976710659, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": {"prop1": 3, "prop2": "test", "prop3": [1, 2, 3], "prop4": {"id": 1}, "prop5": 1.0, "prop6": 1::numeric}, "j": 5, "k": 10}}::vertex 
@@ -637,9 +637,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) SET n.i = {} RETURN n$$) AS (a ag {"id": 281474976710659, "label": "", "properties": {"i": {}, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": {}, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": {}, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex @@ -655,9 +655,9 @@ SELECT * FROM cypher('cypher_set', $$MATCH (n) RETURN n$$) AS (a agtype); {"id": 281474976710659, "label": "", "properties": {"i": {}, "j": 5, "y": 2}}::vertex {"id": 281474976710658, "label": "", "properties": {"i": {}, "j": 5, "t": 150, "y": 1}}::vertex {"id": 281474976710657, "label": "", "properties": {"i": {}, "j": 5, "t": 150}}::vertex - {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 844424930131969, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex {"id": 844424930131971, "label": "v", "properties": {"i": {}, "j": 5, "t": 150}}::vertex + {"id": 844424930131970, "label": "v", "properties": {"a": 0, "i": {}, "j": 5, "t": 150, "y": 99, "z": 99}}::vertex {"id": 1407374883553281, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex {"id": 1407374883553282, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex {"id": 1407374883553283, "label": "other_v", "properties": {"i": {}, "j": 5, "k": 10}}::vertex diff --git a/src/backend/executor/cypher_set.c b/src/backend/executor/cypher_set.c index 16d94f118..aef9180c5 100644 --- a/src/backend/executor/cypher_set.c +++ b/src/backend/executor/cypher_set.c @@ -79,7 +79,7 @@ static void begin_cypher_set(CustomScanState *node, EState *estate, ExecInitScanTupleSlot(estate, &node->ss, ExecGetResultType(node->ss.ps.lefttree), &TTSOpsHeapTuple); - + if (!CYPHER_CLAUSE_IS_TERMINAL(css->flags)) { TupleDesc tupdesc = node->ss.ss_ScanTupleSlot->tts_tupleDescriptor; @@ -110,11 +110,11 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, LockTupleMode lockmode; TM_FailureData hufd; TM_Result lock_result; - TM_Result update_result; Buffer buffer; - bool update_indexes; + bool update_indexes; + TM_Result result; - //ResultRelInfo *saved_resultRelInfo; + ResultRelInfo *saved_resultRelInfo = estate->es_result_relation_info; estate->es_result_relation_info = resultRelInfo; lockmode = ExecUpdateLockMode(estate, resultRelInfo); @@ -125,7 +125,7 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, if (lock_result == TM_Ok) { - //ExecOpenIndices(resultRelInfo, false); + ExecOpenIndices(resultRelInfo, false); ExecStoreVirtualTuple(elemTupleSlot); tuple = ExecFetchSlotHeapTuple(elemTupleSlot, true, NULL); tuple->t_self = old_tuple->t_self; @@ -136,41 +136,63 @@ static HeapTuple update_entity_tuple(ResultRelInfo *resultRelInfo, { 
ExecConstraints(resultRelInfo, elemTupleSlot, estate); } - /* - simple_table_tuple_update(resultRelInfo->ri_RelationDesc, - GetCurrentCommandId(true), - elemTupleSlot, estate->es_snapshot, - &update_indexes); - - if (resultRelInfo->ri_NumIndices > 0 && update_indexes) - //ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, - // NIL); - */ - // Insert the tuple normally - update_result = heap_update(resultRelInfo->ri_RelationDesc, - &(tuple->t_self), tuple, + + result = table_tuple_update(resultRelInfo->ri_RelationDesc, + &tuple->t_self, elemTupleSlot, GetCurrentCommandId(true), - estate->es_crosscheck_snapshot, true, &hufd, - &lockmode); + //estate->es_output_cid, + estate->es_snapshot,// NULL, + estate->es_crosscheck_snapshot, + true /* wait for commit */ , + &hufd, &lockmode, &update_indexes); - if (update_result != TM_Ok) + if (result == TM_SelfModified) + { + if (hufd.cmax != estate->es_output_cid) + { + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified"))); + } + + ExecCloseIndices(resultRelInfo); + estate->es_result_relation_info = saved_resultRelInfo; + + return tuple; + } + + if (result != TM_Ok) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Entity failed to be updated: %i", update_result))); + errmsg("Entity failed to be updated: %i", result))); } - - &update_indexes = update_result == TM_Ok && !HeapTupleIsHeapOnly(tuple); - + // Insert index entries for the tuple if (resultRelInfo->ri_NumIndices > 0 && update_indexes) { - //ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, NIL); + ExecInsertIndexTuples(elemTupleSlot, estate, false, NULL, NIL); } - + + ExecCloseIndices(resultRelInfo); } + else if (lock_result == TM_SelfModified) + { + if (hufd.cmax != estate->es_output_cid) + { + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified"))); + } + } + else + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("Entity failed to be updated: %i", lock_result))); + } + ReleaseBuffer(buffer); - //estate->es_result_relation_info = saved_resultRelInfo; + estate->es_result_relation_info = saved_resultRelInfo; return tuple; } @@ -388,8 +410,7 @@ static void process_update_list(CustomScanState *node) Datum new_entity; HeapTuple heap_tuple; char *clause_name = css->set_list->clause_name; - Oid relid; - Relation rel; + int cid; update_item = (cypher_update_item *)lfirst(lc); @@ -425,7 +446,7 @@ static void process_update_list(CustomScanState *node) /* get the id and label for later */ id = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "id"); label = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, "label"); - + label_name = pnstrdup(label->val.string.val, label->val.string.len); /* get the properties we need to update */ original_properties = GET_AGTYPE_VALUE_OBJECT_VALUE(original_entity_value, @@ -467,13 +488,9 @@ static void process_update_list(CustomScanState *node) update_item->prop_name, new_property_value, remove_property); - + resultRelInfo = create_entity_result_rel_info( estate, css->set_list->graph_name, label_name); - //relid = RelationGetRelid(resultRelInfo->ri_RelationDesc); - //rel = table_open(relid, RowExclusiveLock); - - //ExecOpenIndices(resultRelInfo, false); slot = ExecInitExtraTupleSlot( estate, RelationGetDescr(resultRelInfo->ri_RelationDesc), @@ -528,6 +545,9 @@ static void process_update_list(CustomScanState *node) * If the last update index for the entity is equal to the current loop * 
index, then update this tuple. */ + cid = estate->es_snapshot->curcid; + estate->es_snapshot->curcid = GetCurrentCommandId(false); + if (luindex[update_item->entity_position - 1] == lidx) { /* @@ -558,6 +578,7 @@ static void process_update_list(CustomScanState *node) table_endscan(scan_desc); } + estate->es_snapshot->curcid = cid; /* close relation */ ExecCloseIndices(resultRelInfo); table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); From 9939917dd04ec488bdb55a40e64b7e9db2e27994 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Thu, 11 Aug 2022 11:17:34 +0200 Subject: [PATCH 07/18] rebased and updated docker file --- Dockerfile | 4 ++-- src/backend/executor/cypher_utils.c | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index d0b449aed..cd9622ca0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,14 +17,14 @@ # -FROM postgres:11 +FROM postgres:12 RUN apt-get update RUN apt-get install --assume-yes --no-install-recommends --no-install-suggests \ bison \ build-essential \ flex \ - postgresql-server-dev-11 + postgresql-server-dev-12 COPY . /age RUN cd /age && make install diff --git a/src/backend/executor/cypher_utils.c b/src/backend/executor/cypher_utils.c index 2d45f1812..7b92fbc61 100644 --- a/src/backend/executor/cypher_utils.c +++ b/src/backend/executor/cypher_utils.c @@ -59,8 +59,6 @@ ResultRelInfo *create_entity_result_rel_info(EState *estate, char *graph_name, RangeVar *rv; Relation label_relation; ResultRelInfo *resultRelInfo; - Oid relid; - Relation rel; ParseState *pstate = make_parsestate(NULL); From 2bcbe333d0d5e7175be4069ad5640612b80d1751 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Thu, 11 Aug 2022 11:27:32 +0200 Subject: [PATCH 08/18] deleted updated scripts --- age--0.5.0--0.6.0.sql | 34 ------ age--0.6.0--0.7.0.sql | 197 --------------------------------- age--0.7.0--1.0.0.sql | 52 --------- age--1.0.0--1.1.0.sql | 247 ------------------------------------------ 4 files changed, 530 deletions(-) delete mode 100644 age--0.5.0--0.6.0.sql delete mode 100644 age--0.6.0--0.7.0.sql delete mode 100644 age--0.7.0--1.0.0.sql delete mode 100644 age--1.0.0--1.1.0.sql diff --git a/age--0.5.0--0.6.0.sql b/age--0.5.0--0.6.0.sql deleted file mode 100644 index dbe620f13..000000000 --- a/age--0.5.0--0.6.0.sql +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION age UPDATE TO '0.6.0'" to load this file. 
\quit - -CREATE OR REPLACE FUNCTION ag_catalog.age_vle(IN agtype, IN agtype, IN agtype, - IN agtype, IN agtype, IN agtype, - IN agtype, OUT edges agtype) -RETURNS SETOF agtype -LANGUAGE C -IMMUTABLE -STRICT -AS 'MODULE_PATHNAME'; - --- --- End --- diff --git a/age--0.6.0--0.7.0.sql b/age--0.6.0--0.7.0.sql deleted file mode 100644 index cdbaf4406..000000000 --- a/age--0.6.0--0.7.0.sql +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION age UPDATE TO '0.7.0'" to load this file. \quit - -CREATE FUNCTION ag_catalog.create_vlabel(graph_name name, label_name name) - RETURNS void - LANGUAGE c -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.create_elabel(graph_name name, label_name name) - RETURNS void - LANGUAGE c -AS 'MODULE_PATHNAME'; - --- binary I/O functions -CREATE FUNCTION ag_catalog.graphid_send(graphid) -RETURNS bytea -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.graphid_recv(internal) -RETURNS graphid -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -UPDATE pg_type SET -typsend = 'ag_catalog.graphid_send', -typreceive = 'ag_catalog.graphid_recv' -WHERE typname = 'graphid'; - --- binary I/O functions -CREATE FUNCTION ag_catalog.agtype_send(agtype) -RETURNS bytea -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.agtype_recv(internal) -RETURNS agtype -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -UPDATE pg_type SET -typsend = 'ag_catalog.agtype_send', -typreceive = 'ag_catalog.agtype_recv' -WHERE typname = 'agtype'; - --- agtype -> int4[] -CREATE FUNCTION ag_catalog.agtype_to_int4_array(variadic "any") - RETURNS int[] - LANGUAGE c - STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE CAST (agtype AS int[]) - WITH FUNCTION ag_catalog.agtype_to_int4_array(variadic "any"); - -CREATE FUNCTION ag_catalog.age_eq_tilde(agtype, agtype) -RETURNS agtype -LANGUAGE c -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE OR REPLACE FUNCTION ag_catalog.age_vle(IN agtype, IN agtype, IN agtype, - IN agtype, IN agtype, IN agtype, - IN agtype, OUT edges agtype) -RETURNS SETOF agtype -LANGUAGE C -STABLE -CALLED ON NULL INPUT -PARALLEL UNSAFE -- might be safe -AS 'MODULE_PATHNAME'; - --- function to build an edge for a VLE match -CREATE FUNCTION ag_catalog.age_build_vle_match_edge(agtype, agtype) -RETURNS agtype -LANGUAGE C -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- function to match a terminal vle edge -CREATE FUNCTION 
ag_catalog.age_match_vle_terminal_edge(agtype, agtype, agtype) -RETURNS boolean -LANGUAGE C -STABLE -CALLED ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- function to create an AGTV_PATH from a VLE_path_container -CREATE FUNCTION ag_catalog.age_materialize_vle_path(agtype) -RETURNS agtype -LANGUAGE C -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- function to create an AGTV_ARRAY of edges from a VLE_path_container -CREATE FUNCTION ag_catalog.age_materialize_vle_edges(agtype) -RETURNS agtype -LANGUAGE C -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_match_vle_edge_to_id_qual(agtype, agtype, agtype) -RETURNS boolean -LANGUAGE C -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_match_two_vle_edges(agtype, agtype) -RETURNS boolean -LANGUAGE C -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- list functions -CREATE FUNCTION ag_catalog.age_keys(agtype) -RETURNS agtype -LANGUAGE c -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_labels(agtype) -RETURNS agtype -LANGUAGE c -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_nodes(agtype) -RETURNS agtype -LANGUAGE c -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_relationships(agtype) -RETURNS agtype -LANGUAGE c -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_range(variadic "any") -RETURNS agtype -LANGUAGE c -STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- --- End --- diff --git a/age--0.7.0--1.0.0.sql b/age--0.7.0--1.0.0.sql deleted file mode 100644 index 59d3bff82..000000000 --- a/age--0.7.0--1.0.0.sql +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION age UPDATE TO '1.0.0'" to load this file. 
\quit - -CREATE FUNCTION ag_catalog.load_labels_from_file(graph_name name, - label_name name, - file_path text, - id_field_exists bool default true) - RETURNS void - LANGUAGE c - AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.load_edges_from_file(graph_name name, - label_name name, - file_path text) - RETURNS void - LANGUAGE c - AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog._cypher_merge_clause(internal) -RETURNS void -LANGUAGE c -AS 'MODULE_PATHNAME'; - -CREATE FUNCTION ag_catalog.age_unnest(agtype, block_types boolean = false) - RETURNS SETOF agtype - LANGUAGE c - STABLE -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- --- End --- diff --git a/age--1.0.0--1.1.0.sql b/age--1.0.0--1.1.0.sql deleted file mode 100644 index 13b205a47..000000000 --- a/age--1.0.0--1.1.0.sql +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION age UPDATE TO '1.1.0'" to load this file. 
\quit - --- --- agtype - access operators ( ->, ->> ) --- - -CREATE FUNCTION ag_catalog.agtype_object_field(agtype, text) -RETURNS agtype -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- get agtype object field -CREATE OPERATOR -> ( - LEFTARG = agtype, - RIGHTARG = text, - FUNCTION = ag_catalog.agtype_object_field -); - -CREATE FUNCTION ag_catalog.agtype_object_field_text(agtype, text) -RETURNS text -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- get agtype object field as text -CREATE OPERATOR ->> ( - LEFTARG = agtype, - RIGHTARG = text, - FUNCTION = ag_catalog.agtype_object_field_text -); - -CREATE FUNCTION ag_catalog.agtype_array_element(agtype, int4) -RETURNS agtype -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- get agtype array element -CREATE OPERATOR -> ( - LEFTARG = agtype, - RIGHTARG = int4, - FUNCTION = ag_catalog.agtype_array_element -); - -CREATE FUNCTION ag_catalog.agtype_array_element_text(agtype, int4) -RETURNS text -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - --- get agtype array element as text -CREATE OPERATOR ->> ( - LEFTARG = agtype, - RIGHTARG = int4, - FUNCTION = ag_catalog.agtype_array_element_text -); - --- --- Contains operators @> <@ --- -CREATE FUNCTION ag_catalog.agtype_contains(agtype, agtype) -RETURNS boolean -LANGUAGE c -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE OPERATOR @> ( - LEFTARG = agtype, - RIGHTARG = agtype, - FUNCTION = ag_catalog.agtype_contains, - COMMUTATOR = '<@', - RESTRICT = contsel, - JOIN = contjoinsel -); - -CREATE FUNCTION ag_catalog.agtype_contained_by(agtype, agtype) -RETURNS boolean -LANGUAGE c -STABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE OPERATOR <@ ( - LEFTARG = agtype, - RIGHTARG = agtype, - FUNCTION = ag_catalog.agtype_contained_by, - COMMUTATOR = '@>', - RESTRICT = contsel, - JOIN = contjoinsel -); - --- --- Key Existence Operators ? ?| ?& --- -CREATE FUNCTION ag_catalog.agtype_exists(agtype, text) -RETURNS boolean -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE OPERATOR ? 
( - LEFTARG = agtype, - RIGHTARG = text, - FUNCTION = ag_catalog.agtype_exists, - COMMUTATOR = '?', - RESTRICT = contsel, - JOIN = contjoinsel -); - -CREATE FUNCTION ag_catalog.agtype_exists_any(agtype, text[]) -RETURNS boolean -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE OPERATOR ?| ( - LEFTARG = agtype, - RIGHTARG = text[], - FUNCTION = ag_catalog.agtype_exists_any, - RESTRICT = contsel, - JOIN = contjoinsel -); - -CREATE FUNCTION ag_catalog.agtype_exists_all(agtype, text[]) -RETURNS boolean -LANGUAGE c -IMMUTABLE -RETURNS NULL ON NULL INPUT -PARALLEL SAFE -AS 'MODULE_PATHNAME'; - -CREATE OPERATOR ?& ( - LEFTARG = agtype, - RIGHTARG = text[], - FUNCTION = ag_catalog.agtype_exists_all, - RESTRICT = contsel, - JOIN = contjoinsel -); - --- --- agtype GIN support --- -CREATE FUNCTION ag_catalog.gin_compare_agtype(text, text) -RETURNS int -AS 'MODULE_PATHNAME' -LANGUAGE C -IMMUTABLE -STRICT -PARALLEL SAFE; - -CREATE FUNCTION gin_extract_agtype(agtype, internal) -RETURNS internal -AS 'MODULE_PATHNAME' -LANGUAGE C -IMMUTABLE -STRICT -PARALLEL SAFE; - -CREATE FUNCTION ag_catalog.gin_extract_agtype_query(agtype, internal, int2, - internal, internal) -RETURNS internal -AS 'MODULE_PATHNAME' -LANGUAGE C -IMMUTABLE -STRICT -PARALLEL SAFE; - -CREATE FUNCTION ag_catalog.gin_consistent_agtype(internal, int2, agtype, int4, - internal, internal) -RETURNS bool -AS 'MODULE_PATHNAME' -LANGUAGE C -IMMUTABLE -STRICT -PARALLEL SAFE; - -CREATE FUNCTION ag_catalog.gin_triconsistent_agtype(internal, int2, agtype, int4, - internal, internal, internal) -RETURNS bool -AS 'MODULE_PATHNAME' -LANGUAGE C -IMMUTABLE -STRICT -PARALLEL SAFE; - -CREATE OPERATOR CLASS ag_catalog.gin_agtype_ops -DEFAULT FOR TYPE agtype USING gin AS - OPERATOR 7 @>, - OPERATOR 9 ?(agtype, text), - OPERATOR 10 ?|(agtype, text[]), - OPERATOR 11 ?&(agtype, text[]), - FUNCTION 1 ag_catalog.gin_compare_agtype(text,text), - FUNCTION 2 ag_catalog.gin_extract_agtype(agtype, internal), - FUNCTION 3 ag_catalog.gin_extract_agtype_query(agtype, internal, int2, - internal, internal), - FUNCTION 4 ag_catalog.gin_consistent_agtype(internal, int2, agtype, int4, - internal, internal), - FUNCTION 6 ag_catalog.gin_triconsistent_agtype(internal, int2, agtype, int4, - internal, internal, internal), -STORAGE text; - --- --- graph id conversion function --- -ALTER FUNCTION ag_catalog.agtype_access_operator(VARIADIC agtype[]) IMMUTABLE; - -DROP FUNCTION IF EXISTS ag_catalog._property_constraint_check(agtype, agtype); - --- --- end --- From 9e646e356192c693108488359f95c5db1caf93e6 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Tue, 16 Aug 2022 15:09:56 +0200 Subject: [PATCH 09/18] updated travis.yml --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7dadc21a4..b8c9a5ac3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,13 +4,13 @@ dist: bionic arch: amd64 jobs: include: - - name: PostgreSQL 11 + - name: PostgreSQL 12 compiler: gcc addons: apt: packages: - - postgresql-11 - - postgresql-server-dev-11 + - postgresql-12 + - postgresql-server-dev-12 script: - sudo make install -j$(nproc) - make installcheck From e599abe6e4a87c99f5e6db82b006cfb864a8892c Mon Sep 17 00:00:00 2001 From: Shoaib Date: Fri, 19 Aug 2022 12:00:33 +0200 Subject: [PATCH 10/18] 1) regression test restored 2) oid caching code restored 3) whitespace removed --- regress/expected/age_load.out | 28 ++++++++++------------------ regress/expected/expr.out | 2 +- regress/sql/age_load.sql | 26
++++++-------------------- src/backend/catalog/ag_label.c | 2 +- src/backend/executor/cypher_create.c | 4 ---- src/backend/executor/cypher_merge.c | 9 +-------- src/backend/parser/cypher_keywords.c | 1 - src/backend/utils/load/libcsv.c | 8 ++++---- 8 files changed, 23 insertions(+), 57 deletions(-) diff --git a/regress/expected/age_load.out b/regress/expected/age_load.out index 6c83d7b31..bae5924b3 100644 --- a/regress/expected/age_load.out +++ b/regress/expected/age_load.out @@ -1,21 +1,3 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ \! cp -r regress/age_load/data regress/instance/data/age_load LOAD 'age'; SET search_path TO ag_catalog; @@ -208,6 +190,16 @@ SELECT * FROM cypher('agload_test_graph', $$MATCH(n:Country2 {iso2 : 'AT'}) 1688849860263940 | "Austria" | "AT" (1 row) +SELECT * FROM cypher('agload_test_graph', $$ + MATCH (u:Country {region : "Europe"}) + WHERE u.name =~ 'Cro.*' + RETURN u.name, u.region +$$) AS (result_1 agtype, result_2 agtype); + result_1 | result_2 +-----------+---------- + "Croatia" | "Europe" +(1 row) + SELECT drop_graph('agload_test_graph', true); NOTICE: drop cascades to 7 other objects DETAIL: drop cascades to table agload_test_graph._ag_label_vertex diff --git a/regress/expected/expr.out b/regress/expected/expr.out index ba1124686..5142cb1c5 100644 --- a/regress/expected/expr.out +++ b/regress/expected/expr.out @@ -5303,7 +5303,7 @@ SELECT * FROM cypher('UCSC', $$ MATCH (u) RETURN stDev(u.gpa), stDevP(u.gpa) $$) AS (stDev agtype, stDevP agtype); stdev | stdevp -------------------+------------------- - 0.549566929066705 | 0.508800109100231 + 0.549566929066706 | 0.508800109100232 (1 row) -- should return 0 diff --git a/regress/sql/age_load.sql b/regress/sql/age_load.sql index e5a7db034..3516a170b 100644 --- a/regress/sql/age_load.sql +++ b/regress/sql/age_load.sql @@ -1,22 +1,3 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - \! 
cp -r regress/age_load/data regress/instance/data/age_load LOAD 'age'; @@ -48,7 +29,6 @@ SELECT COUNT(*) FROM cypher('agload_test_graph', $$MATCH(n) RETURN n$$) as (n ag SELECT COUNT(*) FROM cypher('agload_test_graph', $$MATCH (a)-[e]->(b) RETURN e$$) as (n agtype); - SELECT create_vlabel('agload_test_graph','Country2'); SELECT load_labels_from_file('agload_test_graph', 'Country2', 'age_load/countries.csv', false); @@ -73,4 +53,10 @@ SELECT * FROM cypher('agload_test_graph', $$MATCH(n:Country {iso2 : 'AT'}) SELECT * FROM cypher('agload_test_graph', $$MATCH(n:Country2 {iso2 : 'AT'}) RETURN id(n), n.name, n.iso2 $$) as ("id(n)" agtype, "n.name" agtype, "n.iso2" agtype); +SELECT * FROM cypher('agload_test_graph', $$ + MATCH (u:Country {region : "Europe"}) + WHERE u.name =~ 'Cro.*' + RETURN u.name, u.region +$$) AS (result_1 agtype, result_2 agtype); + SELECT drop_graph('agload_test_graph', true); diff --git a/src/backend/catalog/ag_label.c b/src/backend/catalog/ag_label.c index 385517813..5ef127247 100644 --- a/src/backend/catalog/ag_label.c +++ b/src/backend/catalog/ag_label.c @@ -304,7 +304,7 @@ List *get_all_edge_labels_per_graph(EState *estate, Oid graph_oid) table_endscan(scan_desc); - destroy_entity_result_rel_info(resultRelInfo); + destroy_entity_result_rel_info(resultRelInfo); table_close(resultRelInfo->ri_RelationDesc, RowExclusiveLock); return labels; diff --git a/src/backend/executor/cypher_create.c b/src/backend/executor/cypher_create.c index b4545e434..ca4f1777c 100644 --- a/src/backend/executor/cypher_create.c +++ b/src/backend/executor/cypher_create.c @@ -121,10 +121,6 @@ static void begin_cypher_create(CustomScanState *node, EState *estate, // Setup the relation's tuple slot cypher_node->elemTupleSlot = table_slot_create( rel, &estate->es_tupleTable); - //cypher_node->elemTupleSlot = ExecInitExtraTupleSlot( - // estate, - // RelationGetDescr(cypher_node->resultRelInfo->ri_RelationDesc), - // &TTSOpsHeapTuple); if (cypher_node->id_expr != NULL) { diff --git a/src/backend/executor/cypher_merge.c b/src/backend/executor/cypher_merge.c index 49e690cd0..fbfce8f1f 100644 --- a/src/backend/executor/cypher_merge.c +++ b/src/backend/executor/cypher_merge.c @@ -87,7 +87,7 @@ static void begin_cypher_merge(CustomScanState *node, EState *estate, ExecAssignExprContext(estate, &node->ss.ps); ExecInitScanTupleSlot(estate, &node->ss, - ExecGetResultType(node->ss.ps.lefttree), + ExecGetResultType(node->ss.ps.lefttree), &TTSOpsVirtual); /* @@ -462,7 +462,6 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node) */ ExprContext *econtext = node->ss.ps.ps_ExprContext; SubqueryScanState *sss = (SubqueryScanState *)node->ss.ps.lefttree; - HeapTuple heap_tuple; /* * Our child execution node is always a subquery. 
If not there @@ -507,12 +506,6 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState *node) */ mark_tts_isnull(econtext->ecxt_scantuple); - // create the physical heap tuple - heap_tuple = heap_form_tuple( - econtext->ecxt_scantuple->tts_tupleDescriptor, - econtext->ecxt_scantuple->tts_values, - econtext->ecxt_scantuple->tts_isnull); - // store the heap tuble ExecStoreVirtualTuple(econtext->ecxt_scantuple); diff --git a/src/backend/parser/cypher_keywords.c b/src/backend/parser/cypher_keywords.c index 91df69d26..cd4082260 100644 --- a/src/backend/parser/cypher_keywords.c +++ b/src/backend/parser/cypher_keywords.c @@ -88,7 +88,6 @@ Datum get_cypher_keywords(PG_FUNCTION_ARGS) HeapTuple tuple; // cast-away-const is ugly but alternatives aren't much better - //values[0] = (char *)cypher_keywords[func_ctx->call_cntr].name; values[0] = (char *) GetScanKeyword((int) func_ctx->call_cntr, &CypherKeyword); diff --git a/src/backend/utils/load/libcsv.c b/src/backend/utils/load/libcsv.c index 0db73fcb0..b94e4fadd 100644 --- a/src/backend/utils/load/libcsv.c +++ b/src/backend/utils/load/libcsv.c @@ -292,9 +292,9 @@ csv_increase_buffer(struct csv_parser *p) if (p == NULL) return 0; if (p->realloc_func == NULL) return 0; - /* Increase the size of the entry buffer. Attempt to increase size by + /* Increase the size of the entry buffer. Attempt to increase size by * p->blk_size, if this is larger than SIZE_MAX try to increase current - * buffer size to SIZE_MAX. If allocation fails, try to allocate halve + * buffer size to SIZE_MAX. If allocation fails, try to allocate halve * the size and try again until successful or increment size is zero. */ @@ -342,7 +342,7 @@ csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, s if (!p->entry_buf && pos < len) { /* Buffer hasn't been allocated yet and len > 0 */ - if (csv_increase_buffer(p) != 0) { + if (csv_increase_buffer(p) != 0) { p->quoted = quoted, p->pstate = pstate, p->spaces = spaces, p->entry_pos = entry_pos; return pos; } @@ -367,7 +367,7 @@ csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, s } else if (is_term ? is_term(c) : c == CSV_CR || c == CSV_LF) { /* Carriage Return or Line Feed */ if (pstate == FIELD_NOT_BEGUN) { SUBMIT_FIELD(p); - SUBMIT_ROW(p, c); + SUBMIT_ROW(p, c); } else { /* ROW_NOT_BEGUN */ /* Don't submit empty rows by default */ if (p->options & CSV_REPALL_NL) { From 00901916018d1a66dceba304d72d03ca22d51a45 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Fri, 19 Aug 2022 16:43:30 +0200 Subject: [PATCH 11/18] Fixed Final Issues 1) regression test restored 2) oid caching code restored 3) whitespace removed 4) copyrights added --- tools/gen_keywordlist.pl | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tools/gen_keywordlist.pl b/tools/gen_keywordlist.pl index 1623c8678..499300433 100755 --- a/tools/gen_keywordlist.pl +++ b/tools/gen_keywordlist.pl @@ -1,3 +1,26 @@ +# +# For PostgreSQL Database Management System: +# (formerly known as Postgres, then as Postgres95) +# +# Portions Copyright (c) 1996-2010, The PostgreSQL Global Development Group +# +# Portions Copyright (c) 1994, The Regents of the University of California +# +# Permission to use, copy, modify, and distribute this software and its documentation for any purpose, +# without fee, and without a written agreement is hereby granted, provided that the above copyright notice +# and this paragraph and the following two paragraphs appear in all copies. 
+# +# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, +# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, +# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY +# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, +# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA +# HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +# #---------------------------------------------------------------------- # # gen_keywordlist.pl @@ -28,6 +51,7 @@ # #---------------------------------------------------------------------- + use strict; use warnings; use Getopt::Long; From e80cb6a2474a0e78d24de783d8ff7f77beb982dc Mon Sep 17 00:00:00 2001 From: Shoaib Date: Fri, 19 Aug 2022 19:30:47 +0200 Subject: [PATCH 12/18] Fixed Final Issues 1) regression test restored 2) oid caching code restored 3) whitespace removed 4) copyrights added --- src/backend/executor/cypher_set.c | 4 ++++ src/backend/nodes/ag_nodes.c | 2 +- src/backend/parser/cypher_analyze.c | 2 +- tools/PerfectHash.pm | 23 +++++++++++++++++++++++ 4 files changed, 29 insertions(+), 2 deletions(-) diff --git a/src/backend/executor/cypher_set.c b/src/backend/executor/cypher_set.c index aef9180c5..73ce1fa7f 100644 --- a/src/backend/executor/cypher_set.c +++ b/src/backend/executor/cypher_set.c @@ -240,6 +240,10 @@ static bool check_path(agtype_value *path, graphid updated_id) return false; } +/* + * Construct a new agtype path with the entity with updated_id + * replacing all of its instances in path with updated_entity + */ static agtype_value *replace_entity_in_path(agtype_value *path, graphid updated_id, agtype *updated_entity) diff --git a/src/backend/nodes/ag_nodes.c b/src/backend/nodes/ag_nodes.c index fc60e3aa7..995c087ce 100644 --- a/src/backend/nodes/ag_nodes.c +++ b/src/backend/nodes/ag_nodes.c @@ -59,7 +59,7 @@ const char *node_names[] = { "cypher_update_item", "cypher_delete_information", "cypher_delete_item", - "cypher_merge_information", + "cypher_merge_information" }; /* diff --git a/src/backend/parser/cypher_analyze.c b/src/backend/parser/cypher_analyze.c index 8b96dc23f..9ffb86342 100644 --- a/src/backend/parser/cypher_analyze.c +++ b/src/backend/parser/cypher_analyze.c @@ -183,7 +183,7 @@ static bool convert_cypher_walker(Node *node, ParseState *pstate) /* recurse on query */ result = query_tree_walker(query, convert_cypher_walker, pstate, flags); - /* todo: I don't understand why wrote this.
but, can't event this */ + /* check for EXPLAIN */ if (extra_node != NULL && nodeTag(extra_node) == T_ExplainStmt) { ExplainStmt *estmt = NULL; diff --git a/tools/PerfectHash.pm b/tools/PerfectHash.pm index 54f5d4e99..5a04f7e95 100644 --- a/tools/PerfectHash.pm +++ b/tools/PerfectHash.pm @@ -1,3 +1,26 @@ +# +# For PostgreSQL Database Management System: +# (formerly known as Postgres, then as Postgres95) +# +# Portions Copyright (c) 1996-2010, The PostgreSQL Global Development Group +# +# Portions Copyright (c) 1994, The Regents of the University of California +# +# Permission to use, copy, modify, and distribute this software and its documentation for any purpose, +# without fee, and without a written agreement is hereby granted, provided that the above copyright notice +# and this paragraph and the following two paragraphs appear in all copies. +# +# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, +# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, +# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY +# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, +# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA +# HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +# #---------------------------------------------------------------------- # # PerfectHash.pm From 9749d0d2d70e30d3a1b1433e0de7ae56edae7d62 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Mon, 22 Aug 2022 09:44:10 +0200 Subject: [PATCH 13/18] Fixed Final Issues 1) regression test restored 2) oid caching code restored 3) whitespace removed 4) copyrights added --- src/backend/utils/adt/agtype.c | 6 +++--- src/include/utils/agtype.h | 19 ++++++++----------- src/include/utils/graphid.h | 2 ++ 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/backend/utils/adt/agtype.c b/src/backend/utils/adt/agtype.c index 019dc98c7..c610aa7f6 100644 --- a/src/backend/utils/adt/agtype.c +++ b/src/backend/utils/adt/agtype.c @@ -193,8 +193,8 @@ Oid get_AGTYPEOID(void) { if (g_AGTYPEOID == InvalidOid) { - g_AGTYPEOID = GetSysCacheOid1(TYPENAMENSP, CStringGetDatum("agtype"), - ObjectIdGetDatum(ag_catalog_namespace_id())); + g_AGTYPEOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, CStringGetDatum("agtype"), + ObjectIdGetDatum(ag_catalog_namespace_id())); } return g_AGTYPEOID; @@ -205,7 +205,7 @@ Oid get_AGTYPEARRAYOID(void) { if (g_AGTYPEARRAYOID == InvalidOid) { - g_AGTYPEARRAYOID = GetSysCacheOid1(TYPENAMENSP, + g_AGTYPEARRAYOID = GetSysCacheOid2(TYPENAMENSP,Anum_pg_type_oid, CStringGetDatum("_agtype"), ObjectIdGetDatum(ag_catalog_namespace_id())); } diff --git a/src/include/utils/agtype.h b/src/include/utils/agtype.h index d10ea2392..c427f3cfe 100644 --- a/src/include/utils/agtype.h +++ b/src/include/utils/agtype.h @@ -545,15 +545,12 @@ agtype_value *string_to_agtype_value(char *s); agtype_value *integer_to_agtype_value(int64 int_value); void add_agtype(Datum val, bool is_null, agtype_in_state *result, Oid val_type, bool key_scalar); -void clear_global_Oids_AGTYPE(void); -void clear_global_Oids_GRAPHID(void); - -// OID of agtype and _agtype -#define AGTYPEOID \ - (GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, CStringGetDatum("agtype"), \ - 
ObjectIdGetDatum(ag_catalog_namespace_id()))) -#define AGTYPEARRAYOID \ - (GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, \ - CStringGetDatum("_agtype"), \ - ObjectIdGetDatum(ag_catalog_namespace_id()))) + +/* Oid accessors for AGTYPE */ +Oid get_AGTYPEOID(void); +Oid get_AGTYPEARRAYOID(void); +void clear_global_Oids_AGTYPE(void); +#define AGTYPEOID get_AGTYPEOID() +#define AGTYPEARRAYOID get_AGTYPEARRAYOID() + #endif diff --git a/src/include/utils/graphid.h b/src/include/utils/graphid.h index 7c6e90d61..18244db77 100644 --- a/src/include/utils/graphid.h +++ b/src/include/utils/graphid.h @@ -65,6 +65,8 @@ typedef int64 graphid; graphid make_graphid(const int32 label_id, const int64 entry_id); int32 get_graphid_label_id(const graphid gid); int64 get_graphid_entry_id(const graphid gid); +Oid get_GRAPHIDOID(void); +Oid get_GRAPHIDARRAYOID(void); void clear_global_Oids_GRAPHID(void); #endif From afbb241e8930fe922b4fcbccdd9be0d7528b25b1 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Mon, 22 Aug 2022 10:04:26 +0200 Subject: [PATCH 14/18] Fixed Final Issues 1) regression test restored 2) oid caching code restored 3) whitespace removed 4) copyrights added --- src/backend/utils/adt/agtype.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/backend/utils/adt/agtype.c b/src/backend/utils/adt/agtype.c index c610aa7f6..1c5ba86c6 100644 --- a/src/backend/utils/adt/agtype.c +++ b/src/backend/utils/adt/agtype.c @@ -193,7 +193,8 @@ Oid get_AGTYPEOID(void) { if (g_AGTYPEOID == InvalidOid) { - g_AGTYPEOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, CStringGetDatum("agtype"), + g_AGTYPEOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, + CStringGetDatum("agtype"), ObjectIdGetDatum(ag_catalog_namespace_id())); } From 69a8902eec977cfc00b07d1c050c3c3ee1e88c45 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Mon, 22 Aug 2022 10:28:31 +0200 Subject: [PATCH 15/18] Fixed Final Issues 1) regression test restored 2) oid caching code restored 3) whitespace removed 4) copyrights added --- src/backend/catalog/ag_label.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/catalog/ag_label.c b/src/backend/catalog/ag_label.c index 5ef127247..f5572c8d3 100644 --- a/src/backend/catalog/ag_label.c +++ b/src/backend/catalog/ag_label.c @@ -116,7 +116,7 @@ void delete_label(Oid relation) (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("label (relation=%u) does not exist", relation))); } - + CatalogTupleDelete(ag_label, &tuple->t_self); systable_endscan(scan_desc); From 8966d25a33e565d839aed1f4e09e4416fd9ffcc2 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Tue, 23 Aug 2022 10:00:49 +0200 Subject: [PATCH 16/18] Fixed Final Issues 1) regression test restored 2) oid caching code restored 3) whitespace removed 4) copyrights added --- src/backend/nodes/ag_nodes.c | 2 +- src/backend/utils/adt/graphid.c | 7 ++++--- src/include/utils/graphid.h | 8 +++----- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/backend/nodes/ag_nodes.c b/src/backend/nodes/ag_nodes.c index 995c087ce..63f65c938 100644 --- a/src/backend/nodes/ag_nodes.c +++ b/src/backend/nodes/ag_nodes.c @@ -121,7 +121,7 @@ const ExtensibleNodeMethods node_methods[] = { DEFINE_NODE_METHODS_EXTENDED(cypher_update_item), DEFINE_NODE_METHODS_EXTENDED(cypher_delete_information), DEFINE_NODE_METHODS_EXTENDED(cypher_delete_item), - DEFINE_NODE_METHODS_EXTENDED(cypher_merge_information), + DEFINE_NODE_METHODS_EXTENDED(cypher_merge_information) }; static bool equal_ag_node(const ExtensibleNode *a, const ExtensibleNode *b) diff --git 
a/src/backend/utils/adt/graphid.c b/src/backend/utils/adt/graphid.c index e46d9c432..a5846381b 100644 --- a/src/backend/utils/adt/graphid.c +++ b/src/backend/utils/adt/graphid.c @@ -36,8 +36,9 @@ Oid get_GRAPHIDOID(void) { if (g_GRAPHIDOID == InvalidOid) { - g_GRAPHIDOID = GetSysCacheOid1(TYPENAMENSP, CStringGetDatum("graphid"), - ObjectIdGetDatum(ag_catalog_namespace_id())); + g_GRAPHIDOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, + CStringGetDatum("graphid"), + ObjectIdGetDatum(ag_catalog_namespace_id())); } return g_GRAPHIDOID; @@ -48,7 +49,7 @@ Oid get_GRAPHIDARRAYOID(void) { if (g_GRAPHIDARRAYOID == InvalidOid) { - g_GRAPHIDARRAYOID = GetSysCacheOid1(TYPENAMENSP, + g_GRAPHIDARRAYOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, CStringGetDatum("_graphid"), ObjectIdGetDatum(ag_catalog_namespace_id())); } diff --git a/src/include/utils/graphid.h b/src/include/utils/graphid.h index 18244db77..999661cf2 100644 --- a/src/include/utils/graphid.h +++ b/src/include/utils/graphid.h @@ -53,11 +53,9 @@ typedef int64 graphid; #define AG_GETARG_GRAPHID(a) DATUM_GET_GRAPHID(PG_GETARG_DATUM(a)) #define AG_RETURN_GRAPHID(x) return GRAPHID_GET_DATUM(x) -// OID of graphid and _graphid -#define GRAPHIDOID \ - (GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, \ - CStringGetDatum("graphid"), \ - ObjectIdGetDatum(ag_catalog_namespace_id()))) +/* Oid accessors for GRAPHID */ +#define GRAPHIDOID get_GRAPHIDOID() +#define GRAPHIDARRAYOID get_GRAPHIDARRAYOID() #define GET_LABEL_ID(id) \ (((uint64)id) >> ENTRY_ID_BITS) From a43e72146892bd6bc5138d715a049678bd734d19 Mon Sep 17 00:00:00 2001 From: Shoaib Date: Tue, 23 Aug 2022 10:28:36 +0200 Subject: [PATCH 17/18] Fixed Final Issues 1) regression test restored 2) oid caching code restored 3) whitespace removed 4) copyrights added --- .gitignore | 2 +- Makefile | 2 +- src/backend/nodes/ag_nodes.c | 2 +- src/backend/nodes/cypher_readfuncs.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index d5dea3281..46a93606b 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,4 @@ build.sh .idea .deps -.DS_Store \ No newline at end of file +.DS_Store diff --git a/Makefile b/Makefile index 366ba7402..f8b1369b5 100644 --- a/Makefile +++ b/Makefile @@ -123,4 +123,4 @@ src/backend/parser/cypher_gram.c: BISONFLAGS += --defines=src/include/parser/cyp src/backend/parser/cypher_parser.o: src/backend/parser/cypher_gram.c src/backend/parser/cypher_keywords.o: src/backend/parser/cypher_gram.c -src/backend/parser/ag_scanner.c: FLEX_NO_BACKUP=yes \ No newline at end of file +src/backend/parser/ag_scanner.c: FLEX_NO_BACKUP=yes diff --git a/src/backend/nodes/ag_nodes.c b/src/backend/nodes/ag_nodes.c index 63f65c938..d65bb5038 100644 --- a/src/backend/nodes/ag_nodes.c +++ b/src/backend/nodes/ag_nodes.c @@ -23,9 +23,9 @@ #include "nodes/ag_nodes.h" #include "nodes/cypher_copyfuncs.h" -#include "nodes/cypher_nodes.h" #include "nodes/cypher_outfuncs.h" #include "nodes/cypher_readfuncs.h" +#include "nodes/cypher_nodes.h" static bool equal_ag_node(const ExtensibleNode *a, const ExtensibleNode *b); diff --git a/src/backend/nodes/cypher_readfuncs.c b/src/backend/nodes/cypher_readfuncs.c index 972e7211f..02e5afba5 100644 --- a/src/backend/nodes/cypher_readfuncs.c +++ b/src/backend/nodes/cypher_readfuncs.c @@ -21,8 +21,8 @@ #include "nodes/readfuncs.h" -#include "nodes/cypher_nodes.h" #include "nodes/cypher_readfuncs.h" +#include "nodes/cypher_nodes.h" /* * Copied From Postgres From b7a7a6d13b00a1dce05610549b6f13e94eba8b40 Mon Sep 17 00:00:00 2001 
From: Shoaib Date: Tue, 23 Aug 2022 20:07:04 +0200 Subject: [PATCH 18/18] Fixed Final Issues 1) regression test restored 2) oid caching code restored 3) whitespace removed 4) copyrights added --- src/backend/utils/adt/graphid.c | 2 +- src/backend/utils/load/libcsv.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/backend/utils/adt/graphid.c b/src/backend/utils/adt/graphid.c index a5846381b..89bd8e528 100644 --- a/src/backend/utils/adt/graphid.c +++ b/src/backend/utils/adt/graphid.c @@ -38,7 +38,7 @@ Oid get_GRAPHIDOID(void) { g_GRAPHIDOID = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, CStringGetDatum("graphid"), - ObjectIdGetDatum(ag_catalog_namespace_id())); + ObjectIdGetDatum(ag_catalog_namespace_id())); } return g_GRAPHIDOID; diff --git a/src/backend/utils/load/libcsv.c b/src/backend/utils/load/libcsv.c index b94e4fadd..c17304caa 100644 --- a/src/backend/utils/load/libcsv.c +++ b/src/backend/utils/load/libcsv.c @@ -245,7 +245,7 @@ csv_set_space_func(struct csv_parser *p, int (*f)(unsigned char)) /* Set the space function */ if (p) p->is_space = f; } - + void csv_set_term_func(struct csv_parser *p, int (*f)(unsigned char)) { @@ -259,7 +259,7 @@ csv_set_realloc_func(struct csv_parser *p, void *(*f)(void *, size_t)) /* Set the realloc function used to increase buffer size */ if (p && f) p->realloc_func = f; } - + void csv_set_free_func(struct csv_parser *p, void (*f)(void *)) { @@ -282,7 +282,7 @@ csv_get_buffer_size(const struct csv_parser *p) return p->entry_size; return 0; } - + static int csv_increase_buffer(struct csv_parser *p) { @@ -291,7 +291,7 @@ csv_increase_buffer(struct csv_parser *p) if (p == NULL) return 0; if (p->realloc_func == NULL) return 0; - + /* Increase the size of the entry buffer. Attempt to increase size by * p->blk_size, if this is larger than SIZE_MAX try to increase current * buffer size to SIZE_MAX. If allocation fails, try to allocate halve @@ -321,7 +321,7 @@ csv_increase_buffer(struct csv_parser *p) p->entry_size += to_add; return 0; } - + size_t csv_parse(struct csv_parser *p, const void *s, size_t len, void (*cb1)(void *, size_t, void *), void (*cb2)(int c, void *), void *data) {