diff --git a/configure b/configure index e612c658399..94a195b6d40 100755 --- a/configure +++ b/configure @@ -742,7 +742,6 @@ with_ldap with_krb_srvnam krb_srvtab with_gssapi -with_pythonsrc_ext with_python with_perl with_tcl @@ -909,7 +908,6 @@ with_tcl with_tclconfig with_perl with_python -with_pythonsrc_ext with_gssapi with_krb_srvnam with_pam @@ -1647,7 +1645,6 @@ Optional Packages: --with-tclconfig=DIR tclConfig.sh is in DIR --with-perl build Perl modules (PL/Perl) --without-python build Python modules (PL/Python) - --with-pythonsrc-ext build Python modules for gpMgmt --with-gssapi build with GSSAPI support --with-krb-srvnam=NAME default service principal name in Kerberos (GSSAPI) [postgres] @@ -9456,39 +9453,6 @@ fi $as_echo "$with_python" >&6; } -# -# Optionally build Python modules for gpMgmt -# -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build Python modules for gpMgmt" >&5 -$as_echo_n "checking whether to build Python modules for gpMgmt... " >&6; } - - - -# Check whether --with-pythonsrc-ext was given. -if test "${with_pythonsrc_ext+set}" = set; then : - withval=$with_pythonsrc_ext; - case $withval in - yes) - : - ;; - no) - : - ;; - *) - as_fn_error $? 
"no argument expected for --with-pythonsrc-ext option" "$LINENO" 5 - ;; - esac - -else - with_pythonsrc_ext=no - -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_pythonsrc_ext" >&5 -$as_echo "$with_pythonsrc_ext" >&6; } - - # # GSSAPI diff --git a/configure.ac b/configure.ac index a1e3d9709db..ca68dfa89e9 100644 --- a/configure.ac +++ b/configure.ac @@ -1021,14 +1021,6 @@ PGAC_ARG_BOOL(with, python, yes, [build Python modules (PL/Python)]) AC_MSG_RESULT([$with_python]) AC_SUBST(with_python) -# -# Optionally build Python modules for gpMgmt -# -AC_MSG_CHECKING([whether to build Python modules for gpMgmt]) -PGAC_ARG_BOOL(with, pythonsrc-ext, no, [build Python modules for gpMgmt]) -AC_MSG_RESULT([$with_pythonsrc_ext]) -AC_SUBST(with_pythonsrc_ext) - # # GSSAPI diff --git a/deploy/build/README.Rhel-Rocky.bash b/deploy/build/README.Rhel-Rocky.bash index 8fdf57c781e..93aa1e68965 100755 --- a/deploy/build/README.Rhel-Rocky.bash +++ b/deploy/build/README.Rhel-Rocky.bash @@ -10,6 +10,7 @@ sudo yum --enablerepo=powertools install -y libyaml-devel sudo yum install -y postgresql sudo yum install -y postgresql-devel +sudo yum install -y python3-psycopg2 pip3.9 install -r ../../python-dependencies.txt diff --git a/deploy/build/README.Ubuntu.bash b/deploy/build/README.Ubuntu.bash index ec9ca3ee752..7a0086ada44 100755 --- a/deploy/build/README.Ubuntu.bash +++ b/deploy/build/README.Ubuntu.bash @@ -36,7 +36,6 @@ sudo apt-get install -y \ python3-dev \ python3-pip \ python3-psutil \ - python3-pygresql \ python3-yaml \ zlib1g-dev diff --git a/gpAux/Makefile b/gpAux/Makefile index 6502cffd0c4..0a63fd58fa6 100644 --- a/gpAux/Makefile +++ b/gpAux/Makefile @@ -142,7 +142,7 @@ endif ifneq (false, ${PG_LANG}) -CONFIGFLAGS+= --with-perl --with-python PYTHON=python3 +CONFIGFLAGS+= --with-perl --with-python PYTHON=python3.9 ifdef TCL_CFG CONFIGFLAGS+= --with-tcl-config=${TCL_CFG} endif @@ -218,8 +218,8 @@ perl_archlibexp:=$(shell perl -MConfig -e 'print $$Config{archlibexp}') # 
set default build steps define BUILD_STEPS @$(MAKE) -C $(BUILDDIR)/src/backend generated-headers - cd $(BUILDDIR) && PYGRESQL_LDFLAGS=' -Wl,-rpath,\$$$$ORIGIN/.. -Wl,--enable-new-dtags ' $(MAKE) $(PARALLEL_MAKE_OPTS) install - cd $(BUILDDIR)/src/pl/plpython && $(MAKE) clean && $(MAKE) $(PARALLEL_MAKE_OPTS) install && cd $(BUILDDIR) + cd $(BUILDDIR) && $(MAKE) $(PARALLEL_MAKE_OPTS) install + cd $(BUILDDIR)/src/pl/plpython && $(MAKE) clean && echo 'LDFLAGS += -Wl,-rpath,\$$$$ORIGIN/../../ext/python/lib/ -Wl,--enable-new-dtags' >> Makefile && $(MAKE) $(PARALLEL_MAKE_OPTS) install && cd $(BUILDDIR) cd $(BUILDDIR)/src/pl/plperl && $(MAKE) clean && echo "LDFLAGS += -Wl,-rpath,$(perl_archlibexp)/CORE -Wl,--enable-new-dtags" >> GNUmakefile && echo "LDFLAGS_SL += -Wl,-rpath,$(perl_archlibexp)/CORE -Wl,--enable-new-dtags" >> GNUmakefile && $(MAKE) $(PARALLEL_MAKE_OPTS) install && cd $(BUILDDIR) #@$(MAKE) greenplum_path INSTLOC=$(INSTLOC) #@$(MAKE) mgmtcopy INSTLOC=$(INSTLOC) @@ -254,7 +254,6 @@ define BUILD_STEPS cp -p $(GPMGMT)/bin/gpload $(INSTLOC)/bin/gpload cp -p $(GPMGMT)/bin/gpload.py $(INSTLOC)/bin/gpload.py $(MAKE) copylibs INSTLOC=$(INSTLOC) - cd $(GPMGMT)/bin && $(MAKE) pygresql INSTLOC=$(INSTLOC) $(MAKE) clients INSTLOC=$(INSTLOC) CLIENTSINSTLOC=$(CLIENTSINSTLOC) endef endif @@ -348,7 +347,6 @@ CLIENTS_HOME_DIR=$(BLD_HOME) endif CLIENTSINSTLOC=$(CLIENTS_HOME_DIR)/cloudberry-clients CLIENTSINSTLOC_BIN=$(CLIENTSINSTLOC)/bin -CLIENTSINSTLOC_BINEXT=$(CLIENTSINSTLOC)/bin/ext/ CLIENTSINSTLOC_EXT=$(CLIENTSINSTLOC)/ext CLIENTSINSTLOC_LIB=$(CLIENTSINSTLOC)/lib CLIENTSINSTLOC_LIB_PWARE=$(CLIENTSINSTLOC)/lib/pware @@ -372,15 +370,6 @@ define tmpCLIENTS_FILESET_BIN endef CLIENTS_FILESET_BIN = $(strip $(tmpCLIENTS_FILESET_BIN)) -# pg.py, pgdb.py, _pg.so are from pygresql which does not install into a single module -define tmpCLIENTS_FILESET_BINEXT - pg.py - pgdb.py - _pg*.so - yaml -endef -CLIENTS_FILESET_BINEXT = $(strip $(tmpCLIENTS_FILESET_BINEXT)) - BLD_PYTHON_FILESET=. 
BLD_OS:=$(shell uname -s) @@ -418,8 +407,6 @@ else # ---- copy GPDB fileset ---- mkdir -p $(CLIENTSINSTLOC_BIN) (cd $(INSTLOC)/bin/ && $(TAR) cf - $(CLIENTS_FILESET_BIN)) | (cd $(CLIENTSINSTLOC_BIN)/ && $(TAR) xpf -)$(check_pipe_for_errors) - mkdir -p $(CLIENTSINSTLOC_BINEXT) - (cd $(GPMGMT)/bin/ext/ && $(TAR) cf - $(CLIENTS_FILESET_BINEXT)) | (cd $(CLIENTSINSTLOC_BINEXT)/ && $(TAR) xpf -)$(check_pipe_for_errors) ifneq "$(PYTHONHOME)" "" mkdir -p $(CLIENTSINSTLOC_EXT)/python (cd $(PYTHONHOME) && $(TAR) cf - $(BLD_PYTHON_FILESET)) | (cd $(CLIENTSINSTLOC_EXT)/python/ && $(TAR) xpf -)$(check_pipe_for_errors) diff --git a/gpAux/client/install/src/windows/CreatePackage.bat b/gpAux/client/install/src/windows/CreatePackage.bat index b6a782bebda..6eb46cd4ebe 100644 --- a/gpAux/client/install/src/windows/CreatePackage.bat +++ b/gpAux/client/install/src/windows/CreatePackage.bat @@ -11,8 +11,6 @@ type nul > %GPDB_INSTALL_PATH%\bin\gppylib\__init__.py copy ..\..\..\..\..\gpMgmt\bin\gppylib\gpversion.py %GPDB_INSTALL_PATH%\bin\gppylib\ perl -pi.bak -e "s,\$Revision\$,%VERSION%," %GPDB_INSTALL_PATH%\bin\gpload.py copy ..\..\..\..\..\gpMgmt\bin\gpload.bat %GPDB_INSTALL_PATH%\bin -for %%f in (..\..\..\..\..\gpMgmt\bin\pythonSrc\ext\PyYAML-*.tar.gz) do tar -xf %%f -for /D %%d in (PyYAML-*) do copy %%d\lib\yaml\* %GPDB_INSTALL_PATH%\lib\python\yaml perl -p -e "s,__VERSION_PLACEHOLDER__,%VERSION%," greenplum-clients.wxs > greenplum-clients-%VERSION%.wxs candle.exe -nologo greenplum-clients-%VERSION%.wxs -out greenplum-clients-%VERSION%.wixobj -dSRCDIR=%GPDB_INSTALL_PATH% -dVERSION=%VERSION% light.exe -nologo -sval greenplum-clients-%VERSION%.wixobj -out greenplum-clients-x86_64.msi \ No newline at end of file diff --git a/gpAux/client/install/src/windows/greenplum-clients.wxs b/gpAux/client/install/src/windows/greenplum-clients.wxs index 0f17c299112..bdb679f555e 100755 --- a/gpAux/client/install/src/windows/greenplum-clients.wxs +++ 
b/gpAux/client/install/src/windows/greenplum-clients.wxs @@ -1139,35 +1139,6 @@ If you want to review or change any of your installation settings, click Back. C - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1179,8 +1150,6 @@ If you want to review or change any of your installation settings, click Back. C - - diff --git a/gpMgmt/Makefile b/gpMgmt/Makefile index 372dae8ed46..499f21170db 100644 --- a/gpMgmt/Makefile +++ b/gpMgmt/Makefile @@ -18,17 +18,6 @@ install: generate_greenplum_path_file if [ -e bin/ext/__init__.py ]; then \ cp -rp bin/ext/__init__.py $(DESTDIR)$(prefix)/lib/python ; \ fi - if [ -e bin/ext/psutil ]; then \ - cp -rp bin/ext/psutil $(DESTDIR)$(prefix)/lib/python ; \ - fi - if [ -e bin/ext/pgdb.py ]; then \ - cp -rp bin/ext/pgdb.py $(DESTDIR)$(prefix)/lib/python && \ - cp -rp bin/ext/pg.py $(DESTDIR)$(prefix)/lib/python && \ - cp -rp bin/ext/_pg*.so $(DESTDIR)$(prefix)/lib/python ; \ - fi - if [ -e bin/ext/yaml ]; then \ - cp -rp bin/ext/yaml $(DESTDIR)$(prefix)/lib/python ; \ - fi clean distclean: $(MAKE) -C bin $@ diff --git a/gpMgmt/bin/Makefile b/gpMgmt/bin/Makefile index 3d9ab50ad79..91d17426b89 100644 --- a/gpMgmt/bin/Makefile +++ b/gpMgmt/bin/Makefile @@ -70,54 +70,12 @@ PYLIB_DIR=$(SRC)/ext core: python3 gpconfig_modules/parse_guc_metadata.py $(DESTDIR)$(prefix) -ifeq ($(with_pythonsrc_ext), yes) -install: installdirs installprograms core psutil pygresql pyyaml -else install: installdirs installprograms core -endif # # Python Libraries # -# -# PyGreSQL -# -PYGRESQL_VERSION=5.2 -PYGRESQL_DIR=PyGreSQL-$(PYGRESQL_VERSION) -pygresql: - @echo "--- PyGreSQL" - cd $(PYLIB_SRC_EXT)/ && $(TAR) xzf $(PYGRESQL_DIR).tar.gz - cd $(PYLIB_SRC_EXT)/$(PYGRESQL_DIR)/ && PATH=$(DESTDIR)$(bindir):$$PATH LDFLAGS='$(LDFLAGS) $(PYGRESQL_LDFLAGS)' python3 setup.py build - cp -r $(PYLIB_SRC_EXT)/$(PYGRESQL_DIR)/build/lib*-3*/* $(PYLIB_DIR)/ - - -# -# PSUTIL -# -PSUTIL_VERSION=5.7.0 -PSUTIL_DIR=psutil-$(PSUTIL_VERSION) - -psutil: - @echo "--- psutil" 
-ifeq "$(findstring $(BLD_ARCH),aix7_ppc_64 )" "" - cd $(PYLIB_SRC_EXT)/ && $(TAR) xzf $(PSUTIL_DIR).tar.gz - cd $(PYLIB_SRC_EXT)/$(PSUTIL_DIR)/ && env -u CC python3 setup.py build - cp -r $(PYLIB_SRC_EXT)/$(PSUTIL_DIR)/build/lib.*/psutil $(PYLIB_DIR) -endif - -# -# PYYAML -# -PYYAML_VERSION=5.3.1 -PYYAML_DIR=PyYAML-$(PYYAML_VERSION) - -pyyaml: - @echo "--- pyyaml" - cd $(PYLIB_SRC_EXT)/ && $(TAR) xzf $(PYYAML_DIR).tar.gz - cd $(PYLIB_SRC_EXT)/$(PYYAML_DIR)/ && env -u CC python3 setup.py build - cp -r $(PYLIB_SRC_EXT)/$(PYYAML_DIR)/build/lib*-3*/* $(PYLIB_DIR)/ - # # PYLINT # @@ -125,8 +83,6 @@ pyyaml: PYLINT_VERSION=0.21.0 PYLINT_DIR=pylint-$(PYLINT_VERSION) PYLINT_PYTHONPATH=$(PYLIB_DIR):$(PYLIB_SRC_EXT)/$(PYLINT_DIR)/build/lib/ -MOCK_VERSION=1.0.1 -MOCK_DIR=mock-$(MOCK_VERSION) SETUP_TOOLS_VERSION=36.6.0 PARSE_VERSION=1.8.2 SETUP_TOOLS_DIR=setuptools-$(SETUP_TOOLS_VERSION) @@ -135,8 +91,6 @@ PYTHONSRC_INSTALL=$(PYLIB_SRC_EXT)/install PYTHON_VERSION=$(shell python3 -c "import sys; print ('%s.%s' % (sys.version_info[0:2]))") PYTHONSRC_INSTALL_SITE=$(PYLIB_SRC_EXT)/install/lib/python$(PYTHON_VERSION)/site-packages PYTHONSRC_INSTALL_PYTHON_PATH=$(PYTHONPATH):$(PYTHONSRC_INSTALL_SITE) -# TODO: mock-1.0.1-py2.6.egg package should be updated. 
-MOCK_BIN=$(PYTHONSRC_INSTALL)/lib/python$(PYTHON_VERSION)/site-packages/mock-1.0.1-py2.6.egg UBUNTU_PLATFORM=$(shell if lsb_release -a 2>/dev/null | grep -q 'Ubuntu' ; then echo "Ubuntu"; fi) pylint: @@ -145,17 +99,6 @@ pylint: @cd $(PYLIB_SRC_EXT)/$(PYLINT_DIR)/ && python3 setup.py build 1> /dev/null @touch $(PYLIB_SRC_EXT)/$(PYLINT_DIR)/build/lib/__init__.py -$(MOCK_BIN): - @echo "--- mock for platform $(UBUNTU_PLATFORM)" - @if [ "$(UBUNTU_PLATFORM)" = "Ubuntu" ]; then\ - pip3 install mock;\ - else\ - mkdir -p $(PYTHONSRC_INSTALL_SITE) && \ - cd $(PYLIB_SRC_EXT)/ && $(TAR) xzf $(MOCK_DIR).tar.gz && \ - cd $(PYLIB_SRC_EXT)/$(MOCK_DIR)/ && \ - PYTHONPATH=$(PYTHONSRC_INSTALL_PYTHON_PATH) python3 setup.py install --prefix $(PYTHONSRC_INSTALL) ; \ - fi; - PYTHON_FILES=`grep -l --exclude=Makefile --exclude=gplogfilter "/bin/env python3" *`\ `grep -l "/bin/env python3" $(SRC)/../sbin/*`\ `find ./gppylib -name "*.py"`\ @@ -167,7 +110,7 @@ checkcode: pylint @echo -n "pylint_score=" > $(SRC)/../pylint_score.properties @grep "Your code has been rated at" $(SRC)/../pylint.txt | sed -e "s|Your .* \(.*\)/.*|\1|" >> $(SRC)/../pylint_score.properties -check: $(MOCK_BIN) +check: @echo "Running pure unit and also "unit" tests that require cluster to be up..." 
@TMPDIR=/tmp PYTHONPATH=$(SERVER_SRC):$(SERVER_SBIN):$(PYTHONPATH):$(PYTHONSRC_INSTALL_PYTHON_PATH):$(SRC)/ext:$(SBIN_DIR):$(LIB_DIR):$(PYLIB_DIR)/mock-1.0.1 \ gppylib/gpunit discover --verbose -s $(SRC)/gppylib -p "test_unit*.py" 2> $(SRC)/../gpMgmt_testunit_results.log 1> $(SRC)/../gpMgmt_testunit_output.log @@ -190,8 +133,6 @@ installcheck: installcheck-bash clean distclean: rm -rf $(PYLIB_SRC_EXT)/$(PYLINT_DIR) - rm -rf $(PYLIB_SRC_EXT)/$(PYGRESQL_DIR)/build - rm -rf $(PYLIB_SRC)/$(PYGRESQL_DIR)/build rm -rf *.pyc rm -f analyzedbc gpactivatestandbyc gpaddmirrorsc gpcheckcatc \ gpcheckperfc gpcheckresgroupimplc gpchecksubnetcfgc gpconfigc \ diff --git a/gpMgmt/bin/analyzedb b/gpMgmt/bin/analyzedb index cc51e265927..1cae7b6c55d 100755 --- a/gpMgmt/bin/analyzedb +++ b/gpMgmt/bin/analyzedb @@ -25,16 +25,15 @@ from contextlib import closing import pipes # for shell-quoting, pipes.quote() import fcntl import itertools - +import psycopg2 try: - import pg - from gppylib import gplog, pgconf, userinput from gppylib.commands.base import Command, WorkerPool, Worker from gppylib.operations import Operation from gppylib.gpversion import GpVersion from gppylib.db import dbconn from gppylib.operations.unix import CheckDir, CheckFile, MakeDir + from gppylib.utils import escape_string except ImportError as e: sys.exit('Cannot import modules. Please check that you have sourced greenplum_path.sh. Detail: ' + str(e)) @@ -172,7 +171,7 @@ def validate_schema_exists(pg_port, dbname, schema): try: dburl = dbconn.DbURL(port=pg_port, dbname=dbname) conn = dbconn.connect(dburl) - count = dbconn.querySingleton(conn, "select count(*) from pg_namespace where nspname='%s';" % pg.escape_string(schema)) + count = dbconn.querySingleton(conn, "select count(*) from pg_namespace where nspname='%s';" % escape_string(schema)) if count == 0: raise ExceptionNoStackTraceNeeded("Schema %s does not exist in database %s." 
% (schema, dbname)) finally: @@ -219,7 +218,7 @@ def get_partition_state_tuples(pg_port, dbname, catalog_schema, partition_info): try: modcount_sql = "select to_char(coalesce(sum(modcount::bigint), 0), '999999999999999999999') from gp_dist_random('%s.%s')" % (catalog_schema, tupletable) modcount = dbconn.querySingleton(conn, modcount_sql) - except pg.DatabaseError as e: + except psycopg2.DatabaseError as e: if "does not exist" in str(e): logger.info("Table %s.%s (%s) no longer exists and will not be analyzed", schemaname, partition_name, tupletable) else: @@ -982,7 +981,7 @@ def get_oid_str(table_list): def regclass_schema_tbl(schema, tbl): schema_tbl = "%s.%s" % (escape_identifier(schema), escape_identifier(tbl)) - return "to_regclass('%s')" % (pg.escape_string(schema_tbl)) + return "to_regclass('%s')" % (escape_string(schema_tbl)) # Escape double-quotes in a string, so that the resulting string is suitable for @@ -1250,7 +1249,7 @@ def validate_tables(conn, tablenames): while curr_batch < nbatches: batch = tablenames[curr_batch * batch_size:(curr_batch + 1) * batch_size] - oid_str = ','.join(map((lambda x: "('%s')" % pg.escape_string(x)), batch)) + oid_str = ','.join(map((lambda x: "('%s')" % escape_string(x)), batch)) if not oid_str: break @@ -1266,7 +1265,7 @@ def get_include_cols_from_exclude(conn, schema, table, exclude_cols): """ Given a list of excluded columns of a table, get the list of included columns """ - quoted_exclude_cols = ','.join(["'%s'" % pg.escape_string(x) for x in exclude_cols]) + quoted_exclude_cols = ','.join(["'%s'" % escape_string(x) for x in exclude_cols]) oid_str = regclass_schema_tbl(schema, table) cols = run_sql(conn, GET_INCLUDED_COLUMNS_FROM_EXCLUDE_SQL % (oid_str, quoted_exclude_cols)) @@ -1282,7 +1281,7 @@ def validate_columns(conn, schema, table, column_list): return sql = VALIDATE_COLUMN_NAMES_SQL % (regclass_schema_tbl(schema, table), - ','.join(["'%s'" % pg.escape_string(x) for x in column_list])) + ','.join(["'%s'" % 
escape_string(x) for x in column_list])) valid_col_count = dbconn.querySingleton(conn, sql) if int(valid_col_count) != len(column_list): diff --git a/gpMgmt/bin/gpactivatestandby b/gpMgmt/bin/gpactivatestandby index 4c2069f0970..914d20120d0 100755 --- a/gpMgmt/bin/gpactivatestandby +++ b/gpMgmt/bin/gpactivatestandby @@ -21,10 +21,9 @@ import time import shutil import tempfile from datetime import datetime, timedelta - +import psycopg2 # import GPDB modules try: - import pg as pygresql from gppylib.commands import unix, gp, pg from gppylib.db import dbconn from gppylib.gpparseopts import OptParser, OptChecker, OptionGroup, SUPPRESS_HELP @@ -341,7 +340,7 @@ def promote_standby(coordinator_data_dir): dbconn.execSQL(conn, 'CHECKPOINT') conn.close() return True - except pygresql.InternalError as e: + except (psycopg2.InternalError, psycopg2.OperationalError) as e: pass time.sleep(1) diff --git a/gpMgmt/bin/gpcheckcat b/gpMgmt/bin/gpcheckcat index 00c2e4b21f3..26c0e7e30d5 100755 --- a/gpMgmt/bin/gpcheckcat +++ b/gpMgmt/bin/gpcheckcat @@ -28,23 +28,20 @@ import re import sys import time from functools import reduce - +import psycopg2 +from psycopg2 import extras +from contextlib import closing try: from gppylib import gplog - from gppylib.db import dbconn from gppylib.gpcatalog import * from gppylib.commands.unix import * from gppylib.commands.gp import conflict_with_gpexpand from gppylib.system.info import * - from pgdb import DatabaseError from gpcheckcat_modules.unique_index_violation_check import UniqueIndexViolationCheck from gpcheckcat_modules.leaked_schema_dropper import LeakedSchemaDropper from gpcheckcat_modules.repair import Repair from gpcheckcat_modules.foreign_key_check import ForeignKeyCheck from gpcheckcat_modules.orphaned_toast_tables_check import OrphanedToastTablesCheck - - import pg - except ImportError as e: sys.exit('Error: unable to import module: ' + str(e)) @@ -138,7 +135,7 @@ class Global(): self.dbname = None self.firstdb = None self.alldb = [] - 
self.db = {} + self.conn = {} self.tmpdir = None self.reset_stmt_queues() @@ -207,30 +204,27 @@ def usage(exitarg=None): ############################### def getversion(): - db = connect() - curs = db.query(''' - select regexp_replace(version(), - E'.*PostgreSQL [^ ]+ .Apache Cloudberry ([1-9]+.[0-9]+|main).*', - E'\\\\1') as ver;''') - - row = curs.getresult()[0] - version = row[0] - - logger.debug('got version %s' % version) - return version - + with closing(connect()) as conn: + with conn.cursor() as curs: + curs.execute(''' + select regexp_replace(version(), + E'.*PostgreSQL [^ ]+ .Greenplum Database ([1-9]+.[0-9]+|main).*', + E'\\\\1') as ver;''') + row = curs.fetchone() + version = row[0] + logger.debug('got version %s' % version) + return version ############################### def getalldbs(): """ get all connectable databases """ - db = connect() - curs = db.query(''' - select datname from pg_database where datallowconn order by datname ''') - row = curs.getresult() - return row - + with closing(connect()) as conn: + with conn.cursor() as curs: + curs.execute('''select datname from pg_database where datallowconn order by datname''') + row = curs.fetchall() + return row ############################### def parseCommandLine(): @@ -343,19 +337,21 @@ def connect(user=None, password=None, host=None, port=None, try: logger.debug('connecting to %s:%s %s' % (host, port, database)) - db = pg.connect(host=host, port=port, user=user, - passwd=password, dbname=database, opt=options) - - except pg.InternalError as ex: + conn = psycopg2.connect(host=host, port=port, user=user, + password=password, dbname=database, options=options) + ## Don't execute query in a transaction block. 
+ conn.set_session(autocommit=True) + except (psycopg2.InternalError, psycopg2.OperationalError) as ex: logger.fatal('could not connect to %s: "%s"' % (database, str(ex).strip())) exit(1) logger.debug('connected with %s:%s %s' % (host, port, database)) - return db + return conn -############# +# NOTE: We cannot use connect2() with contextmanager, since we manage the connection +# ourselves. def connect2(cfgrec, user=None, password=None, database=None, utilityMode=True): host = cfgrec['address'] port = cfgrec['port'] @@ -367,22 +363,22 @@ def connect2(cfgrec, user=None, password=None, database=None, utilityMode=True): key = "%s.%s.%s.%s.%s.%s.%s" % (host, port, datadir, user, password, database, str(utilityMode)) - conns = GV.db.get(key) + conns = GV.conn.get(key) if conns: return conns[0] conn = connect(host=host, port=port, user=user, password=password, database=database, utilityMode=utilityMode) if conn: - GV.db[key] = [conn, cfgrec] + GV.conn[key] = [conn, cfgrec] return conn class execThread(Thread): - def __init__(self, cfg, db, qry): + def __init__(self, cfg, conn, qry): self.cfg = cfg - self.db = db + self.conn = conn self.qry = qry self.curs = None self.error = None @@ -390,11 +386,11 @@ class execThread(Thread): def run(self): try: - self.curs = self.db.query(self.qry) + self.curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + self.curs.execute(self.qry) except BaseException as e: self.error = e - def processThread(threads): batch = [] for th in threads: @@ -423,9 +419,9 @@ def connect2run(qry, col=None): # parallelise queries for dbid in GV.cfg: c = GV.cfg[dbid] - db = connect2(c) + conn = connect2(c) - thread = execThread(c, db, qry) + thread = execThread(c, conn, qry) thread.start() logger.debug('launching query thread %s for dbid %i' % (thread.name, dbid)) @@ -445,8 +441,8 @@ def connect2run(qry, col=None): err = [] for [cfg, curs] in batch: if col is None: - col = curs.listfields() - for row in curs.dictresult(): + col = [desc[0] 
for desc in curs.description] + for row in curs.fetchall(): err.append([cfg, col, row]) return err @@ -464,7 +460,6 @@ def formatErr(c, col, row): ############# def getGPConfiguration(): cfg = {} - db = connect() # note that in 4.0, sql commands cannot be run against the segment mirrors directly # so we filter out non-primary segment databases in the query qry = ''' @@ -474,13 +469,14 @@ def getGPConfiguration(): FROM gp_segment_configuration WHERE (role = 'p' or content < 0 ) ''' - curs = db.query(qry) - for row in curs.dictresult(): - if row['content'] == -1 and not row['isprimary']: - continue # skip standby coordinator - cfg[row['dbid']] = row - db.close() - return cfg + with closing(connect()) as conn: + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + curs.execute(qry) + for row in curs.fetchall(): + if row['content'] == -1 and not row['isprimary']: + continue # skip standby coordinator + cfg[row['dbid']] = row + return cfg def checkDistribPolicy(): logger.info('-----------------------------------') @@ -494,27 +490,27 @@ def checkDistribPolicy(): where pk.contype in('p', 'u') and d.policytype = 'p' and d.distkey = '' ''' - db = connect2(GV.cfg[GV.coordinator_dbid]) try: - curs = db.query(qry) - err = [] - for row in curs.dictresult(): - err.append([GV.cfg[GV.coordinator_dbid], ('nspname', 'relname', 'constraint'), row]) - - if not err: - logger.info('[OK] randomly distributed tables') - else: - GV.checkStatus = False - setError(ERROR_REMOVE) - logger.info('[FAIL] randomly distributed tables') - logger.error('pg_constraint has %d issue(s)' % len(err)) - logger.error(qry) - for e in err: - logger.error(formatErr(e[0], e[1], e[2])) - for e in err: - cons = e[2] - removeIndexConstraint(cons['nspname'], cons['relname'], - cons['constraint']) + conn = connect2(GV.cfg[GV.coordinator_dbid]) + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + curs.execute(qry) + err = [] + for row in curs.fetchall(): + 
err.append([GV.cfg[GV.coordinator_dbid], ('nspname', 'relname', 'constraint'), row]) + + if not err: + logger.info('[OK] randomly distributed tables') + else: + GV.checkStatus = False + setError(ERROR_REMOVE) + logger.info('[FAIL] randomly distributed tables') + logger.error('pg_constraint has %d issue(s)' % len(err)) + logger.error(qry) + for e in err: + logger.error(formatErr(e[0], e[1], e[2])) + cons = e[2] + removeIndexConstraint(cons['nspname'], cons['relname'], + cons['constraint']) except Exception as e: setError(ERROR_NOREPAIR) myprint('[ERROR] executing test: checkDistribPolicy') @@ -536,22 +532,23 @@ def checkDistribPolicy(): and not d.distkey::int2[] operator(pg_catalog.<@) pk.conkey ''' try: - curs = db.query(qry) - - err = [] - for row in curs.dictresult(): - err.append([GV.cfg[GV.coordinator_dbid], ('nspname', 'relname', 'constraint'), row]) - - if not err: - logger.info('[OK] unique constraints') - else: - GV.checkStatus = False - setError(ERROR_REMOVE) - logger.info('[FAIL] unique constraints') - logger.error('pg_constraint has %d issue(s)' % len(err)) - logger.error(qry) - for e in err: logger.error(formatErr(e[0], e[1], e[2])) + conn = connect2(GV.cfg[GV.coordinator_dbid]) + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + err = [] + curs.execute(qry) + for row in curs.fetchall(): + err.append([GV.cfg[GV.coordinator_dbid], ('nspname', 'relname', 'constraint'), row]) + + if not err: + logger.info('[OK] unique constraints') + else: + GV.checkStatus = False + setError(ERROR_REMOVE) + logger.info('[FAIL] unique constraints') + logger.error('pg_constraint has %d issue(s)' % len(err)) + logger.error(qry) for e in err: + logger.error(formatErr(e[0], e[1], e[2])) cons = e[2] removeIndexConstraint(cons['nspname'], cons['relname'], cons['constraint']) @@ -567,7 +564,6 @@ def checkPartitionIntegrity(): logger.info('-----------------------------------') logger.info('Checking pg_partition ...') err = [] - db = connect() # Check for 
the numsegments value of parent and child partition from the gp_distribution_policy table qry = ''' @@ -579,53 +575,55 @@ def checkPartitionIntegrity(): and not (inhrelid in (select ftrelid from pg_catalog.pg_foreign_table) and child.numsegments = NULL); ''' try: - curs = db.query(qry) - cols = ('inhparent', 'inhrelid', 'numsegments_parent', 'numsegments_child') - col_names = { - 'inhparent': 'table', - 'inhrelid': 'affected child', - 'numsegments_parent': 'parent numsegments value', - 'numsegments_child': 'child numsegments value', - } - - err = [] - for row in curs.dictresult(): - err.append([GV.cfg[GV.coordinator_dbid], cols, row]) - - if not err: - logger.info('[OK] partition numsegments check') - else: - err_count = len(err) - GV.checkStatus = False - setError(ERROR_REMOVE) - logger.info('[FAIL] partition numsegments check') - logger.error('partition numsegments check found %d issue(s)' % err_count) - if err_count > 100: - logger.error(qry) + with closing(connect()) as conn: + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + curs.execute(qry) + cols = ('inhparent', 'inhrelid', 'numsegments_parent', 'numsegments_child') + col_names = { + 'inhparent': 'table', + 'inhrelid': 'affected child', + 'numsegments_parent': 'parent numsegments value', + 'numsegments_child': 'child numsegments value', + } + + err = [] + for row in curs.fetchall(): + err.append([GV.cfg[GV.coordinator_dbid], cols, row]) + + if not err: + logger.info('[OK] partition numsegments check') + else: + err_count = len(err) + GV.checkStatus = False + setError(ERROR_REMOVE) + logger.info('[FAIL] partition numsegments check') + logger.error('partition numsegments check found %d issue(s)' % err_count) + if err_count > 100: + logger.error(qry) - myprint( - '[ERROR]: child partition(s) have different numsegments value ' - 'from the root partition. Check the gpcheckcat log for details.' 
- ) - logger.error('The following tables have different numsegments value (showing at most 100 rows):') + myprint( + '[ERROR]: child partition(s) have different numsegments value ' + 'from the root partition. Check the gpcheckcat log for details.' + ) + logger.error('The following tables have different numsegments value (showing at most 100 rows):') - # report at most 100 rows, for brevity - err = err[:100] + # report at most 100 rows, for brevity + err = err[:100] - for index, e in enumerate(err): - cfg = e[0] - col = e[1] - row = e[2] + for index, e in enumerate(err): + cfg = e[0] + col = e[1] + row = e[2] - if index == 0: - logger.error("--------") - logger.error(" " + " | ".join(map(col_names.get, col))) - logger.error(" " + "-+-".join(['-' * len(col_names[x]) for x in col])) + if index == 0: + logger.error("--------") + logger.error(" " + " | ".join(map(col_names.get, col))) + logger.error(" " + "-+-".join(['-' * len(col_names[x]) for x in col])) - logger.error(" " + " | ".join([str(row[x]) for x in col])) + logger.error(" " + " | ".join([str(row[x]) for x in col])) - if err_count > 100: - logger.error(" ...") + if err_count > 100: + logger.error(" ...") except Exception as e: setError(ERROR_NOREPAIR) @@ -648,74 +646,74 @@ def checkPartitionIntegrity(): and (select isleaf from pg_partition_tree(inhparent) where relid = inhrelid)); ''' try: - curs = db.query(qry) - cols = ('inhparent', 'inhrelid', 'dby_parent', 'dby_child') - col_names = { - 'inhparent': 'table', - 'inhrelid': 'affected child', - 'dby_parent': 'table distribution key', - 'dby_child': 'child distribution key', - } - - err = [] - for row in curs.dictresult(): - err.append([GV.cfg[GV.coordinator_dbid], cols, row]) - - if not err: - logger.info('[OK] partition distribution policy check') - else: - GV.checkStatus = False - setError(ERROR_REMOVE) - logger.info('[FAIL] partition distribution policy check') - logger.error('partition distribution policy check found %d issue(s)' % len(err)) - if len(err) 
> 100: - logger.error(qry) - - myprint( - '[ERROR]: child partition(s) are distributed differently from ' - 'the root partition, and must be manually redistributed, for ' - 'some tables. Check the gpcheckcat log for details.' - ) - logger.error('The following tables must be manually redistributed:') - - count = 0 - for e in err: - cfg = e[0] - col = e[1] - row = e[2] - - # TODO: generate a repair script for this row. This is - # difficult, since we can't redistribute child partitions - # directly. - - # report at most 100 rows, for brevity - if count == 100: - logger.error("...") - count += 1 - if count > 100: - continue - - if count == 0: - logger.error("--------") - logger.error(" " + " | ".join(map(col_names.get, col))) - logger.error(" " + "-+-".join(['-' * len(col_names[x]) for x in col])) - - logger.error(" " + " | ".join([str(row[x]) for x in col])) - count += 1 - - logger.error( - 'Execute an ALTER TABLE ... SET DISTRIBUTED BY statement, with ' - 'the desired distribution key, on the partition root for each ' - 'affected table.' - ) + with closing(connect()) as conn: + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + curs.execute(qry) + cols = ('inhparent', 'inhrelid', 'dby_parent', 'dby_child') + col_names = { + 'inhparent': 'table', + 'inhrelid': 'affected child', + 'dby_parent': 'table distribution key', + 'dby_child': 'child distribution key', + } + + err = [] + for row in curs.fetchall(): + err.append([GV.cfg[GV.coordinator_dbid], cols, row]) + + if not err: + logger.info('[OK] partition distribution policy check') + else: + GV.checkStatus = False + setError(ERROR_REMOVE) + logger.info('[FAIL] partition distribution policy check') + logger.error('partition distribution policy check found %d issue(s)' % len(err)) + if len(err) > 100: + logger.error(qry) + + myprint( + '[ERROR]: child partition(s) are distributed differently from ' + 'the root partition, and must be manually redistributed, for ' + 'some tables. 
Check the gpcheckcat log for details.' + ) + logger.error('The following tables must be manually redistributed:') + + count = 0 + for e in err: + cfg = e[0] + col = e[1] + row = e[2] + + # TODO: generate a repair script for this row. This is + # difficult, since we can't redistribute child partitions + # directly. + + # report at most 100 rows, for brevity + if count == 100: + logger.error("...") + count += 1 + if count > 100: + continue + + if count == 0: + logger.error("--------") + logger.error(" " + " | ".join(map(col_names.get, col))) + logger.error(" " + "-+-".join(['-' * len(col_names[x]) for x in col])) + + logger.error(" " + " | ".join([str(row[x]) for x in col])) + count += 1 + + logger.error( + 'Execute an ALTER TABLE ... SET DISTRIBUTED BY statement, with ' + 'the desired distribution key, on the partition root for each ' + 'affected table.' + ) except Exception as e: setError(ERROR_NOREPAIR) myprint('[ERROR] executing test: checkPartitionIntegrity') myprint(' Execution error: ' + str(e)) - db.close() - checkPoliciesRepair() ############# @@ -782,7 +780,7 @@ Produce repair scripts to remove dangling entries of gp_fastsequence: ''' -def removeFastSequence(db): +def removeFastSequence(conn): ''' MPP-14758: gp_fastsequence does not get cleanup after a failed transaction (AO/CO) Note: this is slightly different from the normal foreign key check @@ -808,14 +806,15 @@ def removeFastSequence(db): ON r.gp_segment_id = cfg.content WHERE cfg.role = 'p'; """ - curs = db.query(qry) - for row in curs.dictresult(): - seg = row['dbid'] # dbid of targeted segment - name = 'gp_fastsequence tuple' # for comment purposes - table = 'gp_fastsequence' # table name - cols = {'objid': row['objid']} # column name and value - objname = 'gp_fastsequence' # for comment purposes - buildRemove(seg, name, table, cols, objname) + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + curs.execute(qry) + for row in curs.fetchall(): + seg = row['dbid'] # dbid of 
targeted segment + name = 'gp_fastsequence tuple' # for comment purposes + table = 'gp_fastsequence' # table name + cols = {'objid': row['objid']} # column name and value + objname = 'gp_fastsequence' # for comment purposes + buildRemove(seg, name, table, cols, objname) except Exception as e: logger.error('removeFastSequence: ' + str(e)) @@ -877,42 +876,41 @@ def drop_leaked_schemas(leaked_schema_dropper, dbname): logger.info('-----------------------------------') logger.info('Checking for leaked temporary schemas') - db_connection = connect(database=dbname) try: - dropped_schemas = leaked_schema_dropper.drop_leaked_schemas(db_connection) - if not dropped_schemas: - logger.info('[OK] temporary schemas') - else: - logger.info('[FAIL] temporary schemas') - myprint("Found and dropped %d unbound temporary schemas" % len(dropped_schemas)) - logger.error('Dropped leaked schemas \'%s\' in the database \'%s\'' % (dropped_schemas, dbname)) + with closing(connect(database=dbname)) as conn: + dropped_schemas = leaked_schema_dropper.drop_leaked_schemas(conn) + if not dropped_schemas: + logger.info('[OK] temporary schemas') + else: + logger.info('[FAIL] temporary schemas') + myprint("Found and dropped %d unbound temporary schemas" % len(dropped_schemas)) + logger.error('Dropped leaked schemas \'%s\' in the database \'%s\'' % (dropped_schemas, dbname)) except Exception as e: setError(ERROR_NOREPAIR) myprint(' Execution error: ' + str(e)) - finally: - db_connection.close() def checkDepend(): # Check for dependencies on non-existent objects logger.info('-----------------------------------') logger.info('Checking Object Dependencies') - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + with conn.cursor() as curs: - # Catalogs that link up to pg_depend/pg_shdepend - qry = """ - select relname from pg_class c - where relkind='r' - and relnamespace=%d - and exists (select 1 from pg_attribute a where 
attname = 'oid' and a.attrelid = c.oid) - """ % PG_CATALOG_OID - curs = db.query(qry) - catalogs = [] - for row in curs.getresult(): - catalogs.append(row[0]) - - checkDependJoinCatalog(catalogs) - checkCatalogJoinDepend(catalogs) + # Catalogs that link up to pg_depend/pg_shdepend + qry = """ + select relname from pg_class c + where relkind='r' + and relnamespace=%d + and exists (select 1 from pg_attribute a where attname = 'oid' and a.attrelid = c.oid) + """ % PG_CATALOG_OID + curs.execute(qry) + catalogs = [] + for row in curs.fetchall(): + catalogs.append(row[0]) + + checkDependJoinCatalog(catalogs) + checkCatalogJoinDepend(catalogs) def checkDependJoinCatalog(catalogs): # Construct subquery that will verify that all (classid, objid) @@ -1069,7 +1067,6 @@ def checkOwners(): # # - Between 3.3 and 4.0 the ao segment columns migrated from pg_class # to pg_appendonly. - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) qry = ''' select distinct n.nspname, coalesce(o.relname, c.relname) as relname, a.rolname, m.rolname as coordinator_rolname @@ -1086,20 +1083,21 @@ def checkOwners(): where c.relowner <> r.relowner ''' try: - curs = db.query(qry) - - rows = [] - for row in curs.dictresult(): - rows.append(row) - - if len(rows) == 0: - logger.info('[OK] table ownership') - else: - GV.checkStatus = False - setError(ERROR_REMOVE) - logger.info('[FAIL] table ownership') - logger.error('found %d table ownership issue(s)' % len(rows)) - logger.error('%s' % qry) + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + curs.execute(qry) + rows = [] + for row in curs.fetchall(): + rows.append(row) + + if len(rows) == 0: + logger.info('[OK] table ownership') + else: + GV.checkStatus = False + setError(ERROR_REMOVE) + logger.info('[FAIL] table ownership') + logger.error('found %d table ownership issue(s)' % len(rows)) + logger.error('%s' % qry) for row in rows[0:100]: logger.error(' 
%s.%s relowner %s != %s' % (row['nspname'], row['relname'], row['rolname'], @@ -1123,7 +1121,6 @@ def checkOwners(): # - Ignore implementation types of pg_class entries - they should be # in the check above since ALTER TABLE is required to fix them, not # ALTER TYPE. - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) qry = ''' select distinct n.nspname, t.typname, a.rolname, m.rolname as coordinator_rolname from gp_dist_random('pg_type') r @@ -1134,27 +1131,28 @@ def checkOwners(): where r.typowner <> t.typowner ''' try: - curs = db.query(qry) + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + curs.execute(qry) - rows = [] - for row in curs.dictresult(): - rows.append(row) + rows = [] + for row in curs.fetchall(): + rows.append(row) - if len(rows) == 0: - logger.info('[OK] type ownership') - else: - GV.checkStatus = False - setError(ERROR_NOREPAIR) - logger.info('[FAIL] type ownership') - logger.error('found %d type ownership issue(s)' % len(rows)) - logger.error('%s' % qry) + if len(rows) == 0: + logger.info('[OK] type ownership') + else: + GV.checkStatus = False + setError(ERROR_NOREPAIR) + logger.info('[FAIL] type ownership') + logger.error('found %d type ownership issue(s)' % len(rows)) + logger.error('%s' % qry) for row in rows[0:100]: logger.error(' %s.%s typeowner %s != %s' % (row['nspname'], row['typname'], row['rolname'], row['coordinator_rolname'])) if len(rows) > 100: logger.error("...") - except Exception as e: setError(ERROR_NOREPAIR) myprint("[ERROR] executing test: check type ownership") @@ -1178,15 +1176,14 @@ def checkOwners(): def closeDbs(): - for key, conns in GV.db.items(): - db = conns[0] - db.close() - GV.db = {} # remove everything + for key, conns in GV.conn.items(): + conn = conns[0] + conn.close() + GV.conn = {} # remove everything # ------------------------------------------------------------------------------- def getCatObj(namestr): 
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) try: cat = GV.catalog.getCatalogTable(namestr) except Exception as e: @@ -1256,25 +1253,25 @@ def checkTableACL(cat): # Execute the query try: - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) - curs = db.query(qry) - nrows = curs.ntuples() - - if nrows == 0: - logger.info('[OK] Cross consistency acl check for ' + catname) - else: - GV.checkStatus = False - setError(ERROR_NOREPAIR) - GV.aclStatus = False - logger.info('[FAIL] Cross consistency acl check for ' + catname) - logger.error(' %s acl check has %d issue(s)' % (catname, nrows)) - - fields = curs.listfields() - gplog.log_literal(logger, logging.ERROR, " " + " | ".join(fields)) - for row in curs.getresult(): - gplog.log_literal(logger, logging.ERROR, " " + " | ".join(map(str, row))) - processACLResult(catname, fields, curs.getresult()) + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + with conn.cursor() as curs: + curs.execute(qry) + nrows = curs.rowcount + if nrows == 0: + logger.info('[OK] Cross consistency acl check for ' + catname) + else: + GV.checkStatus = False + setError(ERROR_NOREPAIR) + GV.aclStatus = False + logger.info('[FAIL] Cross consistency acl check for ' + catname) + logger.error(' %s acl check has %d issue(s)' % (catname, nrows)) + + fields = [desc[0] for desc in curs.description] + gplog.log_literal(logger, logging.ERROR, " " + " | ".join(fields)) + for row in curs.getresult(): + gplog.log_literal(logger, logging.ERROR, " " + " | ".join(map(str, row))) + processACLResult(catname, fields, curs.getresult()) except Exception as e: setError(ERROR_NOREPAIR) GV.aclStatus = False @@ -1295,9 +1292,9 @@ def checkForeignKey(cat_tables=None): if not cat_tables: cat_tables = GV.catalog.getCatalogTables() - db_connection = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) try: - foreign_key_check = ForeignKeyCheck(db_connection, logger, 
GV.opt['-S'], autoCast) + foreign_key_check = ForeignKeyCheck(conn, logger, GV.opt['-S'], autoCast) foreign_key_issues = foreign_key_check.runCheck(cat_tables) if foreign_key_issues: GV.checkStatus = False @@ -1307,13 +1304,14 @@ def checkForeignKey(cat_tables=None): processForeignKeyResult(catname, pkcatname, fields, results) if catname == 'gp_fastsequence' and pkcatname == 'pg_class': setError(ERROR_REMOVE) - removeFastSequence(db_connection) + removeFastSequence(conn) else: setError(ERROR_NOREPAIR) except Exception as ex: setError(ERROR_NOREPAIR) GV.foreignKeyStatus = False myprint(' Execution error: ' + str(ex)) + # ------------------------------------------------------------------------------- @@ -1391,40 +1389,39 @@ def checkTableMissingEntry(cat): # Execute the query try: - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) - curs = db.query(qry) - nrows = curs.ntuples() - results = curs.getresult() - fields = curs.listfields() - - if nrows != 0: - results = filterSpuriousFailures(catname, fields, results) - nrows = len(results) - - if nrows == 0: - logger.info('[OK] Checking for missing or extraneous entries for ' + catname) - else: - if catname in ['pg_constraint']: - logger_with_level = logger.warning - log_level = logging.WARNING + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + with conn.cursor() as curs: + curs.execute(qry) + nrows = curs.rowcount + results = curs.fetchall() + fields = [desc[0] for desc in curs.description] + + if nrows != 0: + results = filterSpuriousFailures(catname, fields, results) + nrows = len(results) + + if nrows == 0: + logger.info('[OK] Checking for missing or extraneous entries for ' + catname) else: - GV.checkStatus = False - GV.missingEntryStatus = False - logger_with_level = logger.error - log_level = logging.ERROR - - logger.info(('[%s] Checking for missing or extraneous entries for ' + catname) % - ('WARNING' if log_level == logging.WARNING else 'FAIL')) - logger_with_level(' %s has %d 
issue(s)' % (catname, nrows)) - gplog.log_literal(logger, log_level, " " + " | ".join(fields)) + if catname in ['pg_constraint']: + logger_with_level = logger.warning + log_level = logging.WARNING + else: + GV.checkStatus = False + GV.missingEntryStatus = False + logger_with_level = logger.error + log_level = logging.ERROR + + logger.info(('[%s] Checking for missing or extraneous entries for ' + catname) % + ('WARNING' if log_level == logging.WARNING else 'FAIL')) + logger_with_level(' %s has %d issue(s)' % (catname, nrows)) + gplog.log_literal(logger, log_level, " " + " | ".join(fields)) for row in results: gplog.log_literal(logger, log_level, " " + " | ".join(map(str, row))) processMissingDuplicateEntryResult(catname, fields, results, "missing") if catname == 'pg_type': generateVerifyFile(catname, fields, results, 'missing_extraneous') - return results - except Exception as e: setError(ERROR_NOREPAIR) GV.missingEntryStatus = False @@ -1434,8 +1431,8 @@ def checkTableMissingEntry(cat): class checkAOSegVpinfoThread(execThread): - def __init__(self, cfg, db): - execThread.__init__(self, cfg, db, None) + def __init__(self, cfg, conn): + execThread.__init__(self, cfg, conn, None) def run(self): aoseg_query = """ @@ -1447,9 +1444,10 @@ class checkAOSegVpinfoThread(execThread): try: # Read the list of aoseg tables from the database - curs = self.db.query(aoseg_query) + curs = self.conn.cursor() + curs.execute(aoseg_query) - for relname, relid, segrelid, segrelname, attr_count in curs.getresult(): + for relname, relid, segrelid, segrelname, attr_count in curs.fetchall(): # We check vpinfo consistency only for segs that are in state # AOSEG_STATE_DEFAULT and which are not RESERVED_SEGNO. # RESERVED_SEGNO can have a different number of attributes than @@ -1459,8 +1457,8 @@ class checkAOSegVpinfoThread(execThread): # The vpinfo for RESERVED_SEGNO will have more columns than # relnatts in that case. 
qry = "SELECT distinct(length(vpinfo)) FROM pg_aoseg.%s where state = 1 and segno <> 0;" % (segrelname) - vpinfo_curs = self.db.query(qry) - nrows = vpinfo_curs.ntuples() + curs.execute(qry) + nrows = curs.rowcount if nrows == 0: continue elif nrows > 1: @@ -1475,7 +1473,7 @@ class checkAOSegVpinfoThread(execThread): logger.error(qry) continue - vpinfo_length = vpinfo_curs.getresult()[0][0] + vpinfo_length = curs.fetchone()[0] # vpinfo is bytea type, the length of the first 3 fields is 12 bytes, and the size of AOCSVPInfoEntry is 16 # typedef struct AOCSVPInfo @@ -1508,8 +1506,85 @@ def checkAOSegVpinfo(): # parallelise check for dbid in GV.cfg: cfg = GV.cfg[dbid] - db_connection = connect2(cfg) - thread = checkAOSegVpinfoThread(cfg, db_connection) + conn = connect2(cfg) + thread = checkAOSegVpinfoThread(cfg, conn) + thread.start() + logger.debug('launching check thread %s for dbid %i' % + (thread.name, dbid)) + threads.append(thread) + + if (i % GV.opt['-B']) == 0: + processThread(threads) + threads = [] + + i += 1 + + processThread(threads) + +class checkAOLastrownumThread(execThread): + def __init__(self, cfg, conn): + execThread.__init__(self, cfg, conn, None) + + # pg_attribute_encoding.lastrownums[segno], if exists, should have a corresponding entry in gp_fastsequence with + # an objid same as segno. And the value of pg_attribute_encoding.lastrownums[segno] should fall in the range of + # [0, {last_sequence}] where {last_sequence} is the current gp_fastsequence value with the corresponding objid. + # Note that objmod starts from 0 but the array index starts from 1. 
+ def run(self): + aolastrownum_query = """ + SELECT + c.relname, + ao.relid, + ae.attnum, + ae.lastrownums, + f.objmod, + f.last_sequence, + ae.lastrownums[f.objmod + 1] AS lastrownum + FROM + pg_attribute_encoding ae + JOIN pg_appendonly ao ON ae.attrelid = ao.relid + LEFT JOIN gp_fastsequence f ON ao.segrelid = f.objid + JOIN pg_class c ON ao.relid = c.oid + WHERE + f.last_sequence IS NULL + OR f.last_sequence < ae.lastrownums[f.objmod + 1] + OR ae.lastrownums[f.objmod + 1] < 0; + """ + + try: + # Execute the query + curs = self.conn.cursor() + curs.execute(aolastrownum_query) + nrows = curs.rowcount + + if nrows == 0: + logger.info('[OK] AO lastrownums check for pg_attribute_encoding') + else: + GV.checkStatus = False + # we could not fix this issue automatically + setError(ERROR_NOREPAIR) + logger.info('[FAIL] AO lastrownums check for pg_attribute_encoding') + for relname, relid, attnum, lastrownums, objmod, last_sequence, last_rownum in curs.fetchall(): + logger.error(" found inconsistent last_rownum {rownum} with last_sequence {seqnum} of aoseg {segno} for table '{relname}' attribute {attnum} on segment {content}" + .format(rownum = last_rownum, + seqnum = last_sequence, + segno = objmod, + relname = relname, + attnum = attnum, + content = self.cfg['content'])) + + except Exception as e: + GV.checkStatus = False + self.error = e + +# for test "ao_lastrownums" +def checkAOLastrownums(): + threads = [] + i = 1 + # parallelise check + for dbid in GV.cfg: + cfg = GV.cfg[dbid] + conn = connect2(cfg) + thread = checkAOLastrownumThread(cfg, conn) thread.start() logger.debug('launching check thread %s for dbid %i' % (thread.name, dbid)) @@ -1663,9 +1738,10 @@ def checkTableInconsistentEntry(cat): # Execute the query try: - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) - curs = db.query(qry) - nrows = curs.ntuples() + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + curs = conn.cursor() + curs.execute(qry) + nrows = curs.rowcount if 
nrows == 0: logger.info('[OK] Checking for inconsistent entries for ' + catname) @@ -1676,16 +1752,14 @@ def checkTableInconsistentEntry(cat): logger.info('[FAIL] Checking for inconsistent entries for ' + catname) logger.error(' %s has %d issue(s)' % (catname, nrows)) - fields = curs.listfields() + fields = [desc[0] for desc in curs.description] gplog.log_literal(logger, logging.ERROR, " " + " | ".join(fields)) - for row in curs.getresult(): + results = curs.fetchall() + for row in results: gplog.log_literal(logger, logging.ERROR, " " + " | ".join(map(str, row))) - results = curs.getresult() processInconsistentEntryResult(catname, pkey, fields, results) if catname == 'pg_type': generateVerifyFile(catname, fields, results, 'duplicate') - - except Exception as e: setError(ERROR_NOREPAIR) GV.inconsistentEntryStatus = False @@ -1799,9 +1873,10 @@ def checkTableDuplicateEntry(cat): # Execute the query try: - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) - curs = db.query(qry) - nrows = curs.ntuples() + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + curs = conn.cursor() + curs.execute(qry) + nrows = curs.rowcount if nrows == 0: logger.info('[OK] Checking for duplicate entries for ' + catname) @@ -1813,7 +1888,7 @@ def checkTableDuplicateEntry(cat): fields = curs.listfields() gplog.log_literal(logger, logging.ERROR, " " + " | ".join(fields)) - results = curs.getresult() + results = curs.fetchall() for row in results: gplog.log_literal(logger, logging.ERROR, " " + " | ".join(map(str, row))) processMissingDuplicateEntryResult(catname, fields, results, "duplicate") @@ -1862,9 +1937,9 @@ def duplicateEntryQuery(catname, pkey): def checkUniqueIndexViolation(): logger.info('-----------------------------------') logger.info('Performing check: checking for violated unique indexes') - db_connection = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) - violations = 
UniqueIndexViolationCheck().runCheck(db_connection) + violations = UniqueIndexViolationCheck().runCheck(conn) checkname = 'unique index violation(s)' if violations: @@ -1901,9 +1976,9 @@ def checkOrphanedToastTables(): logger.info('-----------------------------------') logger.info('Performing check: checking for orphaned TOAST tables') - db_connection = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) checker = OrphanedToastTablesCheck() - check_passed = checker.runCheck(db_connection) + check_passed = checker.runCheck(conn) checkname = 'orphaned toast table(s)' if check_passed: @@ -2424,12 +2499,14 @@ def getOidFromPK(catname, pkeys): pkeystr=pkeystr) try: - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) - curs = db.query(qry) - if (len(curs.dictresult()) == 0): + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + curs = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute(qry) + results = curs.fetchall() + if (len(results) == 0): raise QueryException("No such entry '%s' in %s" % (pkeystr, catname)) - return curs.dictresult().pop()['oid'] + return results.pop()['oid'] except Exception as e: setError(ERROR_NOREPAIR) @@ -2441,10 +2518,11 @@ def getOidFromPK(catname, pkeys): def getClassOidForRelfilenode(relfilenode): qry = "SELECT oid FROM pg_class WHERE relfilenode = %d;" % (relfilenode) try: - dburl = dbconn.DbURL(hostname=GV.opt['-h'], port=GV.opt['-p'], dbname=GV.dbname) - conn = dbconn.connect(dburl) - oid = dbconn.queryRow(conn, qry)[0] - return oid + with closing(connect()) as conn: + with conn.cursor() as curs: + curs.execute(qry) + oid = curs.fetchone()[0] + return oid except Exception as e: setError(ERROR_NOREPAIR) myprint(' Execution error: ' + str(e)) @@ -2464,10 +2542,12 @@ def getResourceTypeOid(oid): """ % (oid, oid) try: - db = connect() - curs = db.query(qry) - if len(curs.dictresult()) == 0: return 0 - return 
curs.dictresult().pop()['oid'] + with closing(connect()) as conn: + with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs: + curs.execute(qry) + results = curs.fetchall() + if len(results) == 0: return 0 + return results.pop()['oid'] except Exception as e: setError(ERROR_NOREPAIR) myprint(' Execution error: ' + str(e)) @@ -3062,7 +3142,7 @@ class GPObject: # Collect all tables with missing issues for later reporting if len(self.missingIssues): - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) oid_query = "select (select nspname from pg_namespace where oid=relnamespace) || '.' || relname from pg_class where oid=%d" type_query = "select (select nspname from pg_namespace where oid=relnamespace) || '.' || relname from pg_class where reltype=%d" for issues in self.missingIssues.values() : @@ -3070,19 +3150,23 @@ class GPObject: # Get schemaname.tablename corresponding to oid for key in issue.pkeys: if 'relid' in key or key in ['ev_class', 'reloid']: - table_list = db.query(oid_query % issue.pkeys[key]).getresult() + curs = conn.cursor() + curs.execute(oid_query % issue.pkeys[key]) + table_list = curs.fetchone() if table_list: if issue.type == 'missing': - GV.missing_attr_tables.append( (table_list[0][0], issue.segids) ) + GV.missing_attr_tables.append( (table_list[0], issue.segids) ) else: - GV.extra_attr_tables.append( (table_list[0][0], issue.segids) ) + GV.extra_attr_tables.append( (table_list[0], issue.segids) ) elif key == 'oid': - table_list = db.query(type_query % issue.pkeys[key]).getresult() + curs = conn.cursor() + curs.execute(type_query % issue.pkeys[key]) + table_list = curs.fetchone() if table_list: if issue.type == 'missing': - GV.missing_attr_tables.append( (table_list[0][0], issue.segids) ) + GV.missing_attr_tables.append( (table_list[0], issue.segids) ) else: - GV.extra_attr_tables.append( (table_list[0][0], issue.segids) ) + GV.extra_attr_tables.append( 
(table_list[0], issue.segids) ) def __cmp__(self, other): @@ -3231,9 +3315,11 @@ def getRelInfo(objects): """.format(oids=','.join(map(str, oids))) try: - db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) - curs = db.query(qry) - for row in curs.getresult(): + conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False) + curs = conn.cursor() + curs.execute(qry) + results = curs.fetchall() + for row in results: (oid, relname, nspname, relkind, paroid) = row objects[oid, 'pg_class'].setRelInfo(relname, nspname, relkind, paroid) diff --git a/gpMgmt/bin/gpcheckcat_modules/foreign_key_check.py b/gpMgmt/bin/gpcheckcat_modules/foreign_key_check.py index ad4c1543fae..5ac49972110 100644 --- a/gpMgmt/bin/gpcheckcat_modules/foreign_key_check.py +++ b/gpMgmt/bin/gpcheckcat_modules/foreign_key_check.py @@ -2,6 +2,7 @@ from gppylib.gplog import * from gppylib.gpcatalog import * +from contextlib import closing import re class ForeignKeyCheck: @@ -105,25 +106,25 @@ def checkTableForeignKey(self, cat): def _validate_relation(self, catname, fkeystr, pkcatname, pkeystr, qry): issue_list = [] try: - curs = self.db_connection.query(qry) - nrows = curs.ntuples() - - if nrows == 0: - self.logger.info('[OK] Foreign key check for %s(%s) referencing %s(%s)' % - (catname, fkeystr, pkcatname, pkeystr)) - else: - self.logger.info('[FAIL] Foreign key check for %s(%s) referencing %s(%s)' % - (catname, fkeystr, pkcatname, pkeystr)) - self.logger.error(' %s has %d issue(s): entry has NULL reference of %s(%s)' % - (catname, nrows, pkcatname, pkeystr)) - - fields = curs.listfields() - log_literal(self.logger, logging.ERROR, " " + " | ".join(fields)) - for row in curs.getresult(): - log_literal(self.logger, logging.ERROR, " " + " | ".join(map(str, row))) - results = curs.getresult() - issue_list.append((pkcatname, fields, results)) - + with closing(self.db_connection.cursor()) as curs: + curs.execute(qry) + nrows = curs.rowcount + + if nrows == 0: + self.logger.info('[OK] Foreign key 
check for %s(%s) referencing %s(%s)' % + (catname, fkeystr, pkcatname, pkeystr)) + else: + self.logger.info('[FAIL] Foreign key check for %s(%s) referencing %s(%s)' % + (catname, fkeystr, pkcatname, pkeystr)) + self.logger.error(' %s has %d issue(s): entry has NULL reference of %s(%s)' % + (catname, nrows, pkcatname, pkeystr)) + + fields = [desc[0] for desc in curs.description] + log_literal(self.logger, logging.ERROR, " " + " | ".join(fields)) + results = curs.fetchall() + for row in results: + log_literal(self.logger, logging.ERROR, " " + " | ".join(map(str, row))) + issue_list.append((pkcatname, fields, results)) except Exception as e: err_msg = '[ERROR] executing: Foreign key check for catalog table {0}. Query : \n {1}\n'.format(catname, qry) err_msg += str(e) diff --git a/gpMgmt/bin/gpcheckcat_modules/leaked_schema_dropper.py b/gpMgmt/bin/gpcheckcat_modules/leaked_schema_dropper.py index 87e55a5cf7b..dc7cfacb32f 100644 --- a/gpMgmt/bin/gpcheckcat_modules/leaked_schema_dropper.py +++ b/gpMgmt/bin/gpcheckcat_modules/leaked_schema_dropper.py @@ -35,16 +35,19 @@ class LeakedSchemaDropper: """ def __get_leaked_schemas(self, db_connection): - leaked_schemas = db_connection.query(self.leaked_schema_query) + with db_connection.cursor() as curs: + curs.execute(self.leaked_schema_query) + leaked_schemas = curs.fetchall() - if not leaked_schemas: - return [] + if not leaked_schemas: + return [] - return [row[0] for row in leaked_schemas.getresult() if row[0]] + return [row[0] for row in leaked_schemas if row[0]] def drop_leaked_schemas(self, db_connection): leaked_schemas = self.__get_leaked_schemas(db_connection) for leaked_schema in leaked_schemas: escaped_schema_name = escapeDoubleQuoteInSQLString(leaked_schema) - db_connection.query('DROP SCHEMA IF EXISTS %s CASCADE;' % (escaped_schema_name)) + with db_connection.cursor() as curs: + curs.execute('DROP SCHEMA IF EXISTS %s CASCADE;' % (escaped_schema_name)) return leaked_schemas diff --git 
a/gpMgmt/bin/gpcheckcat_modules/orphaned_toast_tables_check.py b/gpMgmt/bin/gpcheckcat_modules/orphaned_toast_tables_check.py index 21ec8d18047..a76ef560867 100644 --- a/gpMgmt/bin/gpcheckcat_modules/orphaned_toast_tables_check.py +++ b/gpMgmt/bin/gpcheckcat_modules/orphaned_toast_tables_check.py @@ -4,6 +4,8 @@ from collections import namedtuple from gpcheckcat_modules.orphan_toast_table_issues import OrphanToastTableIssue, DoubleOrphanToastTableIssue, ReferenceOrphanToastTableIssue, DependencyOrphanToastTableIssue, MismatchOrphanToastTableIssue +import psycopg2 +from psycopg2 import extras OrphanedTable = namedtuple('OrphanedTable', 'oid catname') @@ -117,8 +119,10 @@ def __init__(self): """ def runCheck(self, db_connection): - orphaned_toast_tables = db_connection.query(self.orphaned_toast_tables_query).dictresult() - if len(orphaned_toast_tables) == 0: + curs = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) + curs.execute(self.orphaned_toast_tables_query) + orphaned_toast_tables = curs.fetchall() + if curs.rowcount == 0: return True for row in orphaned_toast_tables: diff --git a/gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py b/gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py index 47999f5c59c..6778401f31a 100644 --- a/gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py +++ b/gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py @@ -34,22 +34,24 @@ def __init__(self): ) as violations """ - def runCheck(self, db_connection): - unique_indexes = db_connection.query(self.unique_indexes_query).getresult() - violations = [] + def runCheck(self, conn): + with conn.cursor() as cur: + cur.execute(self.unique_indexes_query) + unique_indexes = cur.fetchall() + violations = [] - for (table_oid, index_name, table_name, column_names) in unique_indexes: - column_names = ",".join(column_names) - sql = self.get_violated_segments_query(table_name, column_names) - violated_segments = 
db_connection.query(sql).getresult() - if violated_segments: - violations.append(dict(table_oid=table_oid, - table_name=table_name, - index_name=index_name, - column_names=column_names, - violated_segments=[row[0] for row in violated_segments])) - - return violations + for (table_oid, index_name, table_name, column_names) in unique_indexes: + column_names = ",".join(column_names) + sql = self.get_violated_segments_query(table_name, column_names) + cur.execute(sql) + violated_segments = cur.fetchall() + if violated_segments: + violations.append(dict(table_oid=table_oid, + table_name=table_name, + index_name=index_name, + column_names=column_names, + violated_segments=[row[0] for row in violated_segments])) + return violations def get_violated_segments_query(self, table_name, column_names): return self.violated_segments_query % ( diff --git a/gpMgmt/bin/gpconfig b/gpMgmt/bin/gpconfig index 7bd3023ea85..464e9d79440 100755 --- a/gpMgmt/bin/gpconfig +++ b/gpMgmt/bin/gpconfig @@ -15,7 +15,7 @@ import os import sys import re - +from psycopg2 import DatabaseError try: from gppylib.gpparseopts import OptParser, OptChecker from gppylib.gparray import GpArray @@ -25,7 +25,6 @@ try: from gppylib.commands.gp import * from gppylib.db import dbconn from gppylib.userinput import * - from pg import DatabaseError from gpconfig_modules.segment_guc import SegmentGuc from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc from gpconfig_modules.file_segment_guc import FileSegmentGuc diff --git a/gpMgmt/bin/gpexpand b/gpMgmt/bin/gpexpand index 562fad58213..57cd6618773 100755 --- a/gpMgmt/bin/gpexpand +++ b/gpMgmt/bin/gpexpand @@ -17,10 +17,10 @@ import signal import traceback from collections import defaultdict from time import strftime, sleep - +import psycopg2 +from psycopg2 import DatabaseError, OperationalError +from psycopg2 import extras try: - import pg, pgdb - from gppylib.commands.unix import * from gppylib.commands.gp import * from gppylib.gparray import GpArray, 
MODE_NOT_SYNC, STATUS_DOWN @@ -32,9 +32,7 @@ try: from gppylib.operations.startSegments import MIRROR_MODE_MIRRORLESS from gppylib.system import configurationInterface, configurationImplGpdb from gppylib.system.environment import GpCoordinatorEnvironment - from pgdb import DatabaseError - from gppylib.gpcatalog import COORDINATOR_ONLY_TABLES_MAPPED - from gppylib.gpcatalog import COORDINATOR_ONLY_TABLES_NON_MAPPED + from gppylib.gpcatalog import COORDINATOR_ONLY_TABLES from gppylib.operations.package import SyncPackages from gppylib.operations.utils import ParallelOperation from gppylib.parseutils import line_reader, check_values, canonicalize_address @@ -1864,7 +1862,7 @@ WHERE expansionStopped) dbconn.execSQL(self.conn, sql) self.conn.close() - except pgdb.OperationalError: + except OperationalError: pass except Exception: # schema doesn't exist. Cancel or error during setup @@ -1916,7 +1914,7 @@ WHERE def connect_database(self, dbname): test_url = copy.deepcopy(self.dburl) test_url.pgdb = dbname - c = dbconn.connect(test_url, encoding='UTF8', allowSystemTableMods=True) + c = dbconn.connect(test_url, encoding='UTF8', allowSystemTableMods=True, cursorFactory=psycopg2.extras.NamedTupleCursor) return c def sync_packages(self): @@ -2157,7 +2155,7 @@ class ExpandCommand(SQLCommand): try: status_conn = dbconn.connect(self.status_url, encoding='UTF8') - table_conn = dbconn.connect(self.table_url, encoding='UTF8') + table_conn = dbconn.connect(self.table_url, encoding='UTF8', cursorFactory=psycopg2.extras.NamedTupleCursor) except DatabaseError as ex: if self.options.verbose: logger.exception(ex) diff --git a/gpMgmt/bin/gpload.py b/gpMgmt/bin/gpload.py index a1696947bbc..19963c7c80d 100755 --- a/gpMgmt/bin/gpload.py +++ b/gpMgmt/bin/gpload.py @@ -35,22 +35,8 @@ sys.exit(2) import platform - -try: - import pg -except ImportError: - try: - from pygresql import pg - except Exception as e: - pass -except Exception as e: - print(repr(e)) - errorMsg = "gpload was unable to 
import The PyGreSQL Python module (pg.py) - %s\n" % str(e) - sys.stderr.write(str(errorMsg)) - errorMsg = "Please check if you have the correct Visual Studio redistributable package installed.\n" - sys.stderr.write(str(errorMsg)) - sys.exit(2) - +import psycopg2 +from psycopg2 import extras import hashlib import datetime,getpass,os,signal,socket,threading,time,traceback,re import subprocess @@ -562,6 +548,16 @@ def is_keyword(tab): else: return False +# Escape single quotes, backslashes appearing in the string according to the SQL string constants syntax. +# E.g., +# >>> escape_string(r"O'Reilly") +# "O''Reilly" +def escape_string(string): + adapted = psycopg2.extensions.QuotedString(string) + # The getquoted() API returns 'latin-1' encoded binary string by default, we need to specify + # the encoding manually. + adapted.encoding = 'utf-8' + return adapted.getquoted().decode()[1:-1] def caseInsensitiveDictLookup(key, dictionary): """ @@ -724,25 +720,6 @@ def match_notice_obj(notice): else: return 0 - -def notice_processor_Notice(notice): - # process the notice in main branch - # notice is a class which is different in 6X, we need a new function to process - global NUM_WARN_ROWS - if windowsPlatform == True: - # We don't have a pygresql with our notice fix, so skip for windows. - # This means we will not get any warnings on windows (MPP10989). 
- return - theNotices = notice.message - messageNumber = 0 - if isinstance(theNotices, list): - while messageNumber < len(theNotices) and NUM_WARN_ROWS==0: - NUM_WARN_ROWS = match_notice_obj(theNotices[messageNumber]) - messageNumber+=1 - else: - NUM_WARN_ROWS = match_notice_obj(theNotices) - - def notice_processor(notice): global NUM_WARN_ROWS if windowsPlatform == True: @@ -1757,9 +1734,9 @@ def setup_connection(self, recurse = 0): """ Connect to the backend """ - if self.db != None: - self.db.close() - self.db = None + if self.conn != None: + self.conn.close() + self.conn = None if self.options.W: if self.options.password==None: self.options.password = getpass.getpass() @@ -1778,19 +1755,20 @@ def setup_connection(self, recurse = 0): " host=" + str(self.options.h) + " port=" + str(self.options.p) + " database=" + str(self.options.d)) - self.db = pg.DB( dbname=self.options.d - , host=self.options.h - , port=self.options.p - , user=self.options.U - , passwd=self.options.password - ) + self.conn = psycopg2.connect(dbname=self.options.d, + host=self.options.h, + port=self.options.p, + user=self.options.U, + password=self.options.password) + self.conn.set_session(autocommit=True) self.log(self.DEBUG, "Successfully connected to database") if noGpVersion == False: # Get GPDB version - curs = self.db.query("SELECT version()") - self.gpdb_version = GpVersion(curs.getresult()[0][0]) - self.log(self.DEBUG, "GPDB version is: %s" % self.gpdb_version) + with self.conn.cursor() as cur: + cur.execute("SELECT version()") + self.gpdb_version = GpVersion(cur.fetchall()[0][0]) + self.log(self.DEBUG, "GPDB version is: %s" % self.gpdb_version) except Exception as e: errorMessage = str(e) @@ -1884,13 +1862,15 @@ def read_table_metadata(self): WHERE c.relname = '%s' AND pg_catalog.pg_table_is_visible(c.oid);""" % quote_unident(self.table) - resultList = self.db.query(queryString).getresult() + with self.conn.cursor() as cur: + cur.execute(queryString) + resultList = cur.fetchall() - if 
len(resultList) > 0: - self.schema = (resultList[0])[0] - self.log(self.INFO, "setting schema '%s' for table '%s'" % (self.schema, quote_unident(self.table))) - else: - self.log(self.ERROR, "table %s not found in any database schema" % self.table) + if len(resultList) > 0: + self.schema = (resultList[0])[0] + self.log(self.INFO, "setting schema '%s' for table '%s'" % (self.schema, quote_unident(self.table))) + else: + self.log(self.ERROR, "table %s not found in any database schema" % self.table) queryString = """select nt.nspname as table_schema, @@ -1913,41 +1893,45 @@ def read_table_metadata(self): count = 0 self.into_columns = [] self.into_columns_dict = dict() - resultList = self.db.query(queryString).dictresult() - while count < len(resultList): - row = resultList[count] - count += 1 - ct = str(row['data_type']) - if ct == 'bigserial': - ct = 'bigint' - elif ct == 'serial': - ct = 'int4' - name = row['column_name'] - name = quote_ident(name) - has_seq = row['has_sequence'] - if has_seq == str('f') or has_seq==False: - has_seq_bool = False - if has_seq == str('t') or has_seq==True: - has_sql_bool = True - i = [name,ct,None, has_seq_bool] - # i: [column name, column data type, mapping target, has_sequence] - self.into_columns.append(i) - self.into_columns_dict[name] = i - self.log(self.DEBUG, "found input column: " + str(i)) - if count == 0: - # see if it's a permissions issue or it actually doesn't exist - tableName = quote_unident(self.table) - tableSchema = quote_unident(self.schema) - sql = """select 1 from pg_class c, pg_namespace n - where c.relname = '%s' and - n.nspname = '%s' and - n.oid = c.relnamespace""" % (tableName, tableSchema) - resultList = self.db.query(sql).getresult() - if len(resultList) > 0: - self.log(self.ERROR, "permission denied for table %s.%s" % \ - (tableSchema, tableName)) - else: - self.log(self.ERROR, 'table %s.%s does not exist in database %s'% (tableSchema, tableName, self.options.d)) + with 
self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur: + cur.execute(queryString) + resultList = cur.fetchall() + while count < len(resultList): + row = resultList[count] + count += 1 + ct = str(row['data_type']) + if ct == 'bigserial': + ct = 'bigint' + elif ct == 'serial': + ct = 'int4' + name = row['column_name'] + name = quote_ident(name) + has_seq = row['has_sequence'] + if has_seq == str('f') or has_seq==False: + has_seq_bool = False + if has_seq == str('t') or has_seq==True: + has_sql_bool = True + i = [name,ct,None, has_seq_bool] + # i: [column name, column data type, mapping target, has_sequence] + self.into_columns.append(i) + self.into_columns_dict[name] = i + self.log(self.DEBUG, "found input column: " + str(i)) + if count == 0: + # see if it's a permissions issue or it actually doesn't exist + tableName = quote_unident(self.table) + tableSchema = quote_unident(self.schema) + sql = """select 1 from pg_class c, pg_namespace n + where c.relname = '%s' and + n.nspname = '%s' and + n.oid = c.relnamespace""" % (tableName, tableSchema) + with self.conn.cursor() as cur: + cur.execute(sql) + resultList = cur.fetchall() + if len(resultList) > 0: + self.log(self.ERROR, "permission denied for table %s.%s" % \ + (tableSchema, tableName)) + else: + self.log(self.ERROR, 'table %s.%s does not exist in database %s'% (tableSchema, tableName, self.options.d)) def read_mapping(self): ''' @@ -2190,8 +2174,10 @@ def get_table_oid(self, tableName): if tableName: sql = "select %s::regclass::oid" % quote(quote_unident(tableName)) try: - resultList = self.db.query(sql).getresult() - return resultList[0][0] + with self.conn.cursor() as cur: + cur.execute(sql) + resultList = cur.fetchall() + return resultList[0][0] except Exception as e: pass return None @@ -2322,15 +2308,19 @@ def create_external_table(self): encodingCode = None encodingStr = self.getconfig('gpload:input:encoding', str, None) if encodingStr is None: - result = self.db.query("SHOW 
SERVER_ENCODING").getresult() - if len(result) > 0: - encodingStr = result[0][0] + with self.conn.cursor() as cur: + cur.execute("SHOW SERVER_ENCODING") + result = cur.fetchall() + if len(result) > 0: + encodingStr = result[0][0] if encodingStr: sql = "SELECT pg_char_to_encoding('%s')" % encodingStr - result = self.db.query(sql).getresult() - if len(result) > 0: - encodingCode = result[0][0] + with self.conn.cursor() as cur: + cur.execute(sql) + result = cur.fetchall() + if len(result) > 0: + encodingCode = result[0][0] limitStr = self.getconfig('gpload:input:error_limit',int, None) if self.log_errors and not limitStr: @@ -2370,11 +2360,13 @@ def create_external_table(self): AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND n.nspname !~ '^pg_toast'""" - result = self.db.query(sql).getresult() - if len(result) > 0: - self.extSchemaTable = self.get_schematable(quote_unident(self.extSchemaName), self.extTableName) - self.log(self.INFO, "reusing external staging table %s" % self.extSchemaTable) - return + with self.conn.cursor() as cur: + cur.execute(sql) + result = cur.fetchall() + if len(result) > 0: + self.extSchemaTable = self.get_schematable(self.extSchemaName, self.extTableName) + self.log(self.INFO, "reusing external staging table %s" % self.extSchemaTable) + return # staging table is not specified, we need to find it manually else: # process the single quotes in order to successfully find an existing external table to reuse. @@ -2386,17 +2378,19 @@ def create_external_table(self): sql = self.get_reuse_exttable_query(formatType, self.formatOpts, limitStr, from_cols, self.extSchemaName, self.log_errors, encodingCode) - resultList = self.db.query(sql).getresult() - if len(resultList) > 0: - # found an external table to reuse. no need to create one. we're done here. 
- self.extTableName = (resultList[0])[0] - # fast match result is only table name, so we need add schema info - if self.fast_match: - self.extSchemaTable = self.get_schematable(quote_unident(self.extSchemaName), self.extTableName) - else: - self.extSchemaTable = self.extTableName - self.log(self.INFO, "reusing external table %s" % self.extSchemaTable) - return + with self.conn.cursor() as cur: + cur.execute(sql) + resultList = cur.fetchall() + if len(resultList) > 0: + # found an external table to reuse. no need to create one. we're done here. + self.extTableName = (resultList[0])[0] + # fast match result is only table name, so we need add schema info + if self.fast_match: + self.extSchemaTable = self.get_schematable(self.extSchemaName, self.extTableName) + else: + self.extSchemaTable = self.extTableName + self.log(self.INFO, "reusing external table %s" % self.extSchemaTable) + return # didn't find an existing external table suitable for reuse. Format a reusable # name and issue a CREATE EXTERNAL TABLE on it. 
Hopefully we can use it next time @@ -2428,7 +2422,8 @@ def create_external_table(self): sql += "segment reject limit %s "%limitStr try: - self.db.query(sql.encode('utf-8')) + with self.conn.cursor() as cur: + cur.execute(sql) except Exception as e: self.log(self.ERROR, 'could not run SQL "%s": %s' % (sql, str(e))) @@ -2446,7 +2441,9 @@ def get_distribution_key(self): sql = '''select * from pg_get_table_distributedby('%s.%s'::regclass::oid)'''% (self.schema, self.table) try: - dk_text = self.db.query(sql.encode('utf-8')).getresult() + with self.conn.cursor() as cur: + cur.execute(sql) + dk_text = cur.fetchall() except Exception as e: self.log(self.ERROR, 'could not run SQL "%s": %s ' % (sql, str(e))) @@ -2470,11 +2467,13 @@ def create_staging_table(self): distcols = self.get_distribution_key() sql = "SELECT * FROM pg_class WHERE relname LIKE 'temp_gpload_reusable_%%';" - resultList = self.db.query(sql).getresult() - if len(resultList) > 0: - self.log(self.WARN, """Old style, reusable tables named "temp_gpload_reusable_*" from a previous versions were found. - Cloudberry recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""") - + with self.conn.cursor() as cur: + cur.execute(sql) + resultList = cur.fetchall() + if len(resultList) > 0: + self.log(self.WARN, """Old style, reusable tables named "temp_gpload_reusable_*" from a previous versions were found. + Greenplum recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""") + # If the 'reuse tables' option was specified we now try to find an # already existing staging table in the catalog which will match # the one that we need to use. 
It must meet the reuse conditions @@ -2491,20 +2490,19 @@ def create_staging_table(self): # create a string from all reuse conditions for staging tables and ancode it conditions_str = self.get_staging_conditions_string(target_table_name, target_columns, distcols).encode() encoding_conditions = hashlib.md5(conditions_str).hexdigest() + table_name = 'staging_gpload_reusable_%s'% (encoding_conditions) + sql = self.get_reuse_staging_table_query(table_name) + with self.conn.cursor() as cur: + cur.execute(sql) + resultList = cur.fetchall() - sql = self.get_reuse_staging_table_query(encoding_conditions) - resultList = self.db.query(sql).getresult() - - if len(resultList) > 0: - - # found a temp table to reuse. no need to create one. we're done here. - self.staging_table_name = (resultList[0])[0] - self.log(self.INFO, "reusing staging table %s" % self.staging_table_name) - - # truncate it so we don't use old data - self.do_truncate(self.staging_table_name) - - return + if len(resultList) > 0: + # found a temp table to reuse. no need to create one. we're done here. + self.staging_table_name = self.get_schematable(self.extSchemaName, table_name) + self.log(self.INFO, "reusing staging table %s" % self.staging_table_name) + # truncate it so we don't use old data + self.do_truncate(self.staging_table_name) + return # didn't find an existing staging table suitable for reuse. Format a reusable # name and issue a CREATE TABLE on it (without TEMP!). 
Hopefully we can use it @@ -2541,27 +2539,31 @@ def create_staging_table(self): self.log(self.LOG, sql) if not self.options.D: - self.db.query(sql) + with self.conn.cursor() as cur: + cur.execute(sql) if not self.reuse_tables: self.cleanupSql.append('DROP TABLE IF EXISTS %s' % self.staging_table_name) def count_errors(self): - # callback function is setted before insert - # notice processor will be called automaticly + notice_processor(self.conn.notices) if self.log_errors and not self.options.D: # make sure we only get errors for our own instance if not self.reuse_tables: - queryStr = "select count(*) from gp_read_error_log('%s')" % pg.escape_string(self.extSchemaTable) - results = self.db.query(queryStr).getresult() - return (results[0])[0] + queryStr = "select count(*) from gp_read_error_log('%s')" % escape_string(self.extSchemaTable) + with self.conn.cursor() as cur: + cur.execute(queryStr) + results = cur.fetchall() + return (results[0])[0] else: # reuse_tables - queryStr = "select count(*) from gp_read_error_log('%s') where cmdtime > to_timestamp(%s)" % (pg.escape_string(self.extSchemaTable), self.startTimestamp) - results = self.db.query(queryStr).getresult() - global NUM_WARN_ROWS - NUM_WARN_ROWS = (results[0])[0] - return (results[0])[0] + queryStr = "select count(*) from gp_read_error_log('%s') where cmdtime > to_timestamp(%s)" % (escape_string(self.extSchemaTable), self.startTimestamp) + with self.conn.cursor() as cur: + cur.execute(queryStr) + results = cur.fetchall() + global NUM_WARN_ROWS + NUM_WARN_ROWS = (results[0])[0] + return (results[0])[0]; return 0 def report_errors(self): @@ -2575,7 +2577,7 @@ def report_errors(self): # if reuse_table is set, error message is not deleted. 
if errors and self.log_errors and self.reuse_tables: self.log(self.WARN, "Please use following query to access the detailed error") - self.log(self.WARN, "select * from gp_read_error_log('{0}') where cmdtime > to_timestamp('{1}')".format(pg.escape_string(self.extSchemaTable), self.startTimestamp)) + self.log(self.WARN, "select * from gp_read_error_log('{0}') where cmdtime > to_timestamp('{1}')".format(escape_string(self.extSchemaTable), self.startTimestamp)) self.exitValue = 1 if errors else 0 @@ -2603,9 +2605,9 @@ def do_insert(self, dest): self.log(self.LOG, sql) if not self.options.D: try: - # we need to set the notice receiver function before do insert - self.db.set_notice_receiver(notice_processor_Notice) - self.rowsInserted = self.db.query(sql.encode('utf-8')) + with self.conn.cursor() as cur: + cur.execute(sql) + self.rowsInserted = cur.rowcount except Exception as e: # We need to be a bit careful about the error since it may contain non-unicode characters strE = e.__str__().encode().decode('unicode-escape') @@ -2699,7 +2701,9 @@ def do_update(self,fromname,index): self.log(self.LOG, sql) if not self.options.D: try: - self.rowsUpdated = self.db.query(sql.encode('utf-8')) + with self.conn.cursor() as cur: + cur.execute(sql) + self.rowsUpdated = cur.rowcount except Exception as e: # We need to be a bit careful about the error since it may contain non-unicode characters strE = str(str(e), errors = 'ignore') @@ -2725,14 +2729,15 @@ def get_table_dist_key(self): "c.relnamespace = n.oid and " + \ "n.nspname = '%s' and c.relname = '%s'; " % (quote_unident(self.schema), quote_unident(self.table)) - resultList = self.db.query(sql).getresult() - attrs = [] - count = 0 - while count < len(resultList): - attrs.append((resultList[count])[0]) - count = count + 1 - - return attrs + with self.conn.cursor() as cur: + cur.execute(sql) + resultList = cur.fetchall() + attrs = [] + count = 0 + while count < len(resultList): + attrs.append((resultList[count])[0]) + count = count 
+ 1 + return attrs def table_supports_update(self): """ Check wether columns being updated are distribution key.""" @@ -2789,7 +2794,8 @@ def do_method_merge(self): self.log(self.LOG, sql) if not self.options.D: try: - self.db.query(sql.encode('utf-8')) + with self.conn.cursor() as cur: + cur.execute(sql) except Exception as e: strE = str(str(e), errors = 'ignore') strF = str(str(sql), errors = 'ignore') @@ -2810,7 +2816,9 @@ def do_method_merge(self): self.log(self.LOG, sql) if not self.options.D: try: - self.rowsInserted = self.db.query(sql.encode('utf-8')) + with self.conn.cursor() as cur: + cur.execute(sql) + self.rowsInserted = cur.rowcount except Exception as e: # We need to be a bit careful about the error since it may contain non-unicode characters strE = str(str(e), errors = 'ignore') @@ -2822,7 +2830,8 @@ def do_truncate(self, tblname): if not self.options.D: try: truncateSQLtext = "truncate %s" % tblname - self.db.query(truncateSQLtext.encode('utf-8')) + with self.conn.cursor() as cur: + cur.execute(truncateSQLtext) except Exception as e: self.log(self.ERROR, 'could not execute truncate target %s: %s' % (tblname, str(e))) @@ -2843,8 +2852,9 @@ def do_method(self): truncate = False self.reuse_tables = False - if not self.options.no_auto_trans: - self.db.query("BEGIN") + if not self.options.no_auto_trans and not method=='insert': + with self.conn.cursor() as cur: + cur.execute("BEGIN") self.extSchemaName = self.getconfig('gpload:external:schema', str, None) if self.extSchemaName == '%': @@ -2880,7 +2890,8 @@ def do_method(self): self.log(self.LOG, "Pre-SQL from user: %s" % before) if not self.options.D: try: - self.db.query(before.encode('utf-8')) + with self.conn.cursor() as cur: + cur.execute(before) except Exception as e: self.log(self.ERROR, 'could not execute SQL in sql:before "%s": %s' % (before, str(e))) @@ -2903,15 +2914,15 @@ def do_method(self): self.log(self.LOG, "Post-SQL from user: %s" % after) if not self.options.D: try: - 
self.db.query(after.encode('utf-8')) + with self.conn.cursor() as cur: + cur.execute(after) except Exception as e: self.log(self.ERROR, 'could not execute SQL in sql:after "%s": %s' % (after, str(e))) - if not self.options.no_auto_trans: - self.db.query("COMMIT") - - + if not self.options.no_auto_trans and not method=='insert': + with self.conn.cursor() as cur: + cur.execute("COMMIT") def stop_gpfdists(self): if self.subprocesses: @@ -2948,7 +2959,7 @@ def run2(self): self.log(self.INFO, 'running time: %.2f seconds'%(time.time()-start)) def run(self): - self.db = None + self.conn = None self.rowsInserted = 0 self.rowsUpdated = 0 signal.signal(signal.SIGINT, handle_kill) @@ -2979,14 +2990,15 @@ def run(self): for a in self.cleanupSql: try: self.log(self.DEBUG, a) - self.db.query(a) + with self.conn.cursor() as cur: + cur.execute(a) except (Exception, SystemExit): traceback.print_exc(file=self.logfile) self.logfile.flush() traceback.print_exc() - if self.db != None: - self.db.close() + if self.conn != None: + self.conn.close() self.log(self.INFO, 'rows Inserted = ' + str(self.rowsInserted)) self.log(self.INFO, 'rows Updated = ' + str(self.rowsUpdated)) diff --git a/gpMgmt/bin/gpload_test/gpload2/TEST_REMOTE.py b/gpMgmt/bin/gpload_test/gpload2/TEST_REMOTE.py index ae1618f78cf..eb59c129dae 100755 --- a/gpMgmt/bin/gpload_test/gpload2/TEST_REMOTE.py +++ b/gpMgmt/bin/gpload_test/gpload2/TEST_REMOTE.py @@ -11,7 +11,7 @@ import re import subprocess from shutil import copyfile -import pg +import psycopg2 """ Global Values @@ -275,44 +275,34 @@ def copy_data(source='',target=''): copyfile(os.path.join('data', source), target) def get_table_name(): - try: - db = pg.DB(dbname='reuse_gptest' - ,host='localhost' - ,port=int(PGPORT) - ) - except Exception as e: - errorMessage = str(e) - print(('could not connect to database: ' + errorMessage)) - queryString = """SELECT relname - from pg_class - WHERE relname - like 'ext_gpload_reusable%' - OR relname - like 
'staging_gpload_reusable%';""" - resultList = db.query(queryString.encode('utf-8')).getresult() - return resultList + with psycopg2.connect(dbname='reuse_gptest', + host='localhost', + port=int(PGPORT)) as conn: + with conn.cursor() as cur: + queryString = """SELECT relname + from pg_class + WHERE relname + like 'ext_gpload_reusable%' + OR relname + like 'staging_gpload_reusable%';""" + cur.execute(queryString) + resultList = cur.fetchall() + return resultList def drop_tables(): - try: - db = pg.DB(dbname='reuse_gptest' - ,host='localhost' - ,port=int(PGPORT) - ) - except Exception as e: - errorMessage = str(e) - print(('could not connect to database: ' + errorMessage)) - - list = get_table_name() - for i in list: - name = i[0] - match = re.search('ext_gpload',name) - if match: - queryString = "DROP EXTERNAL TABLE %s" % name - db.query(queryString.encode('utf-8')) - - else: - queryString = "DROP TABLE %s" % name - db.query(queryString.encode('utf-8')) + table_list = get_table_name() + with psycopg2.connect(dbname='reuse_gptest', + host='localhost', + port=int(PGPORT)) as conn: + with conn.cursor() as cur: + for i in table_list: + name = i[0] + match = re.search('ext_gpload',name) + if match: + queryString = "DROP EXTERNAL TABLE %s" % name + else: + queryString = "DROP TABLE %s" % name + cur.execute(queryString) class PSQLError(Exception): ''' diff --git a/gpMgmt/bin/gpload_test/gpload2/TEST_local_base.py b/gpMgmt/bin/gpload_test/gpload2/TEST_local_base.py index 5c47e724ff3..6aa60f9d8bc 100755 --- a/gpMgmt/bin/gpload_test/gpload2/TEST_local_base.py +++ b/gpMgmt/bin/gpload_test/gpload2/TEST_local_base.py @@ -12,6 +12,7 @@ import re #import yaml import pytest +import psycopg2 from gppylib.commands.gp import get_coordinatordatadir @@ -19,20 +20,6 @@ import subprocess32 as subprocess except: import subprocess -try: - import pg -except ImportError: - try: - from pygresql import pg - except Exception as e: - pass -except Exception as e: - print(repr(e)) - errorMsg = 
"gpload was unable to import The PyGreSQL Python module (pg.py) - %s\n" % str(e) - sys.stderr.write(str(errorMsg)) - errorMsg = "Please check if you have the correct Visual Studio redistributable package installed.\n" - sys.stderr.write(str(errorMsg)) - sys.exit(2) def get_port_from_conf(): file = get_coordinatordatadir()+'/postgresql.conf' @@ -414,49 +401,38 @@ def copy_data(source='',target=''): def get_table_name(): - try: - db = pg.DB(dbname='reuse_gptest' - ,host='localhost' - ,port=int(PGPORT) - ) - except Exception as e: - errorMessage = str(e) - print ('could not connect to database: ' + errorMessage) - queryString = """SELECT sch.table_schema, cls.relname - FROM pg_class AS cls, information_schema.tables AS sch - WHERE - (cls.relname LIKE 'ext_gpload_reusable%' - OR - relname LIKE 'staging_gpload_reusable%') - AND cls.relname=sch.table_name;""" - resultList = db.query(queryString.encode('utf-8')).getresult() - print(resultList) - return resultList + with psycopg2.connect(dbname='reuse_gptest', + host='localhost', + port=int(PGPORT)) as conn: + with conn.cursor() as cur: + queryString = """SELECT sch.table_schema, cls.relname + FROM pg_class AS cls, information_schema.tables AS sch + WHERE + (cls.relname LIKE 'ext_gpload_reusable%' + OR + relname LIKE 'staging_gpload_reusable%') + AND cls.relname=sch.table_name;""" + cur.execute(queryString) + resultList = cur.fetchall() + return resultList def drop_tables(): '''drop external and staging tables''' - try: - db = pg.DB(dbname='reuse_gptest' - ,host='localhost' - ,port=int(PGPORT) - ) - except Exception as e: - errorMessage = str(e) - print ('could not connect to database: ' + errorMessage) - tableList = get_table_name() - for i in tableList: - schema = i[0] - name = i[1] - match = re.search('ext_gpload',name) - if match: - queryString = 'DROP EXTERNAL TABLE "%s"."%s";'%(schema, name) - db.query(queryString.encode('utf-8')) - - else: - queryString = 'DROP TABLE "%s"."%s";'%(schema, name) - 
db.query(queryString.encode('utf-8')) + with psycopg2.connect(dbname='reuse_gptest', + host='localhost', + port=int(PGPORT)) as conn: + with conn.cursor() as cur: + for i in tableList: + schema = i[0] + name = i[1] + match = re.search('ext_gpload',name) + if match: + queryString = 'DROP EXTERNAL TABLE "%s"."%s";'%(schema, name) + else: + queryString = 'DROP TABLE "%s"."%s";'%(schema, name) + cur.execute(queryString) class PSQLError(Exception): ''' diff --git a/gpMgmt/bin/gpload_test/gpload2/TEST_local_schema_and_mode.py b/gpMgmt/bin/gpload_test/gpload2/TEST_local_schema_and_mode.py index 79f05d1c74c..8ad5e45deeb 100644 --- a/gpMgmt/bin/gpload_test/gpload2/TEST_local_schema_and_mode.py +++ b/gpMgmt/bin/gpload_test/gpload2/TEST_local_schema_and_mode.py @@ -880,3 +880,14 @@ def test_547_gpload_insert_staging_without_DK(): f = open(TestBase.mkpath('query547.sql'), 'a') f.write("\\! psql -d reuse_gptest -c '\\d staging_gpload_reusable_*'") f.close() + +@TestBase.prepare_before_test(num=548, times=1) +def test_548_gpload_exttable_with_special_schema_name(): + "548 gpload reuse external table with special schema name" + setup_file = TestBase.mkpath('setup.sql') + TestBase.runfile(setup_file) + with open(TestBase.mkpath('query548.sql'), 'wt') as f: + f.write("\\! gpload -f " + TestBase.mkpath('config/config_file') + "\n") + f.write("\\! 
psql -d reuse_gptest -c 'select count(*) from csvtable;'\n") + TestBase.copy_data('external_file_13.csv','data_file.csv') + TestBase.write_config_file(reuse_tables=True, format='csv', file='data_file.csv', table='csvtable', delimiter="','",log_errors=True,error_limit=10, staging_table='staging_table',externalSchema='spiegelungssätze') diff --git a/gpMgmt/bin/gpload_test/gpload2/query205.ans b/gpMgmt/bin/gpload_test/gpload2/query205.ans index e47d90653e6..542c9597b12 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query205.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query205.ans @@ -2,7 +2,7 @@ 2021-11-29 11:56:45|INFO|setting schema 'public' for table 'texttable2' 2021-11-29 11:56:45|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-11-29 11:56:45|INFO|did not find an external table to reuse. creating ext_gpload_reusable_5e8c16ac_50c8_11ec_a3c6_0050569e2380 -2021-11-29 11:56:45|ERROR|could not run SQL "create external table ext_gpload_reusable_5e8c16ac_50c8_11ec_a3c6_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter 'Ÿ' null '\N' escape '\' ) encoding'UTF8' ": ERROR: COPY delimiter must be a single one-byte character, or 'off' +2021-11-29 11:56:45|ERROR|could not run SQL "create external table ext_gpload_reusable_5e8c16ac_50c8_11ec_a3c6_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter 'Ÿ' null '\N' escape '\' ) encoding'UTF8' ": COPY delimiter must be a single one-byte character, or 'off' 2021-11-29 11:56:45|INFO|rows Inserted = 0 2021-11-29 11:56:45|INFO|rows Updated = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query207.ans b/gpMgmt/bin/gpload_test/gpload2/query207.ans index bde4034f5af..332aff46d38 100644 --- 
a/gpMgmt/bin/gpload_test/gpload2/query207.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query207.ans @@ -2,7 +2,7 @@ 2020-12-17 15:38:22|INFO|setting schema 'public' for table 'texttable2' 2020-12-17 15:38:22|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2020-12-17 15:38:22|INFO|reusing external table ext_gpload_reusable_6f2dfb50_4035_11eb_b7f6_00505698d059 -2020-12-17 15:38:22|ERROR|ERROR: missing data for column "s2" (seg0 slice1 10.152.8.113:7002 pid=6654) +2020-12-17 15:38:22|ERROR|missing data for column "s2" (seg0 slice1 10.152.8.113:7002 pid=6654) CONTEXT: External table ext_gpload_reusable_6f2dfb50_4035_11eb_b7f6_00505698d059, line 1 of gpfdist://*:pathto/data_file.txt: "123456789 ab c d" encountered while running INSERT INTO public."texttable2" ("s1","s2") SELECT "s1","s2" FROM ext_gpload_reusable_6f2dfb50_4035_11eb_b7f6_00505698d059 2020-12-17 15:38:22|INFO|rows Inserted = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query211.ans b/gpMgmt/bin/gpload_test/gpload2/query211.ans index 086b23498e8..f9e732da4e6 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query211.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query211.ans @@ -2,7 +2,7 @@ 2020-12-18 16:59:08|INFO|setting schema 'public' for table 'texttable2' 2020-12-18 16:59:08|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2020-12-18 16:59:08|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_49ed34de_410f_11eb_bbac_00505698d059 -2020-12-18 16:59:08|ERROR|ERROR: extra data after last expected column (seg2 slice1 10.152.8.113:7004 pid=2301) +2020-12-18 16:59:08|ERROR|extra data after last expected column (seg2 slice1 10.152.8.113:7004 pid=2301) CONTEXT: External table ext_gpload_reusable_49ed34de_410f_11eb_bbac_00505698d059, line 1 of gpfdist://*:pathto/data_file.txt: "a|||b" encountered while running INSERT INTO public."texttable2" ("s1","s2") SELECT "s1","s2" FROM ext_gpload_reusable_49ed34de_410f_11eb_bbac_00505698d059 2020-12-18 16:59:08|INFO|rows Inserted = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query220.ans b/gpMgmt/bin/gpload_test/gpload2/query220.ans index 77139e22a90..af6249e4be0 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query220.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query220.ans @@ -2,7 +2,7 @@ 2020-12-21 10:55:38|INFO|setting schema 'public' for table 'texttable1' 2020-12-21 10:55:38|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2020-12-21 10:55:38|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_015f190a_4338_11eb_b807_00505698d059 -2020-12-21 10:55:38|ERROR|ERROR: missing data for column "n8" (seg0 slice1 10.152.8.113:7002 pid=3859) +2020-12-21 10:55:38|ERROR|missing data for column "n8" (seg0 slice1 10.152.8.113:7002 pid=3859) CONTEXT: External table ext_gpload_reusable_015f190a_4338_11eb_b807_00505698d059, line 1 of gpfdist://*:pathto/data_file.txt: "aaa|qwer|shjhjg|2012-06-01 15:30:30|1|111|834567|45.67|789.123|7.12345|123.456789" encountered while running INSERT INTO public."texttable1" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ext_gpload_reusable_015f190a_4338_11eb_b807_00505698d059 2020-12-21 10:55:38|INFO|rows Inserted = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query233.ans b/gpMgmt/bin/gpload_test/gpload2/query233.ans index 1a0ac829822..d2bf87c241d 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query233.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query233.ans @@ -2,7 +2,7 @@ 2021-11-29 11:56:55|INFO|setting schema 'public' for table 'texttable2' 2021-11-29 11:56:55|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-11-29 11:56:55|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_64b5f930_50c8_11ec_ad50_0050569e2380 -2021-11-29 11:56:55|ERROR|could not run SQL "create external table ext_gpload_reusable_64b5f930_50c8_11ec_ad50_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null 'E''\x08E'\'' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "\" +2021-11-29 11:56:55|ERROR|could not run SQL "create external table ext_gpload_reusable_64b5f930_50c8_11ec_ad50_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null 'E''\x08E'\'' escape '\' ) encoding'UTF8' ": syntax error at or near "\" LINE 1: ....txt') format'text' (delimiter '|' null 'E''\x08E'\'' escape... ^ diff --git a/gpMgmt/bin/gpload_test/gpload2/query241.ans b/gpMgmt/bin/gpload_test/gpload2/query241.ans index 89f88129cec..09f834db98a 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query241.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query241.ans @@ -2,7 +2,7 @@ 2021-01-04 16:30:12|INFO|setting schema 'public' for table 'texttable' 2021-01-04 16:30:12|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2021-01-04 16:30:12|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_0ff6e3e6_4e67_11eb_9d70_00505698d059 -2021-01-04 16:30:12|ERROR|ERROR: character with byte sequence 0xad 0xe5 in encoding "GBK" has no equivalent in encoding "UTF8" (seg1 slice1 10.152.8.113:7003 pid=20453) +2021-01-04 16:30:12|ERROR|character with byte sequence 0xad 0xe5 in encoding "GBK" has no equivalent in encoding "UTF8" (seg1 slice1 10.152.8.113:7003 pid=20453) CONTEXT: External table ext_gpload_reusable_0ff6e3e6_4e67_11eb_9d70_00505698d059, line 1 of file gpfdist://*:pathto/data_file.txt encountered while running INSERT INTO public."texttable" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7" FROM ext_gpload_reusable_0ff6e3e6_4e67_11eb_9d70_00505698d059 2021-01-04 16:30:12|INFO|rows Inserted = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query244.ans b/gpMgmt/bin/gpload_test/gpload2/query244.ans index 4349fd2fa78..316c5b6bb21 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query244.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query244.ans @@ -2,7 +2,7 @@ 2021-01-04 16:35:29|INFO|setting schema 'public' for table 'texttable2' 2021-01-04 16:35:29|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2021-01-04 16:35:29|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_ccfc0c6e_4e67_11eb_bac2_00505698d059 -2021-01-04 16:35:29|ERROR|ERROR: invalid byte sequence for encoding "UTF8": 0xd6 0xd0 (seg0 slice1 10.152.8.113:7002 pid=21163) +2021-01-04 16:35:29|ERROR|invalid byte sequence for encoding "UTF8": 0xd6 0xd0 (seg0 slice1 10.152.8.113:7002 pid=21163) CONTEXT: External table ext_gpload_reusable_ccfc0c6e_4e67_11eb_bac2_00505698d059, line 1 of file gpfdist://*:pathto/data_file.txt encountered while running INSERT INTO public."texttable2" ("s1","s2") SELECT "s1","s2" FROM ext_gpload_reusable_ccfc0c6e_4e67_11eb_bac2_00505698d059 2021-01-04 16:35:29|INFO|rows Inserted = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query259.ans b/gpMgmt/bin/gpload_test/gpload2/query259.ans index 44553764526..da5c847fec1 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query259.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query259.ans @@ -2,7 +2,7 @@ 2021-11-29 11:57:05|INFO|setting schema 'public' for table 'texttable2' 2021-11-29 11:57:05|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-11-29 11:57:05|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_6adbab20_50c8_11ec_94a0_0050569e2380 -2021-11-29 11:57:05|ERROR|could not run SQL "create external table ext_gpload_reusable_6adbab20_50c8_11ec_94a0_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' newline 'LFCR' ) encoding'UTF8' ": ERROR: invalid value for NEWLINE "LFCR" +2021-11-29 11:57:05|ERROR|could not run SQL "create external table ext_gpload_reusable_6adbab20_50c8_11ec_94a0_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' newline 'LFCR' ) encoding'UTF8' ": invalid value for NEWLINE "LFCR" HINT: Valid options are: 'LF', 'CRLF' and 'CR'. 2021-11-29 11:57:05|INFO|rows Inserted = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query260.ans b/gpMgmt/bin/gpload_test/gpload2/query260.ans index 0019311b50c..8256642f596 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query260.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query260.ans @@ -2,7 +2,7 @@ 2021-11-30 15:00:23|INFO|setting schema 'public' for table 'texttable2' 2021-11-30 15:00:23|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.csv" -t 30 2021-11-30 15:00:23|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_30a0dcd0_51ab_11ec_839b_0050569e2380 -2021-11-30 15:00:23|ERROR|could not run SQL "create external table ext_gpload_reusable_30a0dcd0_51ab_11ec_839b_0050569e2380("s1" text,"s2" text)location('gpfdist://*:pathto/data_file.csv') format'csv' (delimiter ',' null '' escape '"' quote '"' newline 'LFCR' ) encoding'UTF8' ": ERROR: invalid value for NEWLINE "LFCR" +2021-11-30 15:00:23|ERROR|could not run SQL "create external table ext_gpload_reusable_30a0dcd0_51ab_11ec_839b_0050569e2380("s1" text,"s2" text)location('gpfdist://*:pathto/data_file.csv') format'csv' (delimiter ',' null '' escape '"' quote '"' newline 'LFCR' ) encoding'UTF8' ": invalid value for NEWLINE "LFCR" HINT: Valid options are: 'LF', 'CRLF' and 'CR'. 2021-11-30 15:00:23|INFO|rows Inserted = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query31.ans b/gpMgmt/bin/gpload_test/gpload2/query31.ans index f01dc093e85..e43ec31c74f 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query31.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query31.ans @@ -3,7 +3,7 @@ 2018-07-20 09:06:30|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2018-07-20 09:06:30|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_9faa546d615fa55cc3e9e2cee6f130b0 2018-07-20 09:06:30|INFO|reusing external table ext_gpload_reusable_30024be2_8bfc_11e8_83d4_0242ac110002 -2018-07-20 09:06:30|ERROR|ERROR: column "n8" does not exist +2018-07-20 09:06:30|ERROR|column "n8" does not exist LINE 1: ..."s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ... ^ encountered while running INSERT INTO staging_gpload_reusable_9faa546d615fa55cc3e9e2cee6f130b0 ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ext_gpload_reusable_30024be2_8bfc_11e8_83d4_0242ac110002 @@ -16,7 +16,7 @@ LINE 1: ..."s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ... 
2018-07-20 09:06:30|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2018-07-20 09:06:30|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_9faa546d615fa55cc3e9e2cee6f130b0 2018-07-20 09:06:30|INFO|reusing external table ext_gpload_reusable_30024be2_8bfc_11e8_83d4_0242ac110002 -2018-07-20 09:06:30|ERROR|ERROR: column "n8" does not exist +2018-07-20 09:06:30|ERROR|column "n8" does not exist LINE 1: ..."s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ... ^ encountered while running INSERT INTO staging_gpload_reusable_9faa546d615fa55cc3e9e2cee6f130b0 ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ext_gpload_reusable_30024be2_8bfc_11e8_83d4_0242ac110002 diff --git a/gpMgmt/bin/gpload_test/gpload2/query312.ans b/gpMgmt/bin/gpload_test/gpload2/query312.ans index 84bea6cc455..030f88c59de 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query312.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query312.ans @@ -2,7 +2,7 @@ 2020-12-17 15:57:28|INFO|setting schema 'public' for table 'texttable' 2020-12-17 15:57:28|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2020-12-17 15:57:28|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_823e0c46_403d_11eb_ba00_000c299afcc5 -2020-12-17 15:57:29|ERROR|ERROR: segment reject limit reached, aborting operation (seg0 slice1 127.0.0.1:6000 pid=3953) +2020-12-17 15:57:29|ERROR|segment reject limit reached, aborting operation (seg0 slice1 127.0.0.1:6000 pid=3953) DETAIL: Last error was: invalid input syntax for type smallint: "invalid string", column n1 CONTEXT: External table ext_gpload_reusable_823e0c46_403d_11eb_ba00_000c299afcc5, line 4 of gpfdist://*:pathto/data_file.txt, column n1 encountered while running INSERT INTO public."texttable" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7" FROM ext_gpload_reusable_823e0c46_403d_11eb_ba00_000c299afcc5 diff --git a/gpMgmt/bin/gpload_test/gpload2/query37.ans b/gpMgmt/bin/gpload_test/gpload2/query37.ans index c51d7cdcd7d..b0050f24435 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query37.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query37.ans @@ -5,7 +5,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-11-29 11:57:32|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-11-29 11:57:32|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_afbaac0da7ced19791c9ab9c537f41d3 2021-11-29 11:57:32|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_7ac93e9e_50c8_11ec_821e_0050569e2380 -2021-11-29 11:57:32|ERROR|could not run SQL "create external table ext_gpload_reusable_7ac93e9e_50c8_11ec_821e_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'xxxx' ": ERROR: xxxx is not a valid encoding name +2021-11-29 11:57:32|ERROR|could not run SQL "create external table ext_gpload_reusable_7ac93e9e_50c8_11ec_821e_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'xxxx' ": xxxx is not a valid encoding name 2021-11-29 11:57:32|INFO|rows Inserted = 0 2021-11-29 11:57:32|INFO|rows Updated = 0 @@ -18,7 +18,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-11-29 11:57:32|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-11-29 11:57:32|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_afbaac0da7ced19791c9ab9c537f41d3 2021-11-29 11:57:32|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_7ae5d7e8_50c8_11ec_9979_0050569e2380 -2021-11-29 11:57:32|ERROR|could not run SQL "create external table ext_gpload_reusable_7ae5d7e8_50c8_11ec_9979_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'xxxx' ": ERROR: xxxx is not a valid encoding name +2021-11-29 11:57:32|ERROR|could not run SQL "create external table ext_gpload_reusable_7ae5d7e8_50c8_11ec_9979_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'xxxx' ": xxxx is not a valid encoding name 2021-11-29 11:57:32|INFO|rows Inserted = 0 2021-11-29 11:57:32|INFO|rows Updated = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query402.ans b/gpMgmt/bin/gpload_test/gpload2/query402.ans index b2b699b307e..9ec5a34c349 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query402.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query402.ans @@ -2,7 +2,7 @@ 2021-11-30 15:17:58|INFO|setting schema 'public' for table 'texttable' 2021-11-30 15:17:58|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2021-11-30 15:17:58|INFO|did not find an external table to reuse. 
creating non_ext_schema_test.ext_gpload_reusable_a53b654a_51ad_11ec_b1b3_0050569e2380 -2021-11-30 15:17:58|ERROR|could not run SQL "create external table non_ext_schema_test.ext_gpload_reusable_a53b654a_51ad_11ec_b1b3_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'UTF8' ": ERROR: schema "non_ext_schema_test" does not exist +2021-11-30 15:17:58|ERROR|could not run SQL "create external table non_ext_schema_test.ext_gpload_reusable_a53b654a_51ad_11ec_b1b3_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'UTF8' ": schema "non_ext_schema_test" does not exist 2021-11-30 15:17:58|INFO|rows Inserted = 0 2021-11-30 15:17:58|INFO|rows Updated = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query497.ans b/gpMgmt/bin/gpload_test/gpload2/query497.ans index 8a9dd85417e..9d291185652 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query497.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query497.ans @@ -5,7 +5,10 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-01-04 16:55:52|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30 2021-01-04 16:55:52|INFO|reusing staging table STAGING_GPLOAD_REUSABLE 2021-01-04 16:55:52|INFO|did not find an external table to reuse. creating ext_gpload_reusable_a651e6f8_4e6a_11eb_b8a4_7085c2381836 -2021-01-04 16:55:53|ERROR|unexpected error -- backtrace written to log file +2021-01-04 16:55:52|ERROR|column "non_col" does not exist +LINE 1: ...ble."s1" and into_table."s2"=from_table."s2" and non_col = ... 
+ ^ + encountered while running update public."texttable" into_table set "n2"=from_table."n2" from staging_gpload_reusable_4b4814f7db18b678f1605a0caec3c1fe from_table where into_table."n1"=from_table."n1" and into_table."s1"=from_table."s1" and into_table."s2"=from_table."s2" and non_col = 5 2021-01-04 16:55:53|INFO|rows Inserted = 0 2021-01-04 16:55:53|INFO|rows Updated = 0 2021-01-04 16:55:53|INFO|data formatting errors = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query522.ans b/gpMgmt/bin/gpload_test/gpload2/query522.ans index 5114fb13fad..79634a5feaa 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query522.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query522.ans @@ -7,7 +7,7 @@ CREATE TABLE 2021-01-07 16:25:52|INFO|setting schema 'public' for table 'mapping_test' 2021-01-07 16:25:52|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30 2021-01-07 16:25:52|INFO|did not find an external table to reuse. creating ext_gpload_reusable_f42535ca_50c1_11eb_a32e_7085c2381836 -2021-01-07 16:25:52|ERROR|ERROR: column "n3" does not exist +2021-01-07 16:25:52|ERROR|column "n3" does not exist LINE 1: ...blic."mapping_test" ("s1","s2","s3") SELECT c1,c2,n3 FROM ex... ^ HINT: Perhaps you meant to reference the column "ext_gpload_reusable_f42535ca_50c1_11eb_a32e_7085c2381836.c3" or the column "mapping_test.s3". diff --git a/gpMgmt/bin/gpload_test/gpload2/query523.ans b/gpMgmt/bin/gpload_test/gpload2/query523.ans index 2faf394b2a4..e9b6bff5bd8 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query523.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query523.ans @@ -7,7 +7,7 @@ CREATE TABLE 2021-01-07 16:26:05|INFO|setting schema 'public' for table 'mapping_test' 2021-01-07 16:26:06|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30 2021-01-07 16:26:06|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_fc7440a4_50c1_11eb_a0a7_7085c2381836 -2021-01-07 16:26:06|ERROR|ERROR: column "s4" is of type integer but expression is of type text +2021-01-07 16:26:06|ERROR|column "s4" is of type integer but expression is of type text LINE 1: ...blic."mapping_test" ("s2","s3","s4") SELECT c2,c3,c1 FROM ex... ^ HINT: You will need to rewrite or cast the expression. diff --git a/gpMgmt/bin/gpload_test/gpload2/query529.ans b/gpMgmt/bin/gpload_test/gpload2/query529.ans index 092c9478e23..4f10bb12c7b 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query529.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query529.ans @@ -7,7 +7,7 @@ CREATE TABLE 2021-01-07 16:37:32|INFO|setting schema 'public' for table 'mapping_test' 2021-01-07 16:37:32|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30 2021-01-07 16:37:32|INFO|did not find an external table to reuse. creating ext_gpload_reusable_959774bc_50c3_11eb_b301_7085c2381836 -2021-01-07 16:37:32|ERROR|ERROR: function rocket_bites(unknown) does not exist +2021-01-07 16:37:32|ERROR|function rocket_bites(unknown) does not exist LINE 1: INSERT INTO public."mapping_test" ("s1") SELECT rocket_bites... ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. diff --git a/gpMgmt/bin/gpload_test/gpload2/query532.ans b/gpMgmt/bin/gpload_test/gpload2/query532.ans index 98d7ea1ee9a..244ad2f3be6 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query532.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query532.ans @@ -12,7 +12,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-01-07 17:38:14|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30 2021-01-07 17:38:14|INFO|did not find a staging table to reuse. 
creating staging_gpload_reusable_edcb757d70ae1c70cdd2f7d15496f54b 2021-01-07 17:38:14|INFO|did not find an external table to reuse. creating ext_gpload_reusable_102fae3a_50cc_11eb_b6c8_7085c2381836 -2021-01-07 17:38:14|ERROR|ERROR: column "s4" is of type integer but expression is of type text +2021-01-07 17:38:14|ERROR|column "s4" is of type integer but expression is of type text LINE 1: ...5c64da950cfbc41ff55 ("s1","s2","s4") SELECT c1,c3,c2 FROM ex... ^ HINT: You will need to rewrite or cast the expression. diff --git a/gpMgmt/bin/gpload_test/gpload2/query533.ans b/gpMgmt/bin/gpload_test/gpload2/query533.ans index 809573e625a..64d947128fa 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query533.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query533.ans @@ -12,7 +12,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-01-07 17:38:26|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30 2021-01-07 17:38:26|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_edcb757d70ae1c70cdd2f7d15496f54b 2021-01-07 17:38:26|INFO|did not find an external table to reuse. creating ext_gpload_reusable_1777c5e2_50cc_11eb_bb05_7085c2381836 -2021-01-07 17:38:26|ERROR|ERROR: column "s4" is of type integer but expression is of type text +2021-01-07 17:38:26|ERROR|column "s4" is of type integer but expression is of type text LINE 1: ...5c64da950cfbc41ff55 ("s1","s2","s4") SELECT c1,c3,c2 FROM ex... ^ HINT: You will need to rewrite or cast the expression. 
diff --git a/gpMgmt/bin/gpload_test/gpload2/query548.ans b/gpMgmt/bin/gpload_test/gpload2/query548.ans new file mode 100644 index 00000000000..0933a693444 --- /dev/null +++ b/gpMgmt/bin/gpload_test/gpload2/query548.ans @@ -0,0 +1,13 @@ +2023-09-11 11:05:29|INFO|gpload session started 2023-09-11 11:05:29 +2023-09-11 11:05:29|INFO|setting schema 'public' for table 'csvtable' +2023-09-11 11:05:29|INFO|started gpfdist -p 8081 -P 8082 -f "/home/v/workspace/gpdb7/gpMgmt/bin/gpload_test/gpload2/data_file.csv" -t 30 +2023-09-11 11:05:29|INFO|running time: 0.09 seconds +2023-09-11 11:05:29|INFO|rows Inserted = 2 +2023-09-11 11:05:29|INFO|rows Updated = 0 +2023-09-11 11:05:29|INFO|data formatting errors = 0 +2023-09-11 11:05:29|INFO|gpload succeeded + count +------- + 2 +(1 row) + diff --git a/gpMgmt/bin/gpload_test/gpload2/query60.ans b/gpMgmt/bin/gpload_test/gpload2/query60.ans index 2d1c9dc10bf..673ff09913b 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query60.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query60.ans @@ -20,7 +20,7 @@ 2020-12-07 09:57:17|INFO|setting schema 'public' for table 'texttable' 2020-12-07 09:57:17|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2020-12-07 09:57:17|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_89036be0_382f_11eb_95c3_00505698707d -2020-12-07 09:57:18|ERROR|ERROR: connection with gpfdist failed for "gpfdist://*:pathto/data_file.txt", effective url: "http://*:pathto/data_file.txt": error code = 111 (Connection refused); (seg1 slice1 10.152.8.160:7003 pid=4267) +2020-12-07 09:57:18|ERROR|connection with gpfdist failed for "gpfdist://*:pathto/data_file.txt", effective url: "http://*:pathto/data_file.txt": error code = 111 (Connection refused); (seg1 slice1 10.152.8.160:7003 pid=4267) encountered while running INSERT INTO public."texttable" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7" FROM ext_gpload_reusable_89036be0_382f_11eb_95c3_00505698707d 2020-12-07 09:57:18|INFO|rows Inserted = 0 2020-12-07 09:57:18|INFO|rows Updated = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query604.ans b/gpMgmt/bin/gpload_test/gpload2/query604.ans index a245c1a586b..a8fb8e49529 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query604.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query604.ans @@ -1,8 +1,8 @@ 2021-01-17 20:28:18|INFO|gpload session started 2021-01-17 20:28:18 2021-01-17 20:28:18|INFO|setting schema 'public' for table 'texttable' 2021-01-17 20:28:18|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/temp/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 -2021-01-17 20:28:18|INFO|reusing external staging table staging_test -2021-01-17 20:28:18|ERROR|ERROR: column "n8" does not exist +2021-01-17 20:28:18|INFO|reusing external staging table "STAGING_test" +2021-01-17 20:28:18|ERROR|column "n8" does not exist LINE 1: ..."s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ... ^ HINT: There is a column named "n8" in table "texttable", but it cannot be referenced from this part of the query. 
diff --git a/gpMgmt/bin/gpload_test/gpload2/query65.ans b/gpMgmt/bin/gpload_test/gpload2/query65.ans index 574bc77fb24..442d8ce5e12 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query65.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query65.ans @@ -2,7 +2,7 @@ 2021-01-08 16:05:19|INFO|setting schema 'public' for table 'texttable' 2021-01-08 16:05:19|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt pathto/data_file1.txt pathto/data_file99.txt" -t 30 2021-01-08 16:05:19|INFO|did not find an external table to reuse. creating ext_gpload_reusable_3fe4da80_5188_11eb_bc9e_00505698707d -2021-01-08 16:05:20|ERROR|ERROR: http response code 404 from gpfdist (gpfdist://*:pathto/data_file.txt%pathto/data_file1.txt%pathto/data_file99.txt): HTTP/1.0 404 file not found (seg0 slice1 10.152.8.160:7002 pid=18998) +2021-01-08 16:05:20|ERROR|http response code 404 from gpfdist (gpfdist://*:pathto/data_file.txt%pathto/data_file1.txt%pathto/data_file99.txt): HTTP/1.0 404 file not found (seg0 slice1 10.152.8.160:7002 pid=18998) encountered while running INSERT INTO public."texttable" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7" FROM ext_gpload_reusable_3fe4da80_5188_11eb_bc9e_00505698707d 2021-01-08 16:05:20|INFO|rows Inserted = 0 2021-01-08 16:05:20|INFO|rows Updated = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query652.ans b/gpMgmt/bin/gpload_test/gpload2/query652.ans index 65314566d42..e0904221073 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query652.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query652.ans @@ -1,7 +1,7 @@ 2021-01-04 19:52:14|INFO|gpload session started 2021-01-04 19:52:14 2021-01-04 19:52:14|INFO|setting schema 'public' for table 'texttable_652' 2021-01-04 19:52:14|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30 -2021-01-04 19:52:14|ERROR|could not execute SQL in sql:before "INSERT INTO test_652 VALUES(1)": 
ERROR: relation "test_652" does not exist +2021-01-04 19:52:14|ERROR|could not execute SQL in sql:before "INSERT INTO test_652 VALUES(1)": relation "test_652" does not exist LINE 1: INSERT INTO test_652 VALUES(1) ^ diff --git a/gpMgmt/bin/gpload_test/gpload2/query662.ans b/gpMgmt/bin/gpload_test/gpload2/query662.ans index d88ee406f42..9eb0144afd1 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query662.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query662.ans @@ -2,7 +2,7 @@ 2021-01-11 18:03:26|INFO|setting schema 'public' for table 'texttable_662' 2021-01-11 18:03:26|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30 2021-01-11 18:03:26|INFO|did not find an external table to reuse. creating ext_gpload_reusable_3f87f84c_53f4_11eb_8b61_005056983e1a -2021-01-11 18:03:26|ERROR|could not execute SQL in sql:after "INSERT INTO test_662 VALUES(1)": ERROR: relation "test_662" does not exist +2021-01-11 18:03:26|ERROR|could not execute SQL in sql:after "INSERT INTO test_662 VALUES(1)": relation "test_662" does not exist LINE 1: INSERT INTO test_662 VALUES(1) ^ diff --git a/gpMgmt/bin/gpload_test/gpload2/query664.ans b/gpMgmt/bin/gpload_test/gpload2/query664.ans index 794c289b470..bffb8338e58 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query664.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query664.ans @@ -1,7 +1,7 @@ 2021-01-11 18:56:05|INFO|gpload session started 2021-01-11 18:56:05 2021-01-11 18:56:05|INFO|setting schema 'public' for table 'texttable_664' 2021-01-11 18:56:05|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30 -2021-01-11 18:56:05|ERROR|could not execute SQL in sql:before "INSERT INTO test_664_before VALUES('a')": ERROR: invalid input syntax for type integer: "a" +2021-01-11 18:56:05|ERROR|could not execute SQL in sql:before "INSERT INTO test_664_before VALUES('a')": invalid input syntax for type integer: 
"a" LINE 1: INSERT INTO test_664_before VALUES('a') ^ diff --git a/gpMgmt/bin/gpload_test/gpload2/query665.ans b/gpMgmt/bin/gpload_test/gpload2/query665.ans index 8a005605cca..c634c40d808 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query665.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query665.ans @@ -2,7 +2,7 @@ 2021-01-11 18:56:59|INFO|setting schema 'public' for table 'texttable_665' 2021-01-11 18:56:59|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30 2021-01-11 18:56:59|INFO|did not find an external table to reuse. creating ext_gpload_reusable_ba362bb6_53fb_11eb_a8b6_005056983e1a -2021-01-11 18:56:59|ERROR|could not execute SQL in sql:after "INSERT INTO test_665_after VALUES('a')": ERROR: invalid input syntax for type integer: "a" +2021-01-11 18:56:59|ERROR|could not execute SQL in sql:after "INSERT INTO test_665_after VALUES('a')": invalid input syntax for type integer: "a" LINE 1: INSERT INTO test_665_after VALUES('a') ^ diff --git a/gpMgmt/bin/gpload_test/gpload2/query666.ans b/gpMgmt/bin/gpload_test/gpload2/query666.ans index a2ebf47574a..85e34e6e217 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query666.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query666.ans @@ -1,7 +1,7 @@ 2021-01-11 19:18:24|INFO|gpload session started 2021-01-11 19:18:24 2021-01-11 19:18:24|INFO|setting schema 'public' for table 'texttable_666' 2021-01-11 19:18:24|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30 -2021-01-11 19:18:24|ERROR|could not execute SQL in sql:before "INSERT INTO test_666_before VALUES('a')": ERROR: invalid input syntax for type integer: "a" +2021-01-11 19:18:24|ERROR|could not execute SQL in sql:before "INSERT INTO test_666_before VALUES('a')": invalid input syntax for type integer: "a" LINE 1: INSERT INTO test_666_before VALUES('a') ^ diff --git a/gpMgmt/bin/gpload_test/gpload2/query667.ans 
b/gpMgmt/bin/gpload_test/gpload2/query667.ans index 89653613c34..205403e8ce2 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query667.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query667.ans @@ -5,7 +5,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-01-11 19:20:03|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30 2021-01-11 19:20:03|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_08aff1d5e0be087569323178726e90f6 2021-01-11 19:20:03|INFO|did not find an external table to reuse. creating ext_gpload_reusable_f39a934e_53fe_11eb_898a_005056983e1a -2021-01-11 19:20:04|ERROR|could not execute SQL in sql:after "INSERT INTO test_667_after VALUES('a')": ERROR: invalid input syntax for type integer: "a" +2021-01-11 19:20:04|ERROR|could not execute SQL in sql:after "INSERT INTO test_667_after VALUES('a')": invalid input syntax for type integer: "a" LINE 1: INSERT INTO test_667_after VALUES('a') ^ diff --git a/gpMgmt/bin/gpload_test/gpload2/query68.ans b/gpMgmt/bin/gpload_test/gpload2/query68.ans index 808eff402fd..fcd3dfde0ff 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query68.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query68.ans @@ -5,7 +5,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-01-08 16:05:22|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file2.txt" -t 30 2021-01-08 16:05:22|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_a1101b5024707ea34f55e778f329e548 2021-01-08 16:05:22|INFO|did not find an external table to reuse. creating ext_gpload_reusable_418b1b42_5188_11eb_93db_00505698707d -2021-01-08 16:05:22|ERROR|ERROR: column "Field1" is of type bigint but expression is of type text +2021-01-08 16:05:22|ERROR|column "Field1" is of type bigint but expression is of type text LINE 1: ...bb31496d7e9a13bd29b90 ("Field1","Field#2") SELECT "Field1","... 
^ HINT: You will need to rewrite or cast the expression. diff --git a/gpMgmt/bin/gpload_test/gpload2/query69.ans b/gpMgmt/bin/gpload_test/gpload2/query69.ans index c157448556b..fce4bb8c984 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query69.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query69.ans @@ -5,7 +5,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-01-08 16:28:20|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file2.txt" -t 30 2021-01-08 16:28:20|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_a1101b5024707ea34f55e778f329e548 2021-01-08 16:28:20|INFO|did not find an external table to reuse. creating ext_gpload_reusable_770f4452_518b_11eb_98a2_00505698707d -2021-01-08 16:28:20|ERROR|ERROR: column "Field1" is of type bigint but expression is of type text +2021-01-08 16:28:20|ERROR|column "Field1" is of type bigint but expression is of type text LINE 1: ...bb31496d7e9a13bd29b90 ("Field1","Field#2") SELECT "Field1","... ^ HINT: You will need to rewrite or cast the expression. diff --git a/gpMgmt/bin/gpload_test/gpload2/query75.ans b/gpMgmt/bin/gpload_test/gpload2/query75.ans index dd5b01b3add..9bfe7c6edf0 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query75.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query75.ans @@ -14,7 +14,11 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-08-10 14:56:46|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-08-10 14:56:46|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_5171458efa83aaf8c5bc7004bae85d5b 2021-08-10 14:56:46|INFO|reusing external table ext_gpload_reusable_2092c476_f9a8_11eb_b503_0050569e2380 -2021-08-10 14:56:46|ERROR|unexpected error -- backtrace written to log file +2021-08-10 14:56:46|ERROR|column "列" does not exist +LINE 1: ..." FROM (SELECT *, row_number() OVER (PARTITION BY 列#2) AS g... 
+ ^ +HINT: Perhaps you meant to reference the column "staging_gpload_reusable_77874e55aae34d59751eb574ff0f5cf7.列1" or the column "chinese表.列1". + encountered while running INSERT INTO public."chinese表" ("列1","列#2","lie3") (SELECT from_table."列1",from_table."列#2",from_table."lie3" FROM (SELECT *, row_number() OVER (PARTITION BY 列#2) AS gpload_row_number FROM staging_gpload_reusable_77874e55aae34d59751eb574ff0f5cf7) AS from_table WHERE gpload_row_number=1) 2021-08-10 14:56:46|INFO|rows Inserted = 0 2021-08-10 14:56:46|INFO|rows Updated = 8 2021-08-10 14:56:46|INFO|data formatting errors = 0 @@ -33,7 +37,11 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur 2021-08-10 14:56:46|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-08-10 14:56:46|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_5171458efa83aaf8c5bc7004bae85d5b 2021-08-10 14:56:46|INFO|reusing external table ext_gpload_reusable_2092c476_f9a8_11eb_b503_0050569e2380 -2021-08-10 14:56:46|ERROR|unexpected error -- backtrace written to log file +2021-08-10 14:56:46|ERROR|column "列" does not exist +LINE 1: ..." FROM (SELECT *, row_number() OVER (PARTITION BY 列#2) AS g... + ^ +HINT: Perhaps you meant to reference the column "staging_gpload_reusable_77874e55aae34d59751eb574ff0f5cf7.列1" or the column "chinese表.列1". 
+ encountered while running INSERT INTO public."chinese表" ("列1","列#2","lie3") (SELECT from_table."列1",from_table."列#2",from_table."lie3" FROM (SELECT *, row_number() OVER (PARTITION BY 列#2) AS gpload_row_number FROM staging_gpload_reusable_77874e55aae34d59751eb574ff0f5cf7) AS from_table WHERE gpload_row_number=1) 2021-08-10 14:56:46|INFO|rows Inserted = 0 2021-08-10 14:56:46|INFO|rows Updated = 8 2021-08-10 14:56:46|INFO|data formatting errors = 0 diff --git a/gpMgmt/bin/gpload_test/gpload2/query76.ans b/gpMgmt/bin/gpload_test/gpload2/query76.ans index cb958591c94..26872cf30e8 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query76.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query76.ans @@ -3,18 +3,12 @@ LINE 13: and pgext.fmtopts like '%delimiter '';'' nu... ^ HINT: Use the escape string syntax for backslashes, e.g., E'\\'. WARNING: nonstandard use of escape in a string literal -LINE 1: ...pathto/data_file.txt') format'text' (delimiter ';' null '\N' escap... - ^ -HINT: Use the escape string syntax for escapes, e.g., E'\r\n'. WARNING: nonstandard use of \' in a string literal -LINE 1: ...xt') format'text' (delimiter ';' null '\N' escape '\' ) enco... - ^ -HINT: Use '' to write quotes in strings, or use the escape string syntax (E'...'). 2021-11-29 15:29:03|INFO|gpload session started 2021-11-29 15:29:03 2021-11-29 15:29:03|INFO|setting schema 'public' for table 'chinese表' 2021-11-29 15:29:03|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2021-11-29 15:29:03|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_075bc846_50e6_11ec_8cb7_0050569e2380 -2021-11-29 15:29:03|ERROR|could not run SQL "create external table ext_gpload_reusable_075bc846_50e6_11ec_8cb7_0050569e2380("列1" text,"列#2" int,"lie3" timestamp)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "UTF8" +2021-11-29 15:29:03|ERROR|could not run SQL "create external table ext_gpload_reusable_075bc846_50e6_11ec_8cb7_0050569e2380("列1" text,"列#2" int,"lie3" timestamp)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": syntax error at or near "UTF8" LINE 1: ...'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ^ @@ -41,7 +35,7 @@ HINT: Use the escape string syntax for backslashes, e.g., E'\\'. 2021-11-29 15:29:04|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30 2021-11-29 15:29:04|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_5171458efa83aaf8c5bc7004bae85d5b 2021-11-29 15:29:04|INFO|did not find an external table to reuse. creating ext_gpload_reusable_07a3c70e_50e6_11ec_9873_0050569e2380 -2021-11-29 15:29:04|ERROR|could not run SQL "create external table ext_gpload_reusable_07a3c70e_50e6_11ec_9873_0050569e2380(列1 text,列#2 int,lie3 timestamp)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "#" +2021-11-29 15:29:04|ERROR|could not run SQL "create external table ext_gpload_reusable_07a3c70e_50e6_11ec_9873_0050569e2380(列1 text,列#2 int,lie3 timestamp)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": syntax error at or near "#" LINE 1: ...(列1 text,列#2 int,lie... 
^ diff --git a/gpMgmt/bin/gpload_test/gpload2/query77.ans b/gpMgmt/bin/gpload_test/gpload2/query77.ans index cfeacc66960..02d530e7eaa 100644 --- a/gpMgmt/bin/gpload_test/gpload2/query77.ans +++ b/gpMgmt/bin/gpload_test/gpload2/query77.ans @@ -3,18 +3,12 @@ LINE 13: and pgext.fmtopts like '%delimiter '';'' nu... ^ HINT: Use the escape string syntax for backslashes, e.g., E'\\'. WARNING: nonstandard use of escape in a string literal -LINE 1: .../data_file.txt') format'text' (delimiter ';' null '\N' escap... - ^ -HINT: Use the escape string syntax for escapes, e.g., E'\r\n'. WARNING: nonstandard use of \' in a string literal -LINE 1: ...xt') format'text' (delimiter ';' null '\N' escape '\' ) enco... - ^ -HINT: Use '' to write quotes in strings, or use the escape string syntax (E'...'). 2021-11-29 11:57:13|INFO|gpload session started 2021-11-29 11:57:13 2021-11-29 11:57:13|INFO|setting schema 'public' for table 'testspecialchar' 2021-11-29 11:57:13|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-11-29 11:57:13|INFO|did not find an external table to reuse. 
creating ext_gpload_reusable_6f40f210_50c8_11ec_89e8_0050569e2380 -2021-11-29 11:57:13|ERROR|could not run SQL "create external table ext_gpload_reusable_6f40f210_50c8_11ec_89e8_0050569e2380("Field1" text,"Field#2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "UTF8" +2021-11-29 11:57:13|ERROR|could not run SQL "create external table ext_gpload_reusable_6f40f210_50c8_11ec_89e8_0050569e2380("Field1" text,"Field#2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": syntax error at or near "UTF8" LINE 1: ...'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ^ @@ -22,26 +16,18 @@ LINE 1: ...'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' 2021-11-29 11:57:13|INFO|rows Updated = 0 2021-11-29 11:57:13|INFO|data formatting errors = 0 2021-11-29 11:57:13|INFO|gpload failed -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'Field1' as the Apache Cloudberry data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. WARNING: nonstandard use of \\ in a string literal LINE 13: and pgext.fmtopts like '%delimiter '';'' nu... ^ HINT: Use the escape string syntax for backslashes, e.g., E'\\'. WARNING: nonstandard use of escape in a string literal -LINE 1: .../data_file.txt') format'text' (delimiter ';' null '\N' escap... - ^ -HINT: Use the escape string syntax for escapes, e.g., E'\r\n'. WARNING: nonstandard use of \' in a string literal -LINE 1: ...xt') format'text' (delimiter ';' null '\N' escape '\' ) enco... - ^ -HINT: Use '' to write quotes in strings, or use the escape string syntax (E'...'). 
2021-11-29 11:57:13|INFO|gpload session started 2021-11-29 11:57:13 2021-11-29 11:57:13|INFO|setting schema 'public' for table 'testspecialchar' 2021-11-29 11:57:13|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30 2021-11-29 11:57:13|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_40df9a45044f2d17b97f89bbbc58f24f 2021-11-29 11:57:13|INFO|did not find an external table to reuse. creating ext_gpload_reusable_6f5c6568_50c8_11ec_ae2f_0050569e2380 -2021-11-29 11:57:13|ERROR|could not run SQL "create external table ext_gpload_reusable_6f5c6568_50c8_11ec_ae2f_0050569e2380("Field1" text,"Field#2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "UTF8" +2021-11-29 11:57:13|ERROR|could not run SQL "create external table ext_gpload_reusable_6f5c6568_50c8_11ec_ae2f_0050569e2380("Field1" text,"Field#2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": syntax error at or near "UTF8" LINE 1: ...'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ^ diff --git a/gpMgmt/bin/gpload_test/gpload2/setup.ans b/gpMgmt/bin/gpload_test/gpload2/setup.ans index ea4eeaeb625..386cdd1e1f8 100644 --- a/gpMgmt/bin/gpload_test/gpload2/setup.ans +++ b/gpMgmt/bin/gpload_test/gpload2/setup.ans @@ -5,6 +5,10 @@ CREATE DATABASE You are now connected to database "reuse_gptest" as user "gpadmin". 
CREATE SCHEMA test; CREATE SCHEMA +CREATE SCHEMA "EXT_schema_test"; +CREATE SCHEMA +CREATE SCHEMA "spiegelungssätze"; +CREATE SCHEMA set client_min_messages='warning'; SET DROP EXTERNAL TABLE IF EXISTS temp_gpload_staging_table; diff --git a/gpMgmt/bin/gpload_test/gpload2/setup.sql b/gpMgmt/bin/gpload_test/gpload2/setup.sql index 37bd494cafc..75b1d47fced 100644 --- a/gpMgmt/bin/gpload_test/gpload2/setup.sql +++ b/gpMgmt/bin/gpload_test/gpload2/setup.sql @@ -5,6 +5,8 @@ CREATE DATABASE reuse_gptest; \c reuse_gptest CREATE SCHEMA test; +CREATE SCHEMA "EXT_schema_test"; +CREATE SCHEMA "spiegelungssätze"; set client_min_messages='warning'; DROP EXTERNAL TABLE IF EXISTS temp_gpload_staging_table; diff --git a/gpMgmt/bin/gpmemwatcher b/gpMgmt/bin/gpmemwatcher index 6569015bc09..ecca0eac168 100755 --- a/gpMgmt/bin/gpmemwatcher +++ b/gpMgmt/bin/gpmemwatcher @@ -136,7 +136,7 @@ def launchProcess(host, workdir): # Now let's just quick check the host as to whether the python version is >= 2.6 try: - subprocess.check_call("ssh -T %s '%s python -c \"import sys; sys.exit(1) if sys.hexversion < 0x020600f0 else 0\"'" % (host, py_string), shell=True) + subprocess.check_call("ssh -T %s '%s python3 -c \"import sys; sys.exit(1) if sys.hexversion < 0x020600f0 else 0\"'" % (host, py_string), shell=True) except subprocess.CalledProcessError as e: #print >> sys.stderr, 'Python version on host %s is < 2.6.0. Aborting' % (host) print('Python version on host %s is < 2.6.0. 
Aborting' % (host), file=sys.stderr) diff --git a/gpMgmt/bin/gpmovemirrors b/gpMgmt/bin/gpmovemirrors index 7220a0f897e..cd78c3b2771 100755 --- a/gpMgmt/bin/gpmovemirrors +++ b/gpMgmt/bin/gpmovemirrors @@ -10,10 +10,8 @@ import os import sys import signal import itertools - +from psycopg2 import DatabaseError try: - import pg - from gppylib.commands.unix import * from gppylib.commands.gp import * from gppylib.commands.pg import PgControlData @@ -23,7 +21,6 @@ try: from gppylib.db import dbconn from gppylib.userinput import * from gppylib.operations.startSegments import * - from pgdb import DatabaseError from gppylib import gparray, gplog, pgconf, userinput, utils from gppylib.parseutils import line_reader, check_values, canonicalize_address from gppylib.operations.segment_tablespace_locations import get_tablespace_locations diff --git a/gpMgmt/bin/gppylib/commands/base.py b/gpMgmt/bin/gppylib/commands/base.py index 98481425071..35f2bf4e4f1 100755 --- a/gpMgmt/bin/gppylib/commands/base.py +++ b/gpMgmt/bin/gppylib/commands/base.py @@ -29,7 +29,6 @@ from gppylib import gplog from gppylib import gpsubprocess -from pg import DB logger = gplog.get_default_logger() @@ -637,8 +636,7 @@ def cancel(self): # if self.conn is not set we cannot cancel. 
if self.cancel_conn: - DB(self.cancel_conn).cancel() - + self.cancel_conn.cancel() class CommandNotFoundException(Exception): def __init__(self, cmd, paths): diff --git a/gpMgmt/bin/gppylib/commands/pg.py b/gpMgmt/bin/gppylib/commands/pg.py index a2af133c28f..8ae45dbccfa 100644 --- a/gpMgmt/bin/gppylib/commands/pg.py +++ b/gpMgmt/bin/gppylib/commands/pg.py @@ -12,6 +12,7 @@ from .unix import * from gppylib.commands.base import * from gppylib.commands.gp import RECOVERY_REWIND_APPNAME +from psycopg2 import DatabaseError logger = get_default_logger() diff --git a/gpMgmt/bin/gppylib/commands/test/unit/test_unit_pg_base_backup.py b/gpMgmt/bin/gppylib/commands/test/unit/test_unit_pg_base_backup.py index 117f62b41ea..3ff231a4a92 100644 --- a/gpMgmt/bin/gppylib/commands/test/unit/test_unit_pg_base_backup.py +++ b/gpMgmt/bin/gppylib/commands/test/unit/test_unit_pg_base_backup.py @@ -1,6 +1,119 @@ import unittest from gppylib.commands import pg +from test.unit.gp_unittest import GpTestCase, run_tests +from psycopg2 import DatabaseError +from gppylib.test.unit.gp_unittest import GpTestCase +from gppylib.commands.base import CommandResult + +class TestUnitPgReplicationSlot(GpTestCase): + def setUp(self): + mock_logger = Mock(spec=['log', 'warn', 'info', 'debug', 'error', 'warning', 'fatal']) + self.replication_slot_name = "internal_wal_replication_slot" + self.source_host = "bar" + self.source_port = 1234 + + self.pg_replication_slot = pg.PgReplicationSlot( + self.source_host, + self.source_port, + self.replication_slot_name, + ) + self.apply_patches([ + patch('gppylib.commands.pg.logger', return_value=mock_logger), + patch('gppylib.db.dbconn.DbURL', return_value=Mock()) + ]) + + self.mock_logger = self.get_mock_from_apply_patch('logger') + + @patch('gppylib.db.dbconn.connect', side_effect=Exception()) + def test_slot_exist_conn_exception(self, mock1): + + with self.assertRaises(Exception) as ex: + self.pg_replication_slot.slot_exists() + + self.assertEqual(1, 
self.mock_logger.debug.call_count) + self.assertEqual([call('Checking if slot internal_wal_replication_slot exists for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) + self.assertTrue('Failed to query pg_replication_slots for' in str(ex.exception)) + + @patch('gppylib.db.dbconn.connect', autospec=True) + @patch('gppylib.db.dbconn.querySingleton', return_value=1) + def test_slot_exist_query_true(self, mock1, mock2): + self.assertTrue(self.pg_replication_slot.slot_exists()) + self.assertEqual(1, self.mock_logger.debug.call_count) + self.assertEqual([call('Checking if slot internal_wal_replication_slot exists for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) + + @patch('gppylib.db.dbconn.connect', autospec=True) + @patch('gppylib.db.dbconn.querySingleton', return_value=0) + def test_slot_exist_query_false(self, mock1, mock2): + self.assertFalse(self.pg_replication_slot.slot_exists()) + self.assertEqual(2, self.mock_logger.debug.call_count) + self.assertEqual([call('Checking if slot internal_wal_replication_slot exists for host:bar, port:1234'), + call('Slot internal_wal_replication_slot does not exist for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) + + @patch('gppylib.db.dbconn.connect', side_effect=Exception()) + def test_drop_slot_conn_exception(self, mock1): + with self.assertRaises(Exception) as ex: + self.pg_replication_slot.drop_slot() + + self.assertEqual(1, self.mock_logger.debug.call_count) + self.assertEqual([call('Dropping slot internal_wal_replication_slot for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) + self.assertTrue('Failed to drop replication slot for host:bar, port:1234' in str(ex.exception)) + + @patch('gppylib.db.dbconn.connect', autospec=True) + @patch('gppylib.db.dbconn.query', side_effect=DatabaseError("DatabaseError Exception")) + def test_drop_slot_db_error_exception(self, mock1, mock2): + self.pg_replication_slot.drop_slot() + self.assertEqual(1, 
self.mock_logger.debug.call_count) + self.assertEqual(1, self.mock_logger.exception.call_count) + self.assertEqual([call('Dropping slot internal_wal_replication_slot for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) + self.assertEqual([call('Failed to query pg_drop_replication_slot for host:bar, port:1234: DatabaseError Exception')], + self.mock_logger.exception.call_args_list) + + @patch('gppylib.db.dbconn.connect', autospec=True) + @patch('gppylib.db.dbconn.query', autospec=True) + def test_drop_slot_success(self, mock1, mock2): + self.assertTrue(self.pg_replication_slot.drop_slot()) + self.assertEqual(2, self.mock_logger.debug.call_count) + self.assertEqual([call('Dropping slot internal_wal_replication_slot for host:bar, port:1234'), + call('Successfully dropped replication slot internal_wal_replication_slot for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) + + @patch('gppylib.db.dbconn.connect', side_effect=Exception()) + def test_create_slot_conn_exception(self, mock1): + with self.assertRaises(Exception) as ex: + self.pg_replication_slot.create_slot() + + self.assertEqual(1, self.mock_logger.debug.call_count) + self.assertEqual([call('Creating slot internal_wal_replication_slot for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) + self.assertTrue('Failed to create replication slot for host:bar, port:1234' in str(ex.exception)) + + @patch('gppylib.db.dbconn.connect', autospec=True) + @patch('gppylib.db.dbconn.query', side_effect=DatabaseError("DatabaseError Exception")) + def test_create_slot_db_error_exception(self, mock1, mock2): + self.pg_replication_slot.create_slot() + self.assertEqual(1, self.mock_logger.debug.call_count) + self.assertEqual(1, self.mock_logger.exception.call_count) + self.assertEqual([call('Creating slot internal_wal_replication_slot for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) + self.assertEqual( + [call('Failed to query pg_create_physical_replication_slot 
for host:bar, port:1234: DatabaseError Exception')], + self.mock_logger.exception.call_args_list) + + @patch('gppylib.db.dbconn.connect', autospec=True) + @patch('gppylib.db.dbconn.query', autospec=True) + def test_create_slot_success(self, mock1, mock2): + self.assertTrue(self.pg_replication_slot.create_slot()) + self.assertEqual(2, self.mock_logger.debug.call_count) + self.assertEqual([call('Creating slot internal_wal_replication_slot for host:bar, port:1234'), + call( + 'Successfully created replication slot internal_wal_replication_slot for host:bar, port:1234')], + self.mock_logger.debug.call_args_list) class TestUnitPgBaseBackup(unittest.TestCase): def test_replication_slot_not_passed_when_not_given_slot_name(self): diff --git a/gpMgmt/bin/gppylib/db/catalog.py b/gpMgmt/bin/gppylib/db/catalog.py index 6214b805c29..80ca66289eb 100644 --- a/gpMgmt/bin/gppylib/db/catalog.py +++ b/gpMgmt/bin/gppylib/db/catalog.py @@ -6,8 +6,8 @@ """ import copy - -import pg +import os +from contextlib import closing from gppylib import gplog from gppylib.db import dbconn diff --git a/gpMgmt/bin/gppylib/db/dbconn.py b/gpMgmt/bin/gppylib/db/dbconn.py index b85f802d02b..1492b2511c5 100644 --- a/gpMgmt/bin/gppylib/db/dbconn.py +++ b/gpMgmt/bin/gppylib/db/dbconn.py @@ -9,9 +9,8 @@ import sys import os import stat - +import psycopg2 try: - import pgdb from gppylib.commands.unix import UserId except ImportError as e: @@ -159,67 +158,44 @@ def canonicalize(s): # 1. pg notice is accessible to a user of connection returned by dbconn.connect(), # lifted from the underlying _pg connection # 2. multiple calls to dbconn.close() should not return an error -class Connection(pgdb.Connection): +class Connection: def __init__(self, connection): - self._notices = collections.deque(maxlen=100) - # we must do an attribute by attribute copy of the notices here - # due to limitations in pg implementation. Wrap with with a - # namedtuple for ease of use. 
- def handle_notice(notice): - received = {} - for attr in dir(notice): - if attr.startswith('__'): - continue - value = getattr(notice, attr) - received[attr] = value - Notice = collections.namedtuple('Notice', sorted(received)) - self._notices.append(Notice(**received)) - - - self._impl = connection - self._impl._cnx.set_notice_receiver(handle_notice) + self._conn = connection + self._conn.notices = collections.deque(maxlen=100) def __enter__(self): - return self._impl.__enter__() + return self._conn.__enter__() # __exit__() does not close the connection. This is in line with the # python DB API v2 specification (pep-0249), where close() is done on # __del__(), not __exit__(). def __exit__(self, *args): - return self._impl.__exit__(*args) + return self._conn.__exit__(*args) def __getattr__(self, name): - return getattr(self._impl, name) + return getattr(self._conn, name) def notices(self): - notice_list = list(self._notices) - self._notices.clear() + notice_list = list(self._conn.notices) + self._conn.notices.clear() return notice_list # don't return operational error if connection is already closed def close(self): - if not self._impl.closed: - self._impl.close() + if not self._conn.closed: + self._conn.close() def connect(dburl, utility=False, verbose=False, - encoding=None, allowSystemTableMods=False, logConn=True, unsetSearchPath=True): + encoding=None, allowSystemTableMods=False, logConn=True, unsetSearchPath=True, cursorFactory=None): conninfo = { 'user': dburl.pguser, 'password': dburl.pgpass, 'host': dburl.pghost, 'port': dburl.pgport, - # dbname is very subtle, Package pgdb contains a bug it will only escape the string when - # 1. a space in the dbname, and - # 2. there are other keyword arguments of pgdb.connect method - # See issue https://github.com/PyGreSQL/PyGreSQL/issues/77 for details - # The code here is test if there is space, if so, we know pgdb will escape, let's not do here - # if not, let's do escape here since pgdb forget to do. 
- # - # NB: we always provide port keyword argument to connect method of pgdb, thus - # we will always enter the code path of pgdb.connect of the above escape logic. - 'database': dburl.pgdb if ' ' in dburl.pgdb else dburl.pgdb.replace('\\', '\\\\').replace("'", "\\'"), + 'database': dburl.pgdb, + 'cursor_factory': cursorFactory } # building options @@ -257,22 +233,23 @@ def connect(dburl, utility=False, verbose=False, logFunc = logger.info if dburl.timeout is not None else logger.debug logFunc("Connecting to db {} on host {}".format(dburl.pgdb, dburl.pghost)) - connection = None + conn = None for i in range(retries): try: - connection = pgdb.connect(**conninfo) + conn = psycopg2.connect(**conninfo) + conn.set_session(autocommit=True) break - except pgdb.OperationalError as e: + except psycopg2.OperationalError as e: if 'timeout expired' in str(e): logger.warning('Timeout expired connecting to %s, attempt %d/%d' % (dburl.pgdb, i+1, retries)) continue raise - if connection is None: + if conn is None: raise ConnectionError('Failed to connect to %s' % dburl.pgdb) - return Connection(connection) + return Connection(conn) def execSQL(conn, sql, autocommit=True): """ @@ -286,7 +263,6 @@ def execSQL(conn, sql, autocommit=True): Using `with dbconn.connect() as conn` syntax will override autocommit and complete queries in a transaction followed by a commit on context close """ - conn.autocommit = autocommit with conn.cursor() as cursor: cursor.execute(sql) diff --git a/gpMgmt/bin/gppylib/db/test/unit/test_cluster_dbconn.py b/gpMgmt/bin/gppylib/db/test/unit/test_cluster_dbconn.py index 086ccce9298..a03eb4db42b 100644 --- a/gpMgmt/bin/gppylib/db/test/unit/test_cluster_dbconn.py +++ b/gpMgmt/bin/gppylib/db/test/unit/test_cluster_dbconn.py @@ -46,7 +46,7 @@ def test_verbose_mode_allows_warnings_to_be_sent_to_the_client(self): for notice in notices: - if warning in notice.message: + if warning in notice: return # found it! 
self.fail("Didn't find expected notice '{}' in {!r}".format( diff --git a/gpMgmt/bin/gppylib/gpcatalog.py b/gpMgmt/bin/gppylib/gpcatalog.py index dc95de34e8f..f802e02da0e 100644 --- a/gpMgmt/bin/gppylib/gpcatalog.py +++ b/gpMgmt/bin/gppylib/gpcatalog.py @@ -168,7 +168,7 @@ def __init__(self, dbConnection): curs = self._query(version_query) except Exception as e: raise GPCatalogException("Error reading database version: " + str(e)) - self._version = GpVersion(curs.getresult()[0][0]) + self._version = GpVersion(curs.fetchone()[0]) # Read the list of catalog tables from the database try: @@ -178,7 +178,7 @@ def __init__(self, dbConnection): # Construct our internal representation of the catalog - for [oid, relname, relisshared] in curs.getresult(): + for [oid, relname, relisshared] in curs.fetchall(): self._tables[relname] = GPCatalogTable(self, relname) # Note: stupid API returns t/f for boolean value self._tables[relname]._setShared(relisshared == 't') @@ -217,7 +217,9 @@ def _query(self, qry): """ Simple wrapper around querying the database connection """ - return self._dbConnection.query(qry) + cur = self._dbConnection.cursor() + cur.execute(qry) + return cur def _markCoordinatorOnlyTables(self): """ @@ -507,10 +509,10 @@ def __init__(self, parent, name, pkey=None): # exist. 
raise GPCatalogException("Catalog table %s does not exist" % name) - if cur.ntuples() == 0: + if cur.rowcount == 0: raise GPCatalogException("Catalog table %s does not exist" % name) - for row in cur.getresult(): + for row in cur.fetchall(): (attname, atttype, typname) = row # Mark if the catalog has an oid column @@ -546,7 +548,7 @@ def __init__(self, parent, name, pkey=None): WHERE attrelid = 'pg_catalog.{catname}'::regclass """.format(catname=name) cur = parent._query(qry) - self._pkey = [row[0] for row in cur.getresult()] + self._pkey = [row[0] for row in cur.fetchall()] # Primary key must be in the column list for k in self._pkey: diff --git a/gpMgmt/bin/gppylib/operations/buildMirrorSegments.py b/gpMgmt/bin/gppylib/operations/buildMirrorSegments.py index a071aa89e83..1eb8b6dc334 100644 --- a/gpMgmt/bin/gppylib/operations/buildMirrorSegments.py +++ b/gpMgmt/bin/gppylib/operations/buildMirrorSegments.py @@ -565,7 +565,7 @@ def _get_running_postgres_segments(self, segments): return running_segments def dereference_remote_symlink(self, datadir, host): - cmdStr = """python -c 'import os; print(os.path.realpath("%s"))'""" % datadir + cmdStr = """python3 -c 'import os; print(os.path.realpath("%s"))'""" % datadir cmd = base.Command('dereference a symlink on a remote host', cmdStr=cmdStr, ctxt=base.REMOTE, remoteHost=host) cmd.run() results = cmd.get_results() diff --git a/gpMgmt/bin/gppylib/operations/segment_reconfigurer.py b/gpMgmt/bin/gppylib/operations/segment_reconfigurer.py index 643489fd086..913847a5eb5 100644 --- a/gpMgmt/bin/gppylib/operations/segment_reconfigurer.py +++ b/gpMgmt/bin/gppylib/operations/segment_reconfigurer.py @@ -1,8 +1,7 @@ import time - from gppylib.commands import base from gppylib.db import dbconn -import pg +from contextlib import closing FTS_PROBE_QUERY = 'SELECT pg_catalog.gp_request_fts_probe_scan()' @@ -13,15 +12,19 @@ def __init__(self, logger, worker_pool, timeout): self.timeout = timeout def _trigger_fts_probe(self, dburl): - 
conn = pg.connect(dbname=dburl.pgdb, - host=dburl.pghost, - port=dburl.pgport, - opt=None, - user=dburl.pguser, - passwd=dburl.pgpass, - ) - conn.query(FTS_PROBE_QUERY) - conn.close() + start_time = time.time() + while True: + try: + with closing(dbconn.connect(dburl)) as conn: + with conn.cursor() as cur: + cur.execute(FTS_PROBE_QUERY) + break + except Exception as e: + now = time.time() + if now < start_time + self.timeout: + continue + else: + raise RuntimeError("FTS probing did not complete in {} seconds.".format(self.timeout)) def reconfigure(self): # issue a distributed query to make sure we pick up the fault @@ -36,9 +39,12 @@ def reconfigure(self): # Empty block of 'BEGIN' and 'END' won't start a distributed transaction, # execute a DDL query to start a distributed transaction. # so the primaries'd better be up - conn = dbconn.connect(dburl) - conn.cursor().execute('CREATE TEMP TABLE temp_test(a int)') - conn.cursor().execute('COMMIT') + with closing(dbconn.connect(dburl)) as conn: + with conn.cursor() as cur: + cur.execute('BEGIN') + cur.execute('CREATE TEMP TABLE temp_test(a int)') + cur.execute('COMMIT') + break except Exception as e: # Should close conn here # Otherwise, the postmaster will be blocked by abort transaction @@ -48,6 +54,3 @@ def reconfigure(self): continue else: raise RuntimeError("Mirror promotion did not complete in {0} seconds.".format(self.timeout)) - else: - conn.close() - break diff --git a/gpMgmt/bin/gppylib/operations/test/unit/test_unit_segment_reconfigurer.py b/gpMgmt/bin/gppylib/operations/test/unit/test_unit_segment_reconfigurer.py index 9d86071bafd..d4c2a0c26e7 100644 --- a/gpMgmt/bin/gppylib/operations/test/unit/test_unit_segment_reconfigurer.py +++ b/gpMgmt/bin/gppylib/operations/test/unit/test_unit_segment_reconfigurer.py @@ -4,8 +4,7 @@ from gppylib.operations.segment_reconfigurer import SegmentReconfigurer, FTS_PROBE_QUERY from gppylib.test.unit.gp_unittest import GpTestCase -import pg -import pgdb +import psycopg2 
import mock from mock import Mock, patch, call, MagicMock import contextlib @@ -38,22 +37,22 @@ def setUp(self): self.apply_patches([ patch('gppylib.db.dbconn.connect', new=self.connect), patch('gppylib.db.dbconn.DbURL', return_value=self.db_url), - patch('pg.connect'), + patch('psycopg2.connect'), ]) def test_it_triggers_fts_probe(self): reconfigurer = SegmentReconfigurer(logger=self.logger, worker_pool=self.worker_pool, timeout=self.timeout) reconfigurer.reconfigure() - pg.connect.assert_has_calls([ - call(dbname=self.db, host=self.host, port=self.port, opt=None, user=self.user, passwd=self.passwd), + psycopg2.connect.assert_has_calls([ + call(dbname=self.db, host=self.host, port=self.port, options=None, user=self.user, password=self.passwd), call().query(FTS_PROBE_QUERY), call().close(), ] ) def test_it_retries_the_connection(self): - self.connect.configure_mock(side_effect=[pgdb.DatabaseError, pgdb.DatabaseError, self.conn]) + self.connect.configure_mock(side_effect=[psycopg2.DatabaseError, psycopg2.DatabaseError, self.conn]) reconfigurer = SegmentReconfigurer(logger=self.logger, worker_pool=self.worker_pool, timeout=self.timeout) @@ -74,7 +73,7 @@ def fail_for_five_minutes(): # leap forward 300 seconds new_time += self.timeout / 2 now_mock.configure_mock(return_value=new_time) - yield pgdb.DatabaseError + yield psycopg2.DatabaseError self.connect.configure_mock(side_effect=fail_for_five_minutes()) @@ -87,3 +86,27 @@ def fail_for_five_minutes(): self.connect.assert_has_calls([call(self.db_url), call(self.db_url), ]) self.conn.close.assert_has_calls([]) + + @patch('time.time') + def test_it_gives_up_after_600_seconds_2(self, now_mock): + start_datetime = datetime.datetime(2023, 7, 27, 16, 0, 0) + start_time = time.mktime(start_datetime.timetuple()) + now_mock.configure_mock(return_value=start_time) + + def fail_for_ten_minutes(): + new_time = start_time + # leap forward 600 seconds + new_time += self.timeout + now_mock.configure_mock(return_value=new_time) + 
yield psycopg2.DatabaseError + + self.connect.configure_mock(side_effect=fail_for_ten_minutes()) + + reconfigurer = SegmentReconfigurer(logger=self.logger, + worker_pool=self.worker_pool, timeout=self.timeout) + with self.assertRaises(RuntimeError) as context: + reconfigurer.reconfigure() + self.assertEqual("FTS probing did not complete in {} seconds.".format(self.timeout), context.exception.message) + + self.connect.assert_has_calls([call(self.db_url)]) + self.conn.close.assert_has_calls([]) diff --git a/gpMgmt/bin/gppylib/operations/test/unit/test_unit_utils.py b/gpMgmt/bin/gppylib/operations/test/unit/test_unit_utils.py index 38fcd327dee..e62aa7a40b7 100755 --- a/gpMgmt/bin/gppylib/operations/test/unit/test_unit_utils.py +++ b/gpMgmt/bin/gppylib/operations/test/unit/test_unit_utils.py @@ -11,7 +11,7 @@ from gppylib.operations.test_utils_helper import TestOperation, RaiseOperation, RaiseOperation_Unpicklable, RaiseOperation_Safe, ExceptionWithArgs from operations.unix import ListFiles from test.unit.gp_unittest import GpTestCase, run_tests -from pg import DatabaseError +from psycopg2 import DatabaseError class UtilsTestCase(GpTestCase): """ diff --git a/gpMgmt/bin/gppylib/operations/test_utils_helper.py b/gpMgmt/bin/gppylib/operations/test_utils_helper.py index 109bc83bb87..74cfeda2bc0 100755 --- a/gpMgmt/bin/gppylib/operations/test_utils_helper.py +++ b/gpMgmt/bin/gppylib/operations/test_utils_helper.py @@ -1,4 +1,5 @@ from gppylib.operations import Operation +import psycopg2 """ These objects needed for gppylib.operations.test.test_utils are pulled out of said file for @@ -37,5 +38,4 @@ def __init__(self, x, y): class RaiseOperation_Unpicklable(Operation): def execute(self): - import pg - raise pg.DatabaseError() + raise psycopg2.DatabaseError() diff --git a/gpMgmt/bin/gppylib/programs/clsSystemState.py b/gpMgmt/bin/gppylib/programs/clsSystemState.py index e234b633122..4bf55a783c4 100644 --- a/gpMgmt/bin/gppylib/programs/clsSystemState.py +++ 
b/gpMgmt/bin/gppylib/programs/clsSystemState.py @@ -10,7 +10,7 @@ import sys, os import re import collections -import pgdb +import psycopg2 from contextlib import closing from gppylib import gparray, gplog from gppylib.commands import base, gp @@ -1026,7 +1026,7 @@ def _get_unsync_segs_add_wal_remaining_bytes(data, gpArray): wal_sync_bytes_out = 'Unknown' unsync_segs.append(s) data.addValue(VALUE__REPL_SYNC_REMAINING_BYTES, wal_sync_bytes_out) - except pgdb.InternalError: + except (psycopg2.InternalError, psycopg2.OperationalError): logger.warning('could not query segment {} ({}:{})'.format( s.dbid, s.hostname, s.port )) @@ -1098,7 +1098,7 @@ def _add_replication_info(data, primary, mirror): cursor.close() - except pgdb.InternalError: + except (psycopg2.InternalError, psycopg2.OperationalError): logger.warning('could not query segment {} ({}:{})'.format( primary.dbid, primary.hostname, primary.port )) diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_foreign_key_check.py b/gpMgmt/bin/gppylib/test/unit/test_unit_foreign_key_check.py index 9888472bb43..b9e691a7e56 100755 --- a/gpMgmt/bin/gppylib/test/unit/test_unit_foreign_key_check.py +++ b/gpMgmt/bin/gppylib/test/unit/test_unit_foreign_key_check.py @@ -11,7 +11,7 @@ class GpCheckCatTestCase(GpTestCase): def setUp(self): self.logger = Mock(spec=['log', 'info', 'debug', 'error']) - self.db_connection = Mock(spec=['close', 'query']) + self.db_connection = Mock(spec=['close', 'cursor']) self.autoCast = {'regproc': '::oid', 'regprocedure': '::oid', 'regoper': '::oid', @@ -25,9 +25,10 @@ def setUp(self): self.full_join_cat_tables = set(['pg_attribute','gp_distribution_policy','pg_appendonly','pg_constraint','pg_index']) self.foreign_key_check= Mock(spec=['runCheck']) self.foreign_key_check.runCheck.return_value = [] - self.db_connection.query.return_value.ntuples.return_value = 2 - self.db_connection.query.return_value.listfields.return_value = ['pkey1', 'pkey2'] - 
self.db_connection.query.return_value.getresult.return_value = [('r1','r2'), ('r3','r4')] + + self.db_connection.cursor.return_value.rowcount = 2 + self.db_connection.cursor.return_value.description = [('pkey1',), ('pkey2',)] + self.db_connection.cursor.return_value.fetchall.return_value = [('r1','r2'), ('r3','r4')] def test_get_fk_query_left_join_returns_the_correct_query(self): @@ -127,7 +128,7 @@ def test_checkTableForeignKey__returns_correct_join_query(self, log_literal_mock self.assertEqual(len(issue_list) , 2) self.assertEqual(issue_list[0], ('pg_class', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])) self.assertEqual(issue_list[1], ('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])) - self.assertEqual(self.db_connection.query.call_count, 2) + self.assertEqual(self.db_connection.cursor.call_count, 2) def __generate_pg_class_call(table, primary_key_cat_name, col_type, with_filter=True): if with_filter: @@ -168,7 +169,7 @@ def __generate_pg_class_call(table, primary_key_cat_name, col_type, with_filter= self.assertEqual(fk_query_full_join_mock.call_count, 0) fk_query_left_join_mock.assert_has_calls(foreign_key_mock_calls_left, any_order=False) - self.db_connection.query.call_count = 0 + self.db_connection.cursor.call_count = 0 fk_query_full_join_mock.call_count = 0 fk_query_left_join_mock.call_count = 0 diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py b/gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py index ccdfb03a7ad..28aa0f1227c 100755 --- a/gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py +++ b/gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py @@ -18,11 +18,11 @@ def setUp(self): self.subject = imp.load_source('gpcheckcat', gpcheckcat_file) self.subject.check_gpexpand = lambda : (True, "") - self.db_connection = Mock(spec=['close', 'query']) + self.db_connection = Mock(spec=['close', 'cursor', 'set_session']) self.unique_index_violation_check = Mock(spec=['runCheck']) self.foreign_key_check = 
Mock(spec=['runCheck', 'checkTableForeignKey']) self.apply_patches([ - patch("gpcheckcat.pg.connect", return_value=self.db_connection), + patch("gpcheckcat.connect", return_value=self.db_connection), patch("gpcheckcat.UniqueIndexViolationCheck", return_value=self.unique_index_violation_check), patch("gpcheckcat.ForeignKeyCheck", return_value=self.foreign_key_check), patch('os.environ', new={}), @@ -129,23 +129,26 @@ def test_drop_leaked_schemas__when_leaked_schemas_exist__reports_which_schemas_a self.assertIn(expected_message, log_messages) def test_automatic_thread_count(self): - self.db_connection.query.return_value.getresult.return_value = [[0]] + self.db_connection.cursor.return_value.fetchall.return_value = [[0]] self._run_batch_size_experiment(100) self._run_batch_size_experiment(101) + @patch('gpcheckcat.getversion', return_value='4.3') @patch('gpcheckcat.GPCatalog', return_value=Mock()) @patch('sys.exit') @patch('gppylib.gplog.log_literal') - def test_truncate_batch_size(self, mock_log, mock_gpcheckcat, mock_sys_exit): + def test_truncate_batch_size(self, mock_log, mock_sys_exit, mock_gpcatalog, mock_version): self.subject.GV.opt['-B'] = 300 # override the setting from available memory # setup conditions for 50 primaries and plenty of RAM such that max threads > 50 primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')] for i in range(1, 50): primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t')) - self.db_connection.query.return_value.getresult.return_value = [['4.3']] - self.db_connection.query.return_value.dictresult.return_value = primaries + self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + 
self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries testargs = ['some_string','-port 1', '-R foo'] @@ -221,10 +224,11 @@ def test_checkForeignKey__no_arg(self, process_foreign_key_mock): self.foreign_key_check.runCheck.assert_called_once_with(cat_tables) # Test gpcheckat -C option with checkForeignKey + @patch('gpcheckcat.getversion', return_value='4.3') @patch('gpcheckcat.GPCatalog', return_value=Mock()) @patch('sys.exit') @patch('gpcheckcat.checkTableMissingEntry') - def test_runCheckCatname__for_checkForeignKey(self, mock1, mock2, mock3): + def test_runCheckCatname__for_checkForeignKey(self, mock1, mock2, mock3, mock4): self.subject.checkForeignKey = Mock() gpcat_class_mock = Mock(spec=['getCatalogTable']) cat_obj_mock = Mock() @@ -234,8 +238,12 @@ def test_runCheckCatname__for_checkForeignKey(self, mock1, mock2, mock3): for i in range(1, 50): primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t')) - self.db_connection.query.return_value.getresult.return_value = [['4.3']] - self.db_connection.query.return_value.dictresult.return_value = primaries + + # context manager helper functions. 
+ self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries self.subject.GV.opt['-C'] = 'pg_class' @@ -314,7 +322,13 @@ def test_skip_one_test(self, mock_ver, mock_run, mock1, mock2): primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')] for i in range(1, 50): primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t')) - self.db_connection.query.return_value.dictresult.return_value = primaries + + # context manager helper functions. + self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries + self.subject.all_checks = {'test1': 'a', 'test2': 'b', 'test3': 'c'} testargs = ['gpcheckcat', '-port 1', '-s test2'] @@ -330,7 +344,13 @@ def test_skip_multiple_test(self, mock_ver, mock_run, mock1, mock2): primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')] for i in range(1, 50): primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t')) - self.db_connection.query.return_value.dictresult.return_value = primaries + + # context manager helper functions. 
+ self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries + self.subject.all_checks = {'test1': 'a', 'test2': 'b', 'test3': 'c'} testargs = ['gpcheckcat', '-port 1', '-s', "test1, test2"] @@ -346,7 +366,11 @@ def test_skip_test_warning(self, mock_ver, mock_run, mock1, mock2): primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')] for i in range(1, 50): primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t')) - self.db_connection.query.return_value.dictresult.return_value = primaries + # context manager helper functions. + self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries self.subject.all_checks = {'test1': 'a', 'test2': 'b', 'test3': 'c'} testargs = ['gpcheckcat', '-port 1', '-s', "test_invalid, test2"] @@ -365,7 +389,13 @@ def test_run_multiple_test(self, mock_ver, mock_run, mock1, mock2): primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')] for i in range(1, 50): primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t')) - self.db_connection.query.return_value.dictresult.return_value = primaries + + # context manager helper functions. 
+ self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries + self.subject.all_checks = {'test1': 'a', 'test2': 'b', 'test3': 'c'} testargs = ['gpcheckcat', '-port 1', '-R', "test1, test2"] diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_gpconfig.py b/gpMgmt/bin/gppylib/test/unit/test_unit_gpconfig.py index 54fa8f3cf7c..42afb96763d 100644 --- a/gpMgmt/bin/gppylib/test/unit/test_unit_gpconfig.py +++ b/gpMgmt/bin/gppylib/test/unit/test_unit_gpconfig.py @@ -5,11 +5,10 @@ import shutil import sys import tempfile - +from psycopg2 import DatabaseError from gppylib.gparray import Segment, GpArray, SegmentPair from gpconfig_modules.parse_guc_metadata import ParseGuc import errno -from pg import DatabaseError from .gp_unittest import * from unittest.mock import * diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_gpload.py b/gpMgmt/bin/gppylib/test/unit/test_unit_gpload.py index ccc19515257..1d5dd2ae774 100755 --- a/gpMgmt/bin/gppylib/test/unit/test_unit_gpload.py +++ b/gpMgmt/bin/gppylib/test/unit/test_unit_gpload.py @@ -33,8 +33,11 @@ def help_test_with_config(self, gpload_param, expected_begin_value, expected_com print(gpload_param) gploader = gpload(gpload_param) gploader.read_config() - gploader.db = self - gploader.db.query = Mock(side_effect=self.mockQuery) + gploader.conn = Mock() + gploader.conn.cursor.return_value = Mock() + gploader.conn.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + gploader.conn.cursor.return_value.__exit__ = Mock(return_value=False) + gploader.conn.cursor.return_value.__enter__.return_value.execute = Mock(side_effect=self.mockQuery) gploader.do_method_merge = Mock(side_effect=self.mockDoNothing) gploader.do_method_update = 
Mock(side_effect=self.mockDoNothing) gploader.do_method_insert = Mock(side_effect=self.mockDoNothing) diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_gpstate.py b/gpMgmt/bin/gppylib/test/unit/test_unit_gpstate.py index e9c60760aeb..4858e80e65a 100644 --- a/gpMgmt/bin/gppylib/test/unit/test_unit_gpstate.py +++ b/gpMgmt/bin/gppylib/test/unit/test_unit_gpstate.py @@ -1,6 +1,6 @@ import unittest import mock -import pgdb +import psycopg2 import tempfile from gppylib import gparray @@ -228,7 +228,7 @@ def test_add_replication_info_adds_unknowns_if_primary_is_down(self): @mock.patch('gppylib.db.dbconn.connect', autospec=True) def test_add_replication_info_adds_unknowns_if_connection_cannot_be_made(self, mock_connect): # Simulate a connection failure in dbconn.connect(). - mock_connect.side_effect = pgdb.InternalError('connection failure forced by unit test') + mock_connect.side_effect = psycopg2.InternalError('connection failure forced by unit test') GpSystemStateProgram._add_replication_info(self.data, self.primary, self.mirror) self.assertEqual('Unknown', self.data.getStrValue(self.mirror, VALUE__REPL_SENT_LSN)) diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py b/gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py index 1eda2e72a25..1cd81684c23 100644 --- a/gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py +++ b/gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py @@ -6,45 +6,57 @@ class LeakedSchemaDropperTestCase(GpTestCase): def setUp(self): - self.db_connection = Mock(spec=['query']) - - two_leaked_schemas = Mock() - two_leaked_schemas.getresult.return_value = [ + self.db_connection = Mock(spec=['cursor']) + self.db_connection.cursor.return_value.fetchall.return_value = [ ('fake_leak_1', 'something_else'), ('some"test"special_#;character--schema', 'something_else') ] - self.db_connection.query.return_value = two_leaked_schemas - self.subject = LeakedSchemaDropper() def 
test_drop_leaked_schemas__returns_a_list_of_leaked_schemas(self): + self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [ + ('fake_leak_1', 'something_else'), + ('some"test"special_#;character--schema', 'something_else') + ] self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), ['fake_leak_1', 'some"test"special_#;character--schema']) def test_drop_leaked_schemas__when_there_are_no_leaked_schemas__returns_an_empty_list(self): - no_leaked_schemas = Mock() - no_leaked_schemas.getresult.return_value = [] - self.db_connection.query.return_value = no_leaked_schemas - + self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [] self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), []) def test_drop_leaked_schemas__when_query_returns_null_schema__returns_an_empty_list(self): - null_leaked_schema = Mock() - null_leaked_schema.getresult.return_value = [(None, 'something_else')] - self.db_connection.query.return_value = null_leaked_schema - + self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [(None, 'something_else')] self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), []) def 
test_drop_leaked_schemas__when_query_returns_null__returns_an_empty_list(self): - self.db_connection.query.return_value = None - + self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [] self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), []) def test_drop_leaked_schemas__drops_orphaned_and_leaked_schemas(self): + self.db_connection.cursor.return_value = Mock() + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [ + ('fake_leak_1', 'something_else'), + ('some"test"special_#;character--schema', 'something_else') + ] self.subject.drop_leaked_schemas(self.db_connection) drop_query_expected_list = [call("DROP SCHEMA IF EXISTS \"fake_leak_1\" CASCADE;"), call("DROP SCHEMA IF EXISTS \"some\"\"test\"\"special_#;character--schema\" CASCADE;")] - self.db_connection.query.assert_has_calls(drop_query_expected_list) + self.db_connection.cursor.return_value.__enter__.return_value.execute.assert_has_calls(drop_query_expected_list) if __name__ == '__main__': diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_unique_index_violation_check.py b/gpMgmt/bin/gppylib/test/unit/test_unit_unique_index_violation_check.py index c9306d69b97..19ddb773232 100644 --- a/gpMgmt/bin/gppylib/test/unit/test_unit_unique_index_violation_check.py +++ b/gpMgmt/bin/gppylib/test/unit/test_unit_unique_index_violation_check.py @@ -9,33 +9,37 @@ def setUp(self): self.subject = UniqueIndexViolationCheck() self.index_query_result = Mock() - self.index_query_result.getresult.return_value = [ + 
self.index_query_result.fetchall.return_value = [ (9001, 'index1', 'table1', ['index1_column1','index1_column2']), (9001, 'index2', 'table1', ['index2_column1','index2_column2']) ] - self.violated_segments_query_result = Mock() - - self.db_connection = Mock(spec=['query']) - self.db_connection.query.side_effect = self.mock_query_return_value - - def mock_query_return_value(self, query_string): - if query_string == UniqueIndexViolationCheck.unique_indexes_query: - return self.index_query_result - else: - return self.violated_segments_query_result + self.db_connection = Mock(spec=['cursor']) + self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute'])) + self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False) def test_run_check__when_there_are_no_issues(self): - self.violated_segments_query_result.getresult.return_value = [] + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.side_effect = [ + [ + (9001, 'index1', 'table1', ['index1_column1','index1_column2']), + (9001, 'index2', 'table1', ['index2_column1','index2_column2']) + ], + [], + [], + ] violations = self.subject.runCheck(self.db_connection) self.assertEqual(len(violations), 0) def test_run_check__when_index_is_violated(self): - self.violated_segments_query_result.getresult.side_effect = [ + self.db_connection.cursor.return_value.__enter__.return_value.fetchall.side_effect = [ + [ + (9001, 'index1', 'table1', ['index1_column1','index1_column2']), + (9001, 'index2', 'table1', ['index2_column1','index2_column2']) + ], [(-1,), (0,), (1,)], - [(-1,)] + [(-1,)], ] violations = self.subject.runCheck(self.db_connection) diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_utils.py b/gpMgmt/bin/gppylib/test/unit/test_unit_utils.py new file mode 100644 index 00000000000..082e52d8d11 --- /dev/null +++ b/gpMgmt/bin/gppylib/test/unit/test_unit_utils.py @@ -0,0 +1,7 @@ +from mock import * +from .gp_unittest import * +from gppylib.utils 
import escape_string + +class UtilsFunctionsTest(GpTestCase): + def test_escape_string_can_handle_utf8(self): + self.assertEqual('public."spiegelungssätze"', escape_string('public."spiegelungssätze"')) diff --git a/gpMgmt/bin/gppylib/utils.py b/gpMgmt/bin/gppylib/utils.py index fead818d3ac..19791c6280d 100644 --- a/gpMgmt/bin/gppylib/utils.py +++ b/gpMgmt/bin/gppylib/utils.py @@ -5,8 +5,7 @@ from sys import * from xml.dom import minidom from xml.dom import Node - -import pgdb +import psycopg2 from gppylib.gplog import * logger = get_default_logger() @@ -503,14 +502,20 @@ def escapeDoubleQuoteInSQLString(string, forceDoubleQuote=True): string = '"' + string + '"' return string - -def Escape(query_str): - return pgdb.escape_string(query_str) - +# Escape single quotes, backslashes appearing in the string according to the SQL string constants syntax. +# E.g., +# >>> escape_string(r"O'Reilly") +# "O''Reilly" +def escape_string(string): + adapted = psycopg2.extensions.QuotedString(string) + # The getquoted() API returns 'latin-1' encoded binary string by default, we need to specify + # the encoding manually. 
+ adapted.encoding = 'utf-8' + return adapted.getquoted().decode()[1:-1] def escapeArrayElement(query_str): # also escape backslashes and double quotes, in addition to the doubling of single quotes - return pgdb.escape_string(query_str.encode(errors='backslashreplace')).decode(errors='backslashreplace').replace('\\','\\\\').replace('"','\\"') + return escape_string(query_str.encode(errors='backslashreplace')).encode().decode(errors='backslashreplace').replace('\\','\\\\').replace('"','\\"') # Transform Python list to Postgres array literal (of the form: '{...}') @@ -593,7 +598,7 @@ def formatInsertValuesList(row, starelid, inclHLL): # Format stavalues5 for an hll slot elif i == 30 and hll: if inclHLL: - val = '\'{"%s"}\'' % pgdb.escape_bytea(val[0]) + val = '\'{\\%s}\'' % val[0] rowVals.append('\t{0}::{1}'.format(val, 'bytea[]')) else: rowVals.append('\t{0}'.format('NULL::int4[]')) diff --git a/gpMgmt/bin/gpsd b/gpMgmt/bin/gpsd index bffbc02065b..b9b19953b58 100755 --- a/gpMgmt/bin/gpsd +++ b/gpMgmt/bin/gpsd @@ -11,8 +11,8 @@ import sys import re from contextlib import closing from optparse import OptionParser -import pgdb -from gppylib.utils import formatInsertValuesList, Escape +from gppylib.utils import formatInsertValuesList, escape_string +import psycopg2 gpsd_version = '%prog 1.0' @@ -42,7 +42,7 @@ def get_num_segments(cursor): query = "select count(*) from gp_segment_configuration where role='p' and content >=0;" try: cursor.execute(query) - except pgdb.DatabaseError as e: + except psycopg2.DatabaseError as e: sys.stderr.write('\nError while trying to retrieve number of segments.\n\n' + str(e) + '\n\n') sys.exit(1) vals = cursor.fetchone() @@ -82,7 +82,13 @@ def dumpTupleCount(cur): def dumpStats(cur, inclHLL): - query = 'SELECT pgc.relname, pgn.nspname, pga.attname, pgtn.nspname, pgt.typname, pgs.* ' \ + query = 'SELECT pgc.relname, pgn.nspname, pga.attname, pgtn.nspname, pgt.typname, ' \ + 'pgs.starelid, pgs.staattnum, pgs.stainherit, pgs.stanullfrac, 
pgs.stawidth, pgs.stadistinct, ' \ + 'pgs.stakind1, pgs.stakind2, pgs.stakind3, pgs.stakind4, pgs.stakind5, ' \ + 'pgs.staop1, pgs.staop2, pgs.staop3, pgs.staop4, pgs.staop5, ' \ + 'pgs.stacoll1, pgs.stacoll2, pgs.stacoll3, pgs.stacoll4, pgs.stacoll5, ' \ + 'pgs.stanumbers1, pgs.stanumbers2, pgs.stanumbers3, pgs.stanumbers4, pgs.stanumbers5, ' \ + 'pgs.stavalues1::text::text[], pgs.stavalues2::text::text[], pgs.stavalues3::text::text[], pgs.stavalues4::text::text[], pgs.stavalues5::text::text[] ' \ 'FROM pg_class pgc, pg_statistic pgs, pg_namespace pgn, pg_attribute pga, pg_type pgt, pg_namespace pgtn ' \ 'WHERE pgc.relnamespace = pgn.oid and pgn.nspname NOT IN ' + \ sysnslist + \ @@ -101,7 +107,7 @@ def dumpStats(cur, inclHLL): cur.execute(query) for vals in ResultIter(cur): - starelid = "'%s.%s'::regclass" % (Escape(vals[1]), Escape(vals[0])) + starelid = "'%s.%s'::regclass" % (escape_string(vals[1]), escape_string(vals[0])) rowVals = formatInsertValuesList(vals, starelid, inclHLL) print(pstring.format(vals[0], vals[2], ',\n'.join(rowVals))) @@ -152,7 +158,7 @@ def main(): 'options': pgoptions } num_segments = 0 - with closing(pgdb.connect(**connectionInfo)) as connection: + with closing(psycopg2.connect(**connectionInfo)) as connection: with closing(connection.cursor()) as cursor: num_segments = get_num_segments(cursor) sys.stdout.writelines(['\n-- Greenplum database Statistics Dump', @@ -187,7 +193,7 @@ def main(): sys.stdout.flush() try: - with closing(pgdb.connect(**connectionInfo)) as connection: + with closing(psycopg2.connect(**connectionInfo)) as connection: with closing(connection.cursor()) as cursor: dumpTupleCount(cursor) dumpStats(cursor, inclHLL) @@ -196,11 +202,11 @@ def main(): 'which requires some data elements to be included in the output file.\n', 'Please review output file to ensure it is within corporate policy to transport the output file.\n']) - except pgdb.DatabaseError as err: # catch *all* exceptions + except psycopg2.DatabaseError as 
err: # catch *all* exceptions sys.stderr.write('Error while dumping statistics:\n') sys.stderr.write(str(err)) sys.exit(1) if __name__ == "__main__": - main() + main() diff --git a/gpMgmt/bin/minirepro b/gpMgmt/bin/minirepro index 79d4c9fbb31..858b77933dd 100755 --- a/gpMgmt/bin/minirepro +++ b/gpMgmt/bin/minirepro @@ -61,10 +61,10 @@ minirepro gptest -h locahost -U gpadmin -p 4444 -q ~/in.sql -f ~/out.sql import pwd import os, sys, re, json, platform, subprocess -import pgdb +import psycopg2 from optparse import OptionParser from datetime import datetime -from gppylib.utils import formatInsertValuesList, Escape +from gppylib.utils import formatInsertValuesList, escape_string version = '1.13' PATH_PREFIX = '/tmp/' @@ -97,7 +97,7 @@ def get_server_version(cursor): query = "select version()" try: cursor.execute(query) - except pgdb.DatabaseError as e: + except psycopg2.DatabaseError as e: sys.stderr.write('\nError while trying to find GPDB version.\n\n' + str(e) + '\n\n') sys.exit(1) vals = cursor.fetchone() @@ -107,7 +107,7 @@ def get_num_segments(cursor): query = "select count(*) from gp_segment_configuration where role='p' and content >=0;" try: cursor.execute(query) - except pgdb.DatabaseError as e: + except psycopg2.DatabaseError as e: sys.stderr.write('\nError while trying to retrieve number of segments.\n\n' + str(e) + '\n\n') sys.exit(1) vals = cursor.fetchone() @@ -136,7 +136,7 @@ def dump_query(connectionInfo, query_file): with open(query_file, 'r') as query_f: sql_text = query_f.read() - query = "select pg_catalog.gp_dump_query_oids('%s')" % Escape(sql_text) + query = "select pg_catalog.gp_dump_query_oids('%s')" % escape_string(sql_text) toolkit_sql = PATH_PREFIX + 'toolkit.sql' with open(toolkit_sql, 'w') as toolkit_f: @@ -187,7 +187,7 @@ def pg_dump_object(mr_query, connectionInfo, envOpts): out_file = PATH_PREFIX + PGDUMP_FILE dmp_cmd = 'pg_dump -h %s -p %s -U %s -sxO %s' % connectionInfo dmp_cmd = "%s --relation-oids %s --function-oids %s -f %s" % \ - 
(dmp_cmd, mr_query.relids, mr_query.funcids, Escape(out_file)) + (dmp_cmd, mr_query.relids, mr_query.funcids, escape_string(out_file)) print(dmp_cmd) p = subprocess.Popen(dmp_cmd, shell=True, stderr=subprocess.PIPE, env=envOpts) _, errormsg = p.communicate() @@ -213,11 +213,17 @@ def dump_tuple_count(cur, oid_str, f_out): for col, val, typ in zip(columns[2:], vals[2:], types): # i.e. relpages = 1::int, reltuples = 1.0::real lines.append('\t%s = %s::%s' % (col, val, typ)) - updateStmt = templateStmt.format(Escape(',\n'.join(lines)), Escape(vals[0]), Escape(vals[1])) + updateStmt = templateStmt.format(escape_string(',\n'.join(lines)), escape_string(vals[0]), escape_string(vals[1])) f_out.writelines(updateStmt) def dump_stats(cur, oid_str, f_out, inclHLL): - query = 'SELECT pgc.relname, pgn.nspname, pga.attname, pgtn.nspname, pgt.typname, pgs.* ' \ + query = 'SELECT pgc.relname, pgn.nspname, pga.attname, pgtn.nspname, pgt.typname, ' \ + 'pgs.starelid, pgs.staattnum, pgs.stainherit, pgs.stanullfrac, pgs.stawidth, pgs.stadistinct, ' \ + 'pgs.stakind1, pgs.stakind2, pgs.stakind3, pgs.stakind4, pgs.stakind5, ' \ + 'pgs.staop1, pgs.staop2, pgs.staop3, pgs.staop4, pgs.staop5, ' \ + 'pgs.stacoll1, pgs.stacoll2, pgs.stacoll3, pgs.stacoll4, pgs.stacoll5, ' \ + 'pgs.stanumbers1, pgs.stanumbers2, pgs.stanumbers3, pgs.stanumbers4, pgs.stanumbers5, ' \ + 'pgs.stavalues1::text::text[], pgs.stavalues2::text::text[], pgs.stavalues3::text::text[], pgs.stavalues4::text::text[], pgs.stavalues5::text::text[] ' \ 'FROM pg_class pgc, pg_statistic pgs, pg_namespace pgn, pg_attribute pga, pg_type pgt, pg_namespace pgtn ' \ 'WHERE pgc.relnamespace = pgn.oid and pgc.oid in (%s) ' \ 'and pgn.nspname NOT LIKE \'pg_temp_%%\' ' \ @@ -239,7 +245,7 @@ def dump_stats(cur, oid_str, f_out, inclHLL): for vals in result_iter(cur): schemaname = vals[1] - starelid = "'%s.%s'::regclass" % (Escape(vals[1]), Escape(vals[0])) + starelid = "'%s.%s'::regclass" % (escape_string(vals[1]), escape_string(vals[0])) 
rowVals = formatInsertValuesList(vals, starelid, inclHLL) # For non-catalog tables we don't need to delete stats first @@ -248,7 +254,7 @@ def dump_stats(cur, oid_str, f_out, inclHLL): if schemaname != 'pg_catalog': linecomment = '-- ' # This will comment out the DELETE query - f_out.writelines(pstring.format(Escape(vals[0]), Escape(vals[2]), linecomment, starelid, vals[6], ',\n'.join(rowVals))) + f_out.writelines(pstring.format(escape_string(vals[0]), escape_string(vals[2]), linecomment, starelid, vals[6], ',\n'.join(rowVals))) def main(): parser = parse_cmd_line() @@ -299,7 +305,7 @@ def main(): } print("Connecting to database: host=%s, port=%s, user=%s, db=%s ..." % connectionInfo) - conn = pgdb.connect(**connectionDict) + conn = psycopg2.connect(**connectionDict) cursor = conn.cursor() # get server version, which is dumped to minirepro output file @@ -346,7 +352,7 @@ def main(): # first create schema DDLs print("Writing schema DDLs ...") - table_schemas = ["CREATE SCHEMA %s;\n" % Escape(schema) for schema in mr_query.schemas if schema != 'public'] + table_schemas = ["CREATE SCHEMA %s;\n" % escape_string(schema) for schema in mr_query.schemas if schema != 'public'] f_out.writelines(table_schemas) # write relation and function DDLs @@ -392,4 +398,4 @@ def main(): print('Please review output file to ensure it is within corporate policy to transport the output file.') if __name__ == "__main__": - main() + main() diff --git a/gpMgmt/bin/pythonSrc/PyGreSQL/CMakeLists.txt b/gpMgmt/bin/pythonSrc/PyGreSQL/CMakeLists.txt deleted file mode 100644 index ce1b626d3c6..00000000000 --- a/gpMgmt/bin/pythonSrc/PyGreSQL/CMakeLists.txt +++ /dev/null @@ -1,52 +0,0 @@ -cmake_minimum_required(VERSION 3.12) - -set (CMAKE_CONFIGURATION_TYPES Release RelWithDebInfo) -project(pygresql C) - -find_package(Python2 COMPONENTS Interpreter Development) -if (Python2_FOUND) - include_directories(${Python2_INCLUDE_DIRS}) -else () - message(FATAL_ERROR "python2 not found") -endif(Python2_FOUND) - 
-file(GLOB SRC_TARBALL ${CMAKE_CURRENT_SOURCE_DIR}/../ext/PyGreSQL-*.tar.gz) -if (NOT SRC_TARBALL) - message(FATAL_ERROR "PyGreSQL source tarball not found, run git submodule update --init --recursive") -endif() - -execute_process(COMMAND tar -xf ${SRC_TARBALL} --strip-components=1 - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - -file(READ pgmodule.c PGMODULE_LINES) -file(WRITE pgmodule.c "__declspec(dllexport) void init_pg(void);\n") -file(APPEND pgmodule.c "${PGMODULE_LINES}") - -add_definitions("/D FRONTEND") - -set (CPPFLAGS "/MP /wd4996 /wd4018 /wd4090 /wd4102 /wd4244 /wd4267 /wd4273 /wd4715") -add_definitions("${CPPFLAGS}") - -file(WRITE "${CMAKE_CURRENT_SOURCE_DIR}/__init__.py" "") -set(GPDB_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../) -include_directories(${GPDB_SRC_DIR}/src/include - ${GPDB_SRC_DIR}/src/interfaces/libpq - ${GPDB_SRC_DIR}/src/include/port - ${GPDB_SRC_DIR}/src/include/port/win32 - ${GPDB_SRC_DIR}/src/include/port/win32_msvc - ${GPDB_SRC_DIR}/src/port - ${Python2_INCLUDE_DIRS}) -link_directories(${CMAKE_PREFIX_PATH}/lib) -find_library(LIBPQ NAMES libpq HINTS ${CMAKE_INSTALL_PREFIX}/LIB) -find_library(LIBPGPORT NAMES libpgport HINTS ${CMAKE_INSTALL_PREFIX}/LIB) -find_library(LIBPGCOMMON NAMES libpgcommon HINTS ${CMAKE_INSTALL_PREFIX}/LIB) - -add_library (pygresql SHARED pgmodule.c) -target_link_libraries(pygresql ${LIBPQ} ${LIBPGPORT} ${LIBPGCOMMON} ws2_32 secur32 ${Python2_LIBRARIES}) - -set_target_properties(pygresql PROPERTIES OUTPUT_NAME "_pg") -set_target_properties(pygresql PROPERTIES SUFFIX ".pyd") -install(TARGETS pygresql DESTINATION lib/python) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/pg.py DESTINATION lib/python) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/pgdb.py DESTINATION lib/python) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/__init__.py DESTINATION lib/python) \ No newline at end of file diff --git a/gpMgmt/bin/pythonSrc/ext/PyGreSQL-5.2.tar.gz b/gpMgmt/bin/pythonSrc/ext/PyGreSQL-5.2.tar.gz deleted file mode 100644 
index 9c39e0acc34..00000000000 Binary files a/gpMgmt/bin/pythonSrc/ext/PyGreSQL-5.2.tar.gz and /dev/null differ diff --git a/gpMgmt/bin/pythonSrc/ext/PyYAML-5.3.1.tar.gz b/gpMgmt/bin/pythonSrc/ext/PyYAML-5.3.1.tar.gz deleted file mode 100644 index 915d67b38f2..00000000000 Binary files a/gpMgmt/bin/pythonSrc/ext/PyYAML-5.3.1.tar.gz and /dev/null differ diff --git a/gpMgmt/bin/pythonSrc/ext/mock-1.0.1.tar.gz b/gpMgmt/bin/pythonSrc/ext/mock-1.0.1.tar.gz deleted file mode 100644 index 4fdea77c71c..00000000000 Binary files a/gpMgmt/bin/pythonSrc/ext/mock-1.0.1.tar.gz and /dev/null differ diff --git a/gpMgmt/bin/pythonSrc/ext/psutil-5.7.0.tar.gz b/gpMgmt/bin/pythonSrc/ext/psutil-5.7.0.tar.gz deleted file mode 100644 index e8b1d804420..00000000000 Binary files a/gpMgmt/bin/pythonSrc/ext/psutil-5.7.0.tar.gz and /dev/null differ diff --git a/gpMgmt/sbin/gpsegstop.py b/gpMgmt/sbin/gpsegstop.py index ab369f63473..9f13c10883b 100755 --- a/gpMgmt/sbin/gpsegstop.py +++ b/gpMgmt/sbin/gpsegstop.py @@ -23,7 +23,6 @@ from gppylib.commands import gp from gppylib.commands.gp import SEGMENT_STOP_TIMEOUT_DEFAULT, DEFAULT_SEGHOST_NUM_WORKERS from gppylib.commands import pg -from gppylib.db import dbconn from gppylib import pgconf from gppylib.commands.gp import is_pid_postmaster diff --git a/gpMgmt/test/behave/mgmt_utils/analyzedb.feature b/gpMgmt/test/behave/mgmt_utils/analyzedb.feature index 5809c7745a9..40b8d87623b 100644 --- a/gpMgmt/test/behave/mgmt_utils/analyzedb.feature +++ b/gpMgmt/test/behave/mgmt_utils/analyzedb.feature @@ -1781,12 +1781,3 @@ Feature: Incrementally analyze the database And the user executes "CREATE TEMP TABLE spiegelungssätze (c1 int) DISTRIBUTED BY (c1)" with named connection "default" When the user runs "analyzedb -a -d special_encoding_db" Then analyzedb should return a return code of 0 - - Scenario: analyzedb finds materialized views - Given a materialized view "public.mv_test_view" exists on table "pg_class" - And the user runs "analyzedb -a -d 
incr_analyze" - Then analyzedb should print "-public.mv_test_view" to stdout - And the user runs "analyzedb -a -s public -d incr_analyze" - Then analyzedb should print "-public.mv_test_view" to stdout - And the user runs "analyzedb -a -t public.mv_test_view -d incr_analyze" - Then analyzedb should print "-public.mv_test_view" to stdout diff --git a/gpMgmt/test/behave/mgmt_utils/environment.py b/gpMgmt/test/behave/mgmt_utils/environment.py index d79f9c18acc..c1dcca351ae 100644 --- a/gpMgmt/test/behave/mgmt_utils/environment.py +++ b/gpMgmt/test/behave/mgmt_utils/environment.py @@ -62,12 +62,20 @@ def before_feature(context, feature): dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)') dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)') dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)') + dbconn.execSQL(context.conn, 'create table spiegelungssätze(col_ä integer, 列2 integer)') dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e') dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f') dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c') dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)') dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)') dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)') + dbconn.execSQL(context.conn, 'insert into spiegelungssätze values(1, 5)') + # minirepro tests require statistical data about the contents of the database + # we should execute 'ANALYZE' to fill the pg_statistic catalog table. 
+ dbconn.execSQL(context.conn, 'analyze t1') + dbconn.execSQL(context.conn, 'analyze t2') + dbconn.execSQL(context.conn, 'analyze t3') + dbconn.execSQL(context.conn, 'analyze spiegelungssätze') context.conn.commit() if 'gppkg' in feature.tags: diff --git a/gpMgmt/test/behave/mgmt_utils/gpcheckcat.feature b/gpMgmt/test/behave/mgmt_utils/gpcheckcat.feature index d9b91838909..84cdcba4b03 100644 --- a/gpMgmt/test/behave/mgmt_utils/gpcheckcat.feature +++ b/gpMgmt/test/behave/mgmt_utils/gpcheckcat.feature @@ -9,6 +9,9 @@ Feature: gpcheckcat tests Given database "all_good" is dropped and recreated Then the user runs "gpcheckcat -A" Then gpcheckcat should return a return code of 0 + When the user runs "gpcheckcat -C pg_class" + Then gpcheckcat should return a return code of 0 + And gpcheckcat should not print "Execution error:" to stdout And the user runs "dropdb all_good" Scenario: gpcheckcat should drop leaked schemas diff --git a/gpMgmt/test/behave/mgmt_utils/minirepro.feature b/gpMgmt/test/behave/mgmt_utils/minirepro.feature index 15e9c666a51..756803aafac 100644 --- a/gpMgmt/test/behave/mgmt_utils/minirepro.feature +++ b/gpMgmt/test/behave/mgmt_utils/minirepro.feature @@ -263,3 +263,15 @@ Feature: Dump minimum database objects that is related to the query And the output file "/tmp/out.sql" should contain "Table: t3, Attribute: f" And the output file "/tmp/out.sql" should be loaded to database "minidb_tmp" without error And the file "/tmp/in.sql" should be executed in database "minidb_tmp" without error + + @minirepro_core + Scenario: Dump database objects related with select query on table with specially encoded characters + Given the file "/tmp/in.sql" exists and contains "select * from spiegelungssätze;" + And the file "/tmp/out.sql" does not exist + When the user runs "minirepro minireprodb -q /tmp/in.sql -f /tmp/out.sql" + Then the output file "/tmp/out.sql" should exist + And the output file "/tmp/out.sql" should not contain "CREATE TABLE public.spiegelungssätze" + 
And the output file "/tmp/out.sql" should contain "Table: spiegelungssätze, Attribute: col_ä" + And the output file "/tmp/out.sql" should contain "Table: spiegelungssätze, Attribute: 列2" + And the output file "/tmp/out.sql" should be loaded to database "minidb_tmp" without error + And the file "/tmp/in.sql" should be executed in database "minidb_tmp" without error diff --git a/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py b/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py index dfcc137fe71..0d025ea7044 100644 --- a/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py +++ b/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py @@ -19,7 +19,8 @@ from datetime import datetime, timedelta from os import path from contextlib import closing - +import psycopg2 +from psycopg2 import extras from gppylib.gparray import GpArray, ROLE_PRIMARY, ROLE_MIRROR from gppylib.commands.gp import SegmentStart, GpStandbyStart, CoordinatorStop from gppylib.commands import gp @@ -3159,31 +3160,41 @@ def impl(context, table_name): dbname = 'gptest' conn = dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False) context.long_run_select_only_conn = conn + cursor = conn.cursor() + context.long_run_select_only_cursor = cursor + + # Start a readonly transaction. 
+ cursor.execute("BEGIN") query = """SELECT gp_segment_id, * from %s order by 1, 2""" % table_name - data_result = dbconn.query(conn, query).fetchall() + cursor.execute(query) + data_result = cursor.fetchall() + context.long_run_select_only_data_result = data_result query = """SELECT txid_current()""" - xid = dbconn.querySingleton(conn, query) + cursor.execute(query) + xid = cursor.fetchone()[0] context.long_run_select_only_xid = xid @then('verify that long-run read-only transaction still exists on {table_name}') def impl(context, table_name): dbname = 'gptest' - conn = context.long_run_select_only_conn + cursor = context.long_run_select_only_cursor query = """SELECT gp_segment_id, * from %s order by 1, 2""" % table_name - data_result = dbconn.query(conn, query).fetchall() + cursor.execute(query) + data_result = cursor.fetchall() query = """SELECT txid_current()""" - xid = dbconn.querySingleton(conn, query) + cursor.execute(query) + xid = cursor.fetchone()[0] if (xid != context.long_run_select_only_xid or data_result != context.long_run_select_only_data_result): error_str = "Incorrect xid or select result of long run read-only transaction: \ - xid(before %s, after %), result(before %s, after %s)" - raise Exception(error_str % (context.long_run_select_only_xid, xid, context.long_run_select_only_data_result, data_result)) + xid(before {}, after {}), result(before {}, after {})" + raise Exception(error_str.format(context.long_run_select_only_xid, xid, context.long_run_select_only_data_result, data_result)) @given('a long-run transaction starts') def impl(context): @@ -3191,30 +3202,36 @@ def impl(context): conn = dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False) context.long_run_conn = conn + cursor = conn.cursor() + context.long_run_cursor = cursor + + cursor.execute("BEGIN") + query = """SELECT txid_current()""" - xid = dbconn.querySingleton(conn, query) + cursor.execute(query) + xid = cursor.fetchone()[0] context.long_run_xid = xid @then('verify 
that long-run transaction aborted for changing the catalog by creating table {table_name}') def impl(context, table_name): - dbname = 'gptest' - conn = context.long_run_conn + cursor = context.long_run_cursor query = """SELECT txid_current()""" - xid = dbconn.querySingleton(conn, query) + cursor.execute(query) + xid = cursor.fetchone()[0] if context.long_run_xid != xid: raise Exception("Incorrect xid of long run transaction: before %s, after %s" % (context.long_run_xid, xid)); query = """CREATE TABLE %s (a INT)""" % table_name try: - data_result = dbconn.query(conn, query) - except Exception as msg: - key_msg = "FATAL: cluster is expanded" - if key_msg not in msg.__str__(): - raise Exception("transaction not abort correctly, errmsg:%s" % msg) + cursor.execute(query) + except Exception as e: + key_msg = "cluster is expanded from" + if key_msg not in str(e): + raise Exception("transaction not abort correctly, errmsg:%s" % str(e)) else: - raise Exception("transaction not abort, result:%s" % data_result) + raise Exception("transaction not abort") @when('verify that the cluster has {num_of_segments} new segments') @then('verify that the cluster has {num_of_segments} new segments') @@ -3832,7 +3849,7 @@ def impl(context): @then('the database locales are saved') def impl(context): - with closing(dbconn.connect(dbconn.DbURL())) as conn: + with closing(dbconn.connect(dbconn.DbURL(), cursorFactory=psycopg2.extras.NamedTupleCursor)) as conn: rows = dbconn.query(conn, "SELECT name, setting FROM pg_settings WHERE name LIKE 'lc_%'").fetchall() context.database_locales = {row.name: row.setting for row in rows} diff --git a/gpMgmt/test/behave/mgmt_utils/steps/mirrors_mgmt_utils.py b/gpMgmt/test/behave/mgmt_utils/steps/mirrors_mgmt_utils.py index 47d4a400056..1b8c1caa7f0 100644 --- a/gpMgmt/test/behave/mgmt_utils/steps/mirrors_mgmt_utils.py +++ b/gpMgmt/test/behave/mgmt_utils/steps/mirrors_mgmt_utils.py @@ -479,7 +479,7 @@ def make_temp_dir_on_remote(context, hostname, 
tmp_base_dir_remote, mode='700'): raise Exception("tmp_base_dir cannot be empty") tempfile_cmd = Command(name="Create temp directory on remote host", - cmdStr=""" python -c "import tempfile; t=tempfile.mkdtemp(dir='{}');print(t)" """ + cmdStr=""" python3 -c "import tempfile; t=tempfile.mkdtemp(dir='{}');print(t)" """ .format(tmp_base_dir_remote), remoteHost=hostname, ctxt=REMOTE) tempfile_cmd.run(validateAfter=True) diff --git a/gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py b/gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py index aa9b4a011c1..1c20e690d53 100644 --- a/gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py +++ b/gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py @@ -28,7 +28,7 @@ def create_cluster(context, with_mirrors=True): cd ../gpAux/gpdemo; \ export DEMO_PORT_BASE={port_base} && \ export NUM_PRIMARY_MIRROR_PAIRS={num_primary_mirror_pairs} && \ - export WITH_MIRRORS={with_mirrors} && \A + export WITH_MIRRORS={with_mirrors} && \ ./demo_cluster.sh -d && ./demo_cluster.sh -c && \ ./demo_cluster.sh """.format(port_base=os.getenv('PORT_BASE', 15432), @@ -108,18 +108,17 @@ def step_impl(context): def step_impl(context): result_cursor = query_sql( "postgres", - "select pg_get_replication_slots() from gp_dist_random('gp_id') order by gp_segment_id" + "select (pg_get_replication_slots()).* from gp_dist_random('gp_id') order by gp_segment_id" ) if result_cursor.rowcount != context.current_cluster_size: raise Exception("expected all %d primaries to have replication slots, only %d have slots" % (context.current_cluster_size, results.rowcount)) - for content_id, result in enumerate(result_cursor.fetchall()): - pg_rep_slot = result[0] - if (pg_rep_slot[0], pg_rep_slot[2], pg_rep_slot[4]) != ('internal_wal_replication_slot','physical','f') : + for content_id, pg_rep_slot in enumerate(result_cursor.fetchall()): + if (pg_rep_slot[0], pg_rep_slot[2], pg_rep_slot[4]) != ('internal_wal_replication_slot', 
'physical', False) : raise Exception( "expected replication slot to be active for content id %d, got %s" % - (content_id, result[0]) + (content_id, pg_rep_slot) ) @then('the mirrors should not have replication slots') diff --git a/gpMgmt/test/behave_utils/gpexpand_dml.py b/gpMgmt/test/behave_utils/gpexpand_dml.py index 8658c7e58ff..55f0e37b34d 100755 --- a/gpMgmt/test/behave_utils/gpexpand_dml.py +++ b/gpMgmt/test/behave_utils/gpexpand_dml.py @@ -31,9 +31,15 @@ def __init__(self, dbname, dmltype): def run(self): conn = dbconn.connect(dbconn.DbURL(dbname=self.dbname), unsetSearchPath=False) + with conn.cursor() as cur: + cur.execute("BEGIN") + self.loop(conn) self.verify(conn) + with conn.cursor() as cur: + cur.execute("COMMIT") + conn.commit() conn.close() @@ -109,13 +115,15 @@ def loop_step(self): def verify(self, conn): sql = ''' select c1 from {tablename} order by c1; - '''.format(tablename=self.tablename, counter=self.counter) - results = dbconn.query(conn, sql).fetchall() + '''.format(tablename=self.tablename) + with conn.cursor() as cur: + cur.execute(sql) + results = cur.fetchall() - for i in range(0, self.counter): - if i != int(results[i][0]): - self.report_incorrect_result() - return + for i in range(self.counter): + if i != int(results[i][0]): + self.report_incorrect_result() + return class TestUpdate(TestDML): datasize = 1000 @@ -135,13 +143,15 @@ def loop_step(self): def verify(self, conn): sql = ''' select c2 from {tablename} order by c1; - '''.format(tablename=self.tablename, counter=self.counter) - results = dbconn.query(conn, sql).fetchall() + '''.format(tablename=self.tablename) + with conn.cursor() as cur: + cur.execute(sql) + results = cur.fetchall() - for i in range(0, self.datasize): - if i + self.counter - 1 != int(results[i][0]): - self.report_incorrect_result() - return + for i in range(self.datasize): + if i + self.counter - 1 != int(results[i][0]): + self.report_incorrect_result() + return class TestDelete(TestDML): datasize = 100000 @@ 
-161,13 +171,15 @@ def loop_step(self): def verify(self, conn): sql = ''' select c1 from {tablename} order by c1; - '''.format(tablename=self.tablename, counter=self.counter) - results = dbconn.query(conn, sql).fetchall() - - for i in range(self.counter, self.datasize): - if i != int(results[i - self.counter][0]): - self.report_incorrect_result() - return + '''.format(tablename=self.tablename) + with conn.cursor() as cur: + cur.execute(sql) + results = cur.fetchall() + + for i in range(self.counter, self.datasize): + if i != int(results[i - self.counter][0]): + self.report_incorrect_result() + return # for test only if __name__ == '__main__': diff --git a/gpMgmt/test/behave_utils/utils.py b/gpMgmt/test/behave_utils/utils.py index a61febcd9f7..06c8c2209a6 100644 --- a/gpMgmt/test/behave_utils/utils.py +++ b/gpMgmt/test/behave_utils/utils.py @@ -11,15 +11,13 @@ import subprocess import difflib -import pg - from contextlib import closing from datetime import datetime from gppylib.commands.base import Command, ExecutionError, REMOTE from gppylib.commands.gp import chk_local_db_running, get_coordinatordatadir from gppylib.db import dbconn from gppylib.gparray import GpArray, MODE_SYNCHRONIZED - +from gppylib.utils import escape_string PARTITION_START_DATE = '2010-01-01' PARTITION_END_DATE = '2013-01-01' @@ -317,14 +315,14 @@ def check_table_exists(context, dbname, table_name, table_type=None, host=None, FROM pg_class c, pg_namespace n WHERE c.relname = '%s' AND n.nspname = '%s' AND c.relnamespace = n.oid; """ - SQL = SQL_format % (escape_string(tablename, conn=conn), escape_string(schemaname, conn=conn)) + SQL = SQL_format % (escape_string(tablename), escape_string(schemaname)) else: SQL_format = """ SELECT oid, relkind, relam, reloptions \ FROM pg_class \ WHERE relname = E'%s';\ """ - SQL = SQL_format % (escape_string(table_name, conn=conn)) + SQL = SQL_format % (escape_string(table_name)) table_row = None try: @@ -773,11 +771,6 @@ def replace_special_char_env(str): 
str = str.replace("$%s" % var, os.environ[var]) return str - -def escape_string(string, conn): - return pg.DB(db=conn).escape_string(string) - - def wait_for_unblocked_transactions(context, num_retries=150): """ Tries once a second to successfully commit a transaction to the database diff --git a/gpMgmt/test/coveragerc b/gpMgmt/test/coveragerc index 511c2626fbe..566c9c83169 100644 --- a/gpMgmt/test/coveragerc +++ b/gpMgmt/test/coveragerc @@ -8,7 +8,4 @@ branch = True omit = */site-packages/* */bin/behave - */python/psutil/* - */python/pygresql/* - */python/yaml/* */python/lockfile/* \ No newline at end of file diff --git a/gpcontrib/gp_replica_check/gp_replica_check.py b/gpcontrib/gp_replica_check/gp_replica_check.py index 1627b859c1e..d00b66dc34a 100755 --- a/gpcontrib/gp_replica_check/gp_replica_check.py +++ b/gpcontrib/gp_replica_check/gp_replica_check.py @@ -40,8 +40,7 @@ import time import os from collections import defaultdict -from pg import DB - +import psycopg2 def run_sql(sql, host=None, port=None, dbname="postgres", is_query=True, @@ -51,10 +50,15 @@ def run_sql(sql, host=None, port=None, if port is None: port = int(os.getenv("PGPORT")) opt = "-c gp_role=utility" if is_utility else None - with DB(dbname=dbname, host=host, port=port, opt=opt) as db: - r = db.query(sql) - if is_query: - return r.getresult() + try: + with psycopg2.connect(dbname=dbname, host=host, port=port, options=opt) as conn: + with conn.cursor() as cur: + cur.execute(sql) + if is_query: + resultList = cur.fetchall() + return resultList + except Exception as e: + print('Exception: %s while running query %s dbname = %s' % (e, sql, dbname)) class ReplicaCheck(threading.Thread): diff --git a/gpcontrib/gpcloud/regress/input/5_01_normal_http_param.source b/gpcontrib/gpcloud/regress/input/5_01_normal_http_param.source index 6b18334c0d9..66f3564a100 100644 --- a/gpcontrib/gpcloud/regress/input/5_01_normal_http_param.source +++ b/gpcontrib/gpcloud/regress/input/5_01_normal_http_param.source @@ 
-1,6 +1,6 @@ SET client_min_messages TO 'warning'; CREATE EXTERNAL WEB TABLE dummyHttpServerstart (x text) -execute E'((python @abs_srcdir@/dummyHTTPServer.py -p 8553 -f @config_file@ -t Parameter_Server >/dev/null 2>&1 &);for i in `seq 1 30`; do curl http://127.0.0.1:8553 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") ' +execute E'((python3 @abs_srcdir@/dummyHTTPServer.py -p 8553 -f @config_file@ -t Parameter_Server >/dev/null 2>&1 &);for i in `seq 1 30`; do curl http://127.0.0.1:8553 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") ' on SEGMENT 0 FORMAT 'text' (delimiter '|'); diff --git a/gpcontrib/gpcloud/regress/output/5_01_normal_http_param.source b/gpcontrib/gpcloud/regress/output/5_01_normal_http_param.source index dbe5349e385..68b4d131d52 100644 --- a/gpcontrib/gpcloud/regress/output/5_01_normal_http_param.source +++ b/gpcontrib/gpcloud/regress/output/5_01_normal_http_param.source @@ -1,6 +1,6 @@ SET client_min_messages TO 'warning'; CREATE EXTERNAL WEB TABLE dummyHttpServerstart (x text) -execute E'((python @abs_srcdir@/dummyHTTPServer.py -p 8553 -f @config_file@ -t Parameter_Server >/dev/null 2>&1 &);for i in `seq 1 30`; do curl http://127.0.0.1:8553 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") ' +execute E'((python3 @abs_srcdir@/dummyHTTPServer.py -p 8553 -f @config_file@ -t Parameter_Server >/dev/null 2>&1 &);for i in `seq 1 30`; do curl http://127.0.0.1:8553 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") ' on SEGMENT 0 FORMAT 'text' (delimiter '|'); CREATE EXTERNAL WEB TABLE dummyHttpServerstop (x text) diff --git a/python-dependencies.txt b/python-dependencies.txt index 9e63afef528..2fd60bd41b1 100644 --- a/python-dependencies.txt +++ b/python-dependencies.txt @@ -1,3 +1,3 @@ psutil==5.7.0 -pygresql==5.2 pyyaml==5.3.1 +psycopg2==2.9.6 diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 8a195a39856..22c74e7a856 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ 
-192,7 +192,6 @@ bitcodedir = $(pkglibdir)/bitcode with_icu = @with_icu@ with_perl = @with_perl@ with_python = @with_python@ -with_pythonsrc_ext = @with_pythonsrc_ext@ with_tcl = @with_tcl@ with_ssl = @with_ssl@ with_readline = @with_readline@ diff --git a/src/test/gpdb_pitr/expected/gpdb_pitr_setup.out b/src/test/gpdb_pitr/expected/gpdb_pitr_setup.out index 48df10e656d..52fb6f56c3f 100644 --- a/src/test/gpdb_pitr/expected/gpdb_pitr_setup.out +++ b/src/test/gpdb_pitr/expected/gpdb_pitr_setup.out @@ -1,26 +1,26 @@ -- Create some tables and load some data -- We do 1 row for gpdb_one_phase_commit to bypass autostats later CREATE TABLE gpdb_two_phase_commit_before_acquire_share_lock(num int); -CREATE +CREATE TABLE CREATE TABLE gpdb_two_phase_commit_after_acquire_share_lock(num int); -CREATE +CREATE TABLE CREATE TABLE gpdb_one_phase_commit(num int); -CREATE +CREATE TABLE CREATE TABLE gpdb_two_phase_commit_after_restore_point(num int); -CREATE +CREATE TABLE INSERT INTO gpdb_two_phase_commit_before_acquire_share_lock SELECT generate_series(1, 10); -INSERT 10 +INSERT 0 10 INSERT INTO gpdb_two_phase_commit_after_acquire_share_lock SELECT generate_series(1, 10); -INSERT 10 +INSERT 0 10 INSERT INTO gpdb_one_phase_commit VALUES (1); -INSERT 1 +INSERT 0 1 -- Inject suspend faults that will be used later to test different -- distributed commit scenarios, and to also test the commit blocking -- requirement which should only block twophase commits during -- distributed commit broadcast when a restore point is being created. 
1: CREATE EXTENSION IF NOT EXISTS gp_inject_fault; -CREATE +CREATE EXTENSION 1: SELECT gp_inject_fault('dtm_broadcast_prepare', 'suspend', 1); gp_inject_fault ----------------- @@ -50,7 +50,7 @@ DELETE 10 4: BEGIN; BEGIN 4: INSERT INTO gpdb_two_phase_commit_after_restore_point SELECT generate_series(1, 10); -INSERT 10 +INSERT 0 10 4&: SELECT gp_segment_id, count(*) FROM gp_create_restore_point('test_restore_point') GROUP BY gp_segment_id ORDER BY gp_segment_id; 1: SELECT gp_wait_until_triggered_fault('gp_create_restore_point_acquired_lock', 1, 1); gp_wait_until_triggered_fault @@ -72,7 +72,7 @@ INSERT 10 3&: COMMIT; -- One-phase commit query should not block. 1: INSERT INTO gpdb_one_phase_commit VALUES (2); -INSERT 1 +INSERT 0 1 -- Read-only query should not block. 1: SELECT * FROM gpdb_two_phase_commit_before_acquire_share_lock; num @@ -154,7 +154,7 @@ SELECT * FROM gpdb_two_phase_commit_after_restore_point ORDER BY num; -- must do this in a plpgsql cursor because of a known limitation with -- CTAS on an EXECUTE ON COORDINATOR function. CREATE TEMP TABLE switch_walfile_names(content_id smallint, walfilename text); -CREATE +CREATE TABLE CREATE OR REPLACE FUNCTION populate_switch_walfile_names() RETURNS void AS $$ DECLARE curs CURSOR FOR SELECT * FROM gp_switch_wal(); /*in func*/ DECLARE rec record; /*in func*/ BEGIN /*in func*/ OPEN curs; /*in func*/ LOOP FETCH curs INTO rec; /*in func*/ EXIT WHEN NOT FOUND; /*in func*/ INSERT INTO switch_walfile_names VALUES (rec.gp_segment_id, rec.pg_walfile_name); /*in func*/ END LOOP; /*in func*/ END $$ LANGUAGE plpgsql; /*in func*/ SELECT populate_switch_walfile_names(); populate_switch_walfile_names @@ -166,7 +166,7 @@ INSERT INTO switch_walfile_names VALUES (rec.gp_segment_id, rec.pg_walfile_name) -- This function loops until the archival is complete. It times out after -- approximately 10mins. 
CREATE OR REPLACE FUNCTION check_archival() RETURNS BOOLEAN AS $$ DECLARE archived BOOLEAN; /*in func*/ DECLARE archived_count INTEGER; /*in func*/ BEGIN /*in func*/ FOR i in 1..3000 LOOP SELECT bool_and(seg_archived), count(*) FROM (SELECT last_archived_wal = l.walfilename AS seg_archived FROM switch_walfile_names l INNER JOIN gp_stat_archiver a ON l.content_id = a.gp_segment_id) s INTO archived, archived_count; /*in func*/ IF archived AND archived_count = 4 THEN RETURN archived; /*in func*/ END IF; /*in func*/ PERFORM pg_sleep(0.2); /*in func*/ END LOOP; /*in func*/ END $$ LANGUAGE plpgsql; -CREATE +CREATE FUNCTION SELECT check_archival(); check_archival diff --git a/src/test/gpdb_pitr/expected/test_gp_switch_wal.out b/src/test/gpdb_pitr/expected/test_gp_switch_wal.out index 6eb3a2d6a0a..8f81bcbf8e4 100644 --- a/src/test/gpdb_pitr/expected/test_gp_switch_wal.out +++ b/src/test/gpdb_pitr/expected/test_gp_switch_wal.out @@ -71,3 +71,14 @@ SELECT gp_switch_wal() FROM gp_dist_random('gp_id'); ERROR: function with EXECUTE ON restrictions cannot be used in the SELECT list of a query with FROM CREATE TABLE this_ctas_should_fail AS SELECT gp_segment_id AS contentid, pg_switch_wal, pg_walfile_name FROM gp_switch_wal(); ERROR: cannot use gp_switch_wal() when not in QD mode (xlogfuncs_gp.c:LINE_NUM) + +CREATE ROLE switch_wal_error_role; +CREATE ROLE +SET ROLE TO switch_wal_error_role; +SET +SELECT * FROM gp_switch_wal(); +ERROR: permission denied for function gp_switch_wal +RESET ROLE; +RESET +DROP ROLE switch_wal_error_role; +DROP ROLE diff --git a/src/test/isolation2/expected/add_column_after_vacuum_skip_drop_column.out b/src/test/isolation2/expected/add_column_after_vacuum_skip_drop_column.out index 62468f7904a..55cf5abd109 100644 --- a/src/test/isolation2/expected/add_column_after_vacuum_skip_drop_column.out +++ b/src/test/isolation2/expected/add_column_after_vacuum_skip_drop_column.out @@ -4,9 +4,9 @@ -- the test expected result were adjusted accordingly. 
-- CREATE TABLE aoco_add_column_after_vacuum_skip_drop (a INT, b INT) WITH (appendonly=true, orientation=column); -CREATE +CREATE TABLE INSERT INTO aoco_add_column_after_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 DELETE FROM aoco_add_column_after_vacuum_skip_drop; DELETE 10 @@ -30,7 +30,7 @@ BEGIN 2: VACUUM aoco_add_column_after_vacuum_skip_drop; VACUUM 1: END; -END +COMMIT -- We should see an aocsseg in state 2 (AOSEG_STATE_AWAITING_DROP) 0U: SELECT segno, column_num, state FROM gp_toolkit.__gp_aocsseg('aoco_add_column_after_vacuum_skip_drop'); @@ -44,7 +44,7 @@ END -- The ADD COLUMN should clean up aocssegs in state 2 (AOSEG_STATE_AWAITING_DROP) 1: ALTER TABLE aoco_add_column_after_vacuum_skip_drop ADD COLUMN c INT DEFAULT 0; -ALTER +ALTER TABLE 0U: SELECT segno, column_num, state FROM gp_toolkit.__gp_aocsseg('aoco_add_column_after_vacuum_skip_drop'); segno | column_num | state -------+------------+------- @@ -58,7 +58,7 @@ ALTER -- Check if insert goes into segno 1 instead of segno 2 1: INSERT INTO aoco_add_column_after_vacuum_skip_drop SELECT i as a, i as b, i as c FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 0U: SELECT segno, tupcount > 0, state FROM gp_toolkit.__gp_aocsseg('aoco_add_column_after_vacuum_skip_drop'); segno | ?column? 
| state -------+----------+------- diff --git a/src/test/isolation2/expected/alter_blocks_for_update_and_viceversa.out b/src/test/isolation2/expected/alter_blocks_for_update_and_viceversa.out index 18ef845b750..7f9e1a7004a 100644 --- a/src/test/isolation2/expected/alter_blocks_for_update_and_viceversa.out +++ b/src/test/isolation2/expected/alter_blocks_for_update_and_viceversa.out @@ -1,10 +1,10 @@ -- setup 1: drop table if exists alter_block; -DROP +DROP TABLE 1: create table alter_block(a int, b int) distributed by (a); -CREATE +CREATE TABLE 1: insert into alter_block select 1, 1; -INSERT 1 +INSERT 0 1 -- Validate UPDATE blocks the Alter 2: BEGIN; BEGIN @@ -20,12 +20,12 @@ UPDATE 1 2: COMMIT; COMMIT 1<: <... completed> -ALTER +ALTER TABLE -- Now validate ALTER blocks the UPDATE 2: BEGIN; BEGIN 2: ALTER TABLE alter_block SET DISTRIBUTED BY (a); -ALTER +ALTER TABLE 1&: UPDATE alter_block SET b = b + 1; 2: SELECT wait_event_type FROM pg_stat_activity where query like 'UPDATE alter_block SET %'; wait_event_type diff --git a/src/test/isolation2/expected/ao_blkdir.out b/src/test/isolation2/expected/ao_blkdir.out index f202d8ec4da..e626105b911 100644 --- a/src/test/isolation2/expected/ao_blkdir.out +++ b/src/test/isolation2/expected/ao_blkdir.out @@ -7,12 +7,12 @@ -------------------------------------------------------------------------------- CREATE TABLE ao_blkdir_test(i int, j int) USING ao_row DISTRIBUTED BY (j); -CREATE +CREATE TABLE CREATE INDEX ao_blkdir_test_idx ON ao_blkdir_test(i); -CREATE +CREATE INDEX 1: INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(1, 10) i; -INSERT 10 +INSERT 0 10 -- There should be 1 block directory row with a single entry covering 10 rows SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5; tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count @@ -21,7 +21,7 @@ SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM 
gp_dist_random('gp_id (1 row) 1: INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(11, 30) i; -INSERT 20 +INSERT 0 20 -- There should be 2 block directory entries in a new block directory row, and -- the row from the previous INSERT should not be visible. The entry from the -- first INSERT should remain unchanged. @@ -35,11 +35,11 @@ SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id 1: BEGIN; BEGIN 1: INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(31, 60) i; -INSERT 30 +INSERT 0 30 2: BEGIN; BEGIN 2: INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(71, 110) i; -INSERT 40 +INSERT 0 40 1: COMMIT; COMMIT 2: COMMIT; @@ -57,14 +57,10 @@ SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id (4 rows) TRUNCATE ao_blkdir_test; -TRUNCATE -set gp_appendonly_insert_files = 0; -SET +TRUNCATE TABLE -- Insert enough rows to overflow the first block directory minipage by 2. INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(1, 292700) i; -INSERT 292700 -reset gp_appendonly_insert_files; -RESET +INSERT 0 292700 -- There should be 2 block directory rows, one with 161 entries covering 292698 -- rows and the other with 1 entry covering the 2 overflow rows. SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5; @@ -236,9 +232,9 @@ SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id -- Unique index white box tests DROP TABLE ao_blkdir_test; -DROP +DROP TABLE CREATE TABLE ao_blkdir_test(i int UNIQUE, j int) USING ao_row DISTRIBUTED BY (i); -CREATE +CREATE TABLE SELECT gp_inject_fault('appendonly_insert', 'suspend', '', '', 'ao_blkdir_test', 1, 1, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content = 0; gp_inject_fault @@ -284,7 +280,7 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi Success: (1 row) 1<: <... 
completed> -INSERT 1 +INSERT 0 1 -- The placeholder row is invisible to the INSERTing transaction. Since the -- INSERT finished, there should be 1 visible blkdir row representing the INSERT. @@ -317,19 +313,297 @@ COMMIT (1 row) DROP TABLE ao_blkdir_test; -DROP +DROP TABLE + +-- Test `tupcount` in pg_aoseg == sum of number of `row_count` across all +-- aoblkdir entries for each segno. Test with commits, aborts and deletes. + +-- Case1: without VACUUM ANALYZE +CREATE TABLE ao_blkdir_test_rowcount(i int, j int) USING ao_row DISTRIBUTED BY (j); +CREATE TABLE +1: BEGIN; +BEGIN +2: BEGIN; +BEGIN +3: BEGIN; +BEGIN +4: BEGIN; +BEGIN +1: INSERT INTO ao_blkdir_test_rowcount SELECT i, 2 FROM generate_series(1, 10) i; +INSERT 0 10 +2: INSERT INTO ao_blkdir_test_rowcount SELECT i, 3 FROM generate_series(1, 20) i; +INSERT 0 20 +3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 30) i; +INSERT 0 30 +3: ABORT; +ROLLBACK +3: BEGIN; +BEGIN +3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 40) i; +INSERT 0 40 +4: INSERT INTO ao_blkdir_test_rowcount SELECT i, 7 FROM generate_series(1, 50) i; +INSERT 0 50 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +3: COMMIT; +COMMIT +4: COMMIT; +COMMIT +DELETE FROM ao_blkdir_test_rowcount WHERE j = 7; +DELETE 50 + +CREATE INDEX ao_blkdir_test_rowcount_idx ON ao_blkdir_test_rowcount(i); +CREATE INDEX + +SELECT segno, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno; + segno | totalrows +-------+----------- + 1 | 10 + 2 | 20 + 3 | 40 + 4 | 50 +(4 rows) +SELECT segno, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aoseg('ao_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno; + segno | totalrows +-------+----------- + 1 | 10 + 2 | 20 + 3 | 40 + 4 | 50 +(4 rows) + +-- Case2: with VACUUM ANALYZE +DROP TABLE ao_blkdir_test_rowcount; +DROP TABLE +CREATE 
TABLE ao_blkdir_test_rowcount(i int, j int) USING ao_row DISTRIBUTED BY (j); +CREATE TABLE +CREATE INDEX ao_blkdir_test_rowcount_idx ON ao_blkdir_test_rowcount(i); +CREATE INDEX +1: BEGIN; +BEGIN +2: BEGIN; +BEGIN +3: BEGIN; +BEGIN +4: BEGIN; +BEGIN +1: INSERT INTO ao_blkdir_test_rowcount SELECT i, 2 FROM generate_series(1, 10) i; +INSERT 0 10 +1: INSERT INTO ao_blkdir_test_rowcount SELECT i, 2 FROM ao_blkdir_test_rowcount; +INSERT 0 10 +1: INSERT INTO ao_blkdir_test_rowcount SELECT i, 2 FROM ao_blkdir_test_rowcount; +INSERT 0 20 +2: INSERT INTO ao_blkdir_test_rowcount SELECT i, 3 FROM generate_series(1, 20) i; +INSERT 0 20 +2: INSERT INTO ao_blkdir_test_rowcount SELECT i, 3 FROM ao_blkdir_test_rowcount; +INSERT 0 20 +2: INSERT INTO ao_blkdir_test_rowcount SELECT i, 3 FROM ao_blkdir_test_rowcount; +INSERT 0 40 +3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 30) i; +INSERT 0 30 +3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM ao_blkdir_test_rowcount; +INSERT 0 30 +3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM ao_blkdir_test_rowcount; +INSERT 0 60 +4: INSERT INTO ao_blkdir_test_rowcount SELECT i, 7 FROM generate_series(1, 50) i; +INSERT 0 50 +4: INSERT INTO ao_blkdir_test_rowcount SELECT i, 7 FROM ao_blkdir_test_rowcount; +INSERT 0 50 +4: INSERT INTO ao_blkdir_test_rowcount SELECT i, 7 FROM ao_blkdir_test_rowcount; +INSERT 0 100 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +3: ABORT; +ROLLBACK +4: COMMIT; +COMMIT + +DELETE FROM ao_blkdir_test_rowcount WHERE j = 7; +DELETE 200 +VACUUM ANALYZE ao_blkdir_test_rowcount; +VACUUM + +SELECT segno, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno; + segno | totalrows +-------+----------- + 1 | 40 + 2 | 80 +(2 rows) +SELECT segno, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aoseg('ao_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY 
segno; + segno | totalrows +-------+----------- + 1 | 40 + 2 | 80 + 3 | 0 + 4 | 0 +(4 rows) + +UPDATE ao_blkdir_test_rowcount SET i = i + 1; +UPDATE 120 +VACUUM ANALYZE ao_blkdir_test_rowcount; +VACUUM + +SELECT segno, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno; + segno | totalrows +-------+----------- + 3 | 120 +(1 row) +SELECT segno, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aoseg('ao_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno; + segno | totalrows +-------+----------- + 1 | 0 + 2 | 0 + 3 | 120 + 4 | 0 +(4 rows) + +DROP TABLE ao_blkdir_test_rowcount; +DROP TABLE + +-- +-- Test tuple fetch with holes from ABORTs +-- +CREATE TABLE ao_fetch_hole(i int, j int) USING ao_row; +CREATE TABLE +CREATE INDEX ON ao_fetch_hole(i); +CREATE INDEX +INSERT INTO ao_fetch_hole VALUES (2, 0); +INSERT 0 1 +-- Create a hole after the last entry (of the last minipage) in the blkdir. +BEGIN; +BEGIN +INSERT INTO ao_fetch_hole SELECT 3, j FROM generate_series(1, 20) j; +INSERT 0 20 +ABORT; +ROLLBACK +SELECT (gp_toolkit.__gp_aoblkdir('ao_fetch_hole')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5; + tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count +---------+-------+----------------+----------+--------------+-------------+----------- + (0,1) | 1 | 0 | 0 | 1 | 0 | 1 +(1 row) + +-- Ensure we will do an index scan. 
+SET enable_seqscan TO off; +SET +SET enable_indexonlyscan TO off; +SET +SET optimizer TO off; +SET +EXPLAIN SELECT count(*) FROM ao_fetch_hole WHERE i = 3; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Finalize Aggregate (cost=39.80..39.81 rows=1 width=8) + -> Gather Motion 1:1 (slice1; segments: 1) (cost=39.74..39.79 rows=3 width=8) + -> Partial Aggregate (cost=39.74..39.75 rows=1 width=8) + -> Bitmap Heap Scan on ao_fetch_hole (cost=4.82..39.67 rows=29 width=0) + Recheck Cond: (i = 3) + -> Bitmap Index Scan on ao_fetch_hole_i_idx (cost=0.00..4.81 rows=29 width=0) + Index Cond: (i = 3) + Optimizer: Postgres query optimizer +(8 rows) + +SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_sysscan', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +SELECT count(*) FROM ao_fetch_hole WHERE i = 3; + count +------- + 0 +(1 row) +-- Since the hole is at the end of the minipage, we can't avoid a sysscan for +-- each tuple. 
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Success: fault name:'AppendOnlyBlockDirectory_GetEntry_sysscan' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'20' + +(1 row) + +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +----------------- + Success: +(1 row) + +-- Now do 1 more insert, so that the hole is sandwiched between two successive +-- minipage entries. +INSERT INTO ao_fetch_hole VALUES (4, 21); +INSERT 0 1 +SELECT (gp_toolkit.__gp_aoblkdir('ao_fetch_hole')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5; + tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count +---------+-------+----------------+----------+--------------+-------------+----------- + (0,3) | 1 | 0 | 0 | 1 | 0 | 1 + (0,3) | 1 | 0 | 1 | 201 | 40 | 1 +(2 rows) + +SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_sysscan', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +SELECT count(*) FROM ao_fetch_hole WHERE i = 3; + count +------- + 0 +(1 row) +-- Since the hole is between two 
entries, we are always able to find the last +-- entry in the minipage, determine that the target row doesn't lie within it +-- and early return, thereby avoiding an expensive per-tuple sysscan. We only +-- do 1 sysscan - for the first tuple fetch in the hole and avoid it for all +-- subsequent fetches in the hole. +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Success: fault name:'AppendOnlyBlockDirectory_GetEntry_sysscan' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'1' + +(1 row) +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Success: fault name:'AppendOnlyBlockDirectory_GetEntry_inter_entry_hole' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'19' + +(1 row) + +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +----------------- + Success: +(1 row) +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'reset', dbid) FROM gp_segment_configuration 
WHERE content = 0 AND role = 'p'; + gp_inject_fault +----------------- + Success: +(1 row) + +RESET enable_seqscan; +RESET +RESET enable_indexonlyscan; +RESET +RESET optimizer; +RESET -------------------------------------------------------------------------------- -- AOCO tables -------------------------------------------------------------------------------- CREATE TABLE aoco_blkdir_test(i int, j int) USING ao_column DISTRIBUTED BY (j); -CREATE +CREATE TABLE CREATE INDEX aoco_blkdir_test_idx ON aoco_blkdir_test(i); -CREATE +CREATE INDEX 1: INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(1, 10) i; -INSERT 10 +INSERT 0 10 -- There should be 2 block directory rows with a single entry covering 10 rows, -- (1 for each column). SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5; @@ -340,7 +614,7 @@ SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_ (2 rows) 1: INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(11, 30) i; -INSERT 20 +INSERT 0 20 -- There should be 2 block directory rows, carrying 2 entries each. The rows -- from the previous INSERT should not be visible. The entries from the first -- INSERT should remain unchanged. @@ -356,11 +630,11 @@ SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_ 1: BEGIN; BEGIN 1: INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(31, 60) i; -INSERT 30 +INSERT 0 30 2: BEGIN; BEGIN 2: INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(71, 110) i; -INSERT 40 +INSERT 0 40 1: COMMIT; COMMIT 2: COMMIT; @@ -382,14 +656,12 @@ SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_ (8 rows) TRUNCATE aoco_blkdir_test; -TRUNCATE +TRUNCATE TABLE -- Insert enough rows to overflow the first block directory minipage by 2. 
set gp_appendonly_insert_files = 0; SET INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(1, 1317143) i; -INSERT 1317143 -reset gp_appendonly_insert_files; -RESET +INSERT 0 1317143 -- There should be 2 block directory rows, 2 for each column, one with 161 -- entries covering 1317141 rows and the other with 1 entry covering the 2 -- overflow rows. @@ -724,9 +996,9 @@ SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_ -- Unique index white box tests DROP TABLE aoco_blkdir_test; -DROP +DROP TABLE CREATE TABLE aoco_blkdir_test(h int, i int UNIQUE, j int) USING ao_column DISTRIBUTED BY (i); -CREATE +CREATE TABLE SELECT gp_inject_fault('appendonly_insert', 'suspend', '', '', 'aoco_blkdir_test', 1, 1, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content = 0; gp_inject_fault @@ -777,7 +1049,7 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi Success: (1 row) 1<: <... completed> -INSERT 1 +INSERT 0 1 -- The placeholder row is invisible to the INSERTing transaction. Since the -- INSERT finished, there should be 3 visible blkdir rows representing the @@ -818,10 +1090,308 @@ COMMIT -- properly resolve uniqueness checks (by consulting the first non-dropped -- column's block directory row). 3<: <... completed> -ALTER +ALTER TABLE 4: INSERT INTO aoco_blkdir_test VALUES (2, 2); ERROR: duplicate key value violates unique constraint "aoco_blkdir_test_i_key" (seg0 192.168.0.148:7002 pid=176693) DETAIL: Key (i)=(2) already exists. DROP TABLE aoco_blkdir_test; -DROP +DROP TABLE + +-- Test `tupcount` in pg_ao(cs)seg == sum of number of `row_count` across all +-- aoblkdir entries for each . Test with commits, aborts +-- and deletes. 
+ +-- Case1: without VACUUM ANALYZE +CREATE TABLE aoco_blkdir_test_rowcount(i int, j int) USING ao_column DISTRIBUTED BY (j); +CREATE TABLE +1: BEGIN; +BEGIN +2: BEGIN; +BEGIN +3: BEGIN; +BEGIN +4: BEGIN; +BEGIN +1: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 2 FROM generate_series(1, 10) i; +INSERT 0 10 +2: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 3 FROM generate_series(1, 20) i; +INSERT 0 20 +3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 30) i; +INSERT 0 30 +3: ABORT; +ROLLBACK +3: BEGIN; +BEGIN +3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 40) i; +INSERT 0 40 +4: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 7 FROM generate_series(1, 50) i; +INSERT 0 50 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +3: COMMIT; +COMMIT +4: COMMIT; +COMMIT +DELETE FROM aoco_blkdir_test_rowcount WHERE j = 7; +DELETE 50 + +CREATE INDEX aoco_blkdir_test_rowcount_idx ON aoco_blkdir_test_rowcount(i); +CREATE INDEX + +SELECT segno, columngroup_no, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno, columngroup_no; + segno | columngroup_no | totalrows +-------+----------------+----------- + 1 | 0 | 10 + 1 | 1 | 10 + 2 | 0 | 20 + 2 | 1 | 20 + 3 | 0 | 40 + 3 | 1 | 40 + 4 | 0 | 50 + 4 | 1 | 50 +(8 rows) +SELECT segno, column_num, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aocsseg('aoco_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno, column_num; + segno | column_num | totalrows +-------+------------+----------- + 1 | 0 | 10 + 1 | 1 | 10 + 2 | 0 | 20 + 2 | 1 | 20 + 3 | 0 | 40 + 3 | 1 | 40 + 4 | 0 | 50 + 4 | 1 | 50 +(8 rows) + +-- Case2: with VACUUM ANALYZE +DROP TABLE aoco_blkdir_test_rowcount; +DROP TABLE +CREATE TABLE aoco_blkdir_test_rowcount(i int, j int) USING ao_column DISTRIBUTED BY (j); +CREATE TABLE +CREATE INDEX aoco_blkdir_test_rowcount_idx ON 
aoco_blkdir_test_rowcount(i); +CREATE INDEX +1: BEGIN; +BEGIN +2: BEGIN; +BEGIN +3: BEGIN; +BEGIN +4: BEGIN; +BEGIN +1: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 2 FROM generate_series(1, 10) i; +INSERT 0 10 +1: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 2 FROM aoco_blkdir_test_rowcount; +INSERT 0 10 +1: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 2 FROM aoco_blkdir_test_rowcount; +INSERT 0 20 +2: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 3 FROM generate_series(1, 20) i; +INSERT 0 20 +2: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 3 FROM aoco_blkdir_test_rowcount; +INSERT 0 20 +2: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 3 FROM aoco_blkdir_test_rowcount; +INSERT 0 40 +3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 30) i; +INSERT 0 30 +3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM aoco_blkdir_test_rowcount; +INSERT 0 30 +3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM aoco_blkdir_test_rowcount; +INSERT 0 60 +4: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 7 FROM generate_series(1, 50) i; +INSERT 0 50 +4: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 7 FROM aoco_blkdir_test_rowcount; +INSERT 0 50 +4: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 7 FROM aoco_blkdir_test_rowcount; +INSERT 0 100 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +3: ABORT; +ROLLBACK +4: COMMIT; +COMMIT + +DELETE FROM aoco_blkdir_test_rowcount WHERE j = 7; +DELETE 200 +VACUUM ANALYZE aoco_blkdir_test_rowcount; +VACUUM + +SELECT segno, columngroup_no, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno, columngroup_no; + segno | columngroup_no | totalrows +-------+----------------+----------- + 1 | 0 | 40 + 1 | 1 | 40 + 2 | 0 | 80 + 2 | 1 | 80 +(4 rows) +SELECT segno, column_num, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aocsseg('aoco_blkdir_test_rowcount') 
WHERE segment_id = 0 GROUP BY segno, column_num; + segno | column_num | totalrows +-------+------------+----------- + 1 | 0 | 40 + 1 | 1 | 40 + 2 | 0 | 80 + 2 | 1 | 80 + 3 | 0 | 0 + 3 | 1 | 0 + 4 | 0 | 0 + 4 | 1 | 0 +(8 rows) + +UPDATE aoco_blkdir_test_rowcount SET i = i + 1; +UPDATE 120 +VACUUM ANALYZE aoco_blkdir_test_rowcount; +VACUUM + +SELECT segno, columngroup_no, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno, columngroup_no; + segno | columngroup_no | totalrows +-------+----------------+----------- + 3 | 0 | 120 + 3 | 1 | 120 +(2 rows) +SELECT segno, column_num, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aocsseg('aoco_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno, column_num; + segno | column_num | totalrows +-------+------------+----------- + 1 | 0 | 0 + 1 | 1 | 0 + 2 | 0 | 0 + 2 | 1 | 0 + 3 | 0 | 120 + 3 | 1 | 120 + 4 | 0 | 0 + 4 | 1 | 0 +(8 rows) + +DROP TABLE aoco_blkdir_test_rowcount; +DROP TABLE + +-- +-- Test tuple fetch with holes from ABORTs +-- +CREATE TABLE aoco_fetch_hole(i int, j int) USING ao_row; +CREATE TABLE +CREATE INDEX ON aoco_fetch_hole(i); +CREATE INDEX +INSERT INTO aoco_fetch_hole VALUES (2, 0); +INSERT 0 1 +-- Create a hole after the last entry (of the last minipage) in the blkdir. +BEGIN; +BEGIN +INSERT INTO aoco_fetch_hole SELECT 3, j FROM generate_series(1, 20) j; +INSERT 0 20 +ABORT; +ROLLBACK +SELECT (gp_toolkit.__gp_aoblkdir('aoco_fetch_hole')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5; + tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count +---------+-------+----------------+----------+--------------+-------------+----------- + (0,1) | 1 | 0 | 0 | 1 | 0 | 1 +(1 row) + +-- Ensure we will do an index scan. 
+SET enable_seqscan TO off; +SET +SET enable_indexonlyscan TO off; +SET +SET optimizer TO off; +SET +EXPLAIN SELECT count(*) FROM aoco_fetch_hole WHERE i = 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Finalize Aggregate (cost=39.80..39.81 rows=1 width=8) + -> Gather Motion 1:1 (slice1; segments: 1) (cost=39.74..39.79 rows=3 width=8) + -> Partial Aggregate (cost=39.74..39.75 rows=1 width=8) + -> Bitmap Heap Scan on aoco_fetch_hole (cost=4.82..39.67 rows=29 width=0) + Recheck Cond: (i = 3) + -> Bitmap Index Scan on aoco_fetch_hole_i_idx (cost=0.00..4.81 rows=29 width=0) + Index Cond: (i = 3) + Optimizer: Postgres query optimizer +(8 rows) + +SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_sysscan', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +SELECT count(*) FROM aoco_fetch_hole WHERE i = 3; + count +------- + 0 +(1 row) +-- Since the hole is at the end of the minipage, we can't avoid a sysscan for +-- each tuple. 
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Success: fault name:'AppendOnlyBlockDirectory_GetEntry_sysscan' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'20' + +(1 row) + +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +----------------- + Success: +(1 row) + +-- Now do 1 more insert, so that the hole is sandwiched between two successive +-- minipage entries. +INSERT INTO aoco_fetch_hole VALUES (4, 21); +INSERT 0 1 +SELECT (gp_toolkit.__gp_aoblkdir('aoco_fetch_hole')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5; + tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count +---------+-------+----------------+----------+--------------+-------------+----------- + (0,3) | 1 | 0 | 0 | 1 | 0 | 1 + (0,3) | 1 | 0 | 1 | 201 | 40 | 1 +(2 rows) + +SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_sysscan', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +SELECT count(*) FROM aoco_fetch_hole WHERE i = 3; + count +------- + 0 +(1 row) +-- Since the hole is between two 
entries, we are always able to find the last +-- entry in the minipage, determine that the target row doesn't lie within it +-- and early return, thereby avoiding an expensive per-tuple sysscan. We only +-- do 1 sysscan - for the first tuple fetch in the hole and avoid it for all +-- subsequent fetches in the hole. +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Success: fault name:'AppendOnlyBlockDirectory_GetEntry_sysscan' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'1' + +(1 row) +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Success: fault name:'AppendOnlyBlockDirectory_GetEntry_inter_entry_hole' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'19' + +(1 row) + +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p'; + gp_inject_fault +----------------- + Success: +(1 row) +SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'reset', dbid) FROM gp_segment_configuration 
WHERE content = 0 AND role = 'p'; + gp_inject_fault +----------------- + Success: +(1 row) + +RESET enable_seqscan; +RESET +RESET enable_indexonlyscan; +RESET +RESET optimizer; +RESET diff --git a/src/test/isolation2/expected/ao_index_build_progress.out b/src/test/isolation2/expected/ao_index_build_progress.out index 1048076ce9f..f98be86ea65 100644 --- a/src/test/isolation2/expected/ao_index_build_progress.out +++ b/src/test/isolation2/expected/ao_index_build_progress.out @@ -3,11 +3,15 @@ -- AO table CREATE TABLE ao_index_build_progress(i int, j bigint) USING ao_row WITH (compresstype=zstd, compresslevel=2); -CREATE +CREATE TABLE -- Insert all tuples to seg1. INSERT INTO ao_index_build_progress SELECT 0, i FROM generate_series(1, 100000) i; -INSERT 100000 +INSERT 0 100000 +INSERT INTO ao_index_build_progress SELECT 2, i FROM generate_series(1, 100000) i; +INSERT 0 100000 +INSERT INTO ao_index_build_progress SELECT 5, i FROM generate_series(1, 100000) i; +INSERT 0 100000 -- Suspend execution when some blocks have been read. SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'suspend', '', '', '', 10, 10, 0, dbid) FROM gp_segment_configuration WHERE content = 1 AND role = 'p'; @@ -40,15 +44,19 @@ SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'reset', d (1 row) 1<: <... completed> -CREATE +CREATE INDEX -- AOCO table CREATE TABLE aoco_index_build_progress(i int, j int ENCODING (compresstype=zstd, compresslevel=2)) USING ao_column; -CREATE +CREATE TABLE -- Insert all tuples to seg1. INSERT INTO aoco_index_build_progress SELECT 0, i FROM generate_series(1, 100000) i; -INSERT 100000 +INSERT 0 100000 +INSERT INTO aoco_index_build_progress SELECT 2, i FROM generate_series(1, 100000) i; +INSERT 0 100000 +INSERT INTO aoco_index_build_progress SELECT 5, i FROM generate_series(1, 100000) i; +INSERT 0 100000 -- Suspend execution when some blocks have been read. 
SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'suspend', '', '', '', 5, 5, 0, dbid) FROM gp_segment_configuration WHERE content = 1 AND role = 'p'; @@ -83,7 +91,7 @@ SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'reset', d (1 row) 1<: <... completed> -CREATE +CREATE INDEX -- Repeat the test for another index build @@ -120,4 +128,4 @@ SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'reset', d (1 row) 1<: <... completed> -CREATE +CREATE INDEX diff --git a/src/test/isolation2/expected/ao_partition_lock.out b/src/test/isolation2/expected/ao_partition_lock.out index 8802c69acc9..97f1ed97358 100644 --- a/src/test/isolation2/expected/ao_partition_lock.out +++ b/src/test/isolation2/expected/ao_partition_lock.out @@ -5,25 +5,25 @@ -- lock is not acquired. create table test_ao_partition_lock ( field_dk integer ,field_part integer) with (appendonly=true) DISTRIBUTED BY (field_dk) PARTITION BY LIST(field_part) ( partition val1 values(1), partition val2 values(2), partition val3 values(3) ); -CREATE +CREATE TABLE 1: begin; BEGIN 1: insert into test_ao_partition_lock_1_prt_val1 values(1,1); -INSERT 1 +INSERT 0 1 2: begin; BEGIN 2: alter table test_ao_partition_lock truncate partition for (2); -ALTER +ALTER TABLE 2: end; -END +COMMIT 1: end; -END +COMMIT 1q: ... 2q: ... 
drop table test_ao_partition_lock; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/ao_same_trans_truncate_crash.out b/src/test/isolation2/expected/ao_same_trans_truncate_crash.out index 318de3ce104..6999ec36dbc 100644 --- a/src/test/isolation2/expected/ao_same_trans_truncate_crash.out +++ b/src/test/isolation2/expected/ao_same_trans_truncate_crash.out @@ -43,11 +43,11 @@ CHECKPOINT 1: BEGIN; BEGIN 1: CREATE TABLE ao_same_trans_truncate(a int, b int) WITH (appendonly=true, orientation=column); -CREATE +CREATE TABLE 1: TRUNCATE TABLE ao_same_trans_truncate; -TRUNCATE +TRUNCATE TABLE 1: ABORT; -ABORT +ROLLBACK -- restart (immediate) to invoke crash recovery 1: SELECT pg_ctl(datadir, 'restart') FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; pg_ctl diff --git a/src/test/isolation2/expected/ao_unique_index.out b/src/test/isolation2/expected/ao_unique_index.out index d737b1feedc..a1638304ee2 100644 --- a/src/test/isolation2/expected/ao_unique_index.out +++ b/src/test/isolation2/expected/ao_unique_index.out @@ -8,9 +8,9 @@ -- Case 1: Conflict with committed transaction---------------------------------- CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729); -INSERT 329729 +INSERT 0 329729 -- should conflict INSERT INTO unique_index_ao_row VALUES (1); ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg1 192.168.0.148:7003 pid=205740) @@ -20,73 +20,73 @@ ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_ke DETAIL: Key (a)=(329729) already exists. 
-- should not conflict INSERT INTO unique_index_ao_row VALUES (329730); -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -- Case 2: Conflict within the same transaction--------------------------------- CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE BEGIN; BEGIN INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729); -INSERT 329729 +INSERT 0 329729 -- should conflict INSERT INTO unique_index_ao_row VALUES (1); ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg0 192.168.0.148:7002 pid=205739) DETAIL: Key (a)=(1) already exists. END; -END +ROLLBACK DROP TABLE unique_index_ao_row; -DROP +DROP TABLE CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE BEGIN; BEGIN INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729); -INSERT 329729 +INSERT 0 329729 -- should conflict INSERT INTO unique_index_ao_row VALUES (329729); ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg0 192.168.0.148:7002 pid=205739) DETAIL: Key (a)=(329729) already exists. 
END; -END +ROLLBACK DROP TABLE unique_index_ao_row; -DROP +DROP TABLE CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE BEGIN; BEGIN INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729); -INSERT 329729 +INSERT 0 329729 -- should not conflict INSERT INTO unique_index_ao_row VALUES (329730); -INSERT 1 +INSERT 0 1 END; -END +COMMIT DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -- Case 3: Conflict with aborted transaction is not a conflict------------------ CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE BEGIN; BEGIN INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729); -INSERT 329729 +INSERT 0 329729 ABORT; -ABORT +ROLLBACK -- should not conflict INSERT INTO unique_index_ao_row VALUES (1); -INSERT 1 +INSERT 0 1 INSERT INTO unique_index_ao_row VALUES (329729); -INSERT 1 +INSERT 0 1 INSERT INTO unique_index_ao_row VALUES (329730); -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -- Case 4: Conflict with to-be-committed transaction---------------------------- -- @@ -106,21 +106,21 @@ DROP -- 9. Tx 1 commits -- CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE 1: BEGIN; BEGIN 1: INSERT INTO unique_index_ao_row VALUES (0); -INSERT 1 +INSERT 0 1 2: BEGIN; BEGIN 2: INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729); -INSERT 329729 +INSERT 0 329729 3&: INSERT INTO unique_index_ao_row VALUES (1); 4&: INSERT INTO unique_index_ao_row VALUES (329728); 5&: INSERT INTO unique_index_ao_row VALUES (329729); -- should succeed immediately 6: INSERT INTO unique_index_ao_row VALUES (329730); -INSERT 1 +INSERT 0 1 2: COMMIT; COMMIT 3<: <... completed> @@ -135,7 +135,7 @@ DETAIL: Key (a)=(329729) already exists. 
1: COMMIT; COMMIT DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -- Case 5: Conflict with to-be-aborted transaction------------------------------ -- @@ -155,37 +155,37 @@ DROP -- 10. Tx 1 commits -- CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE 1: BEGIN; BEGIN 1: INSERT INTO unique_index_ao_row VALUES (0); -INSERT 1 +INSERT 0 1 2: BEGIN; BEGIN 2: INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729); -INSERT 329729 +INSERT 0 329729 3&: INSERT INTO unique_index_ao_row VALUES (1); 4&: INSERT INTO unique_index_ao_row VALUES (329728); 5&: INSERT INTO unique_index_ao_row VALUES (329729); -- should succeed immediately 6: INSERT INTO unique_index_ao_row VALUES (329730); -INSERT 1 +INSERT 0 1 2: ABORT; -ABORT +ROLLBACK 3<: <... completed> -INSERT 1 +INSERT 0 1 4<: <... completed> -INSERT 1 +INSERT 0 1 5<: <... completed> -INSERT 1 +INSERT 0 1 1: COMMIT; COMMIT DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -- Case 6: Conflict with aborted rows following some committed rows ------------ CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE -- 1. Tx 1 commits rows 1-100. -- 2. Tx 2 inserts rows 101-200 and then aborts. -- 3. Tx 3 tries to insert row in range [101,200] and is immediately successful. @@ -193,22 +193,22 @@ CREATE -- constraint violation. -- 5. Tx 5 tries to insert row in range [201, ) and is immediately successful. 1: INSERT INTO unique_index_ao_row SELECT generate_series(1, 100); -INSERT 100 +INSERT 0 100 2: BEGIN; BEGIN 2: INSERT INTO unique_index_ao_row SELECT generate_series(101, 200); -INSERT 100 +INSERT 0 100 2: ABORT; -ABORT +ROLLBACK 3: INSERT INTO unique_index_ao_row VALUES(102); -INSERT 1 +INSERT 0 1 4: INSERT INTO unique_index_ao_row VALUES(2); ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg0 192.168.0.148:7002 pid=659656) DETAIL: Key (a)=(2) already exists. 
5: INSERT INTO unique_index_ao_row VALUES(202); -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -------------------------------------------------------------------------------- ----------------- More concurrent tests with fault injection ------------------ @@ -232,7 +232,7 @@ DROP -- 6. Tx 2 succeeds as Tx 1 aborted. CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE SELECT gp_inject_fault('appendonly_insert', 'suspend', '', '', 'unique_index_ao_row', 4, 4, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; gp_inject_fault ----------------- @@ -252,9 +252,9 @@ SELECT gp_wait_until_triggered_fault('appendonly_insert', 4, dbid) FROM gp_segme (3 rows) 2&: INSERT INTO unique_index_ao_row VALUES(2); 4: INSERT INTO unique_index_ao_row VALUES(11); -INSERT 1 +INSERT 0 1 3: INSERT INTO unique_index_ao_row VALUES(4); -INSERT 1 +INSERT 0 1 SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; gp_inject_fault ----------------- @@ -266,9 +266,9 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg1 192.168.0.148:7003 pid=828519) DETAIL: Key (a)=(4) already exists. 2<: <... completed> -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -- Case 8: Conflict with to-be-committed transaction - generalization of case 7 -- where there are multiple minipages (and block directory rows) in play from @@ -293,7 +293,7 @@ DROP -- 7. All blocked Txs succeed. 
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE SELECT gp_inject_fault('insert_new_entry_curr_minipage_full', 'suspend', '', '', '', 2, 2, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; gp_inject_fault @@ -349,7 +349,7 @@ SELECT gp_wait_until_triggered_fault('appendonly_insert', 4, dbid) FROM gp_segme 8&: INSERT INTO unique_index_ao_row VALUES(661507); -- no index entry exists for it, so should not conflict. 9: INSERT INTO unique_index_ao_row VALUES(661510); -INSERT 1 +INSERT 0 1 SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; gp_inject_fault @@ -363,29 +363,29 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg1 192.168.0.148:7003 pid=630215) DETAIL: Key (a)=(661510) already exists. 2<: <... completed> -INSERT 1 +INSERT 0 1 3<: <... completed> -INSERT 1 +INSERT 0 1 4<: <... completed> -INSERT 1 +INSERT 0 1 5<: <... completed> -INSERT 1 +INSERT 0 1 6<: <... completed> -INSERT 1 +INSERT 0 1 7<: <... completed> -INSERT 1 +INSERT 0 1 8<: <... completed> -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -------------------------------------------------------------------------------- --------------------------- Smoke tests for COPY ------------------------------- -------------------------------------------------------------------------------- CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE 1: BEGIN; BEGIN @@ -405,23 +405,23 @@ CONTEXT: COPY unique_index_ao_row, line 1 2<: <... 
completed> COPY 1 1: END; -END +ROLLBACK DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -------------------------------------------------------------------------------- -------------------- Smoke tests for subtransactions --------------------------- -------------------------------------------------------------------------------- CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE 1: BEGIN; BEGIN 1: SAVEPOINT a; SAVEPOINT 1: INSERT INTO unique_index_ao_row VALUES(1); -INSERT 1 +INSERT 0 1 -- concurrent tx inserting conflicting row should block. 2: BEGIN; @@ -429,7 +429,7 @@ BEGIN 2&: INSERT INTO unique_index_ao_row VALUES(1); -- concurrent tx inserting non-conflicting row should be successful. 3: INSERT INTO unique_index_ao_row VALUES(2); -INSERT 1 +INSERT 0 1 -- conflict should be detected within the same subtx. 1: INSERT INTO unique_index_ao_row VALUES(1); @@ -437,15 +437,15 @@ ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_ke DETAIL: Key (a)=(1) already exists. -- the concurrent tx should now succeed. 2<: <... completed> -INSERT 1 +INSERT 0 1 2: ABORT; -ABORT +ROLLBACK -- after rolling back to the savepoint, we should be able to re-insert the key 1: ROLLBACK TO SAVEPOINT a; ROLLBACK 1: INSERT INTO unique_index_ao_row VALUES(1); -INSERT 1 +INSERT 0 1 1: COMMIT; COMMIT @@ -457,7 +457,7 @@ SELECT * FROM unique_index_ao_row; (2 rows) DROP TABLE unique_index_ao_row; -DROP +DROP TABLE -------------------------------------------------------------------------------- -------------------- Smoke tests for repeatable read --------------------------- @@ -467,7 +467,7 @@ DROP -- boundaries. CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE -- Begin two txs with tx level snapshot taken early. 
1: BEGIN ISOLATION LEVEL REPEATABLE READ; @@ -487,11 +487,11 @@ BEGIN 3: BEGIN; BEGIN 3: INSERT INTO unique_index_ao_row VALUES(1); -INSERT 1 +INSERT 0 1 -- And another transaction inserts a key and commits. INSERT INTO unique_index_ao_row VALUES(2); -INSERT 1 +INSERT 0 1 -- Tx should block on insert of conflicting key, even though it can't "see" the -- conflicting key due to its isolation level. @@ -502,11 +502,11 @@ INSERT 1 1&: INSERT INTO unique_index_ao_row VALUES(1); 3: ABORT; -ABORT +ROLLBACK 1<: <... completed> -INSERT 1 +INSERT 0 1 1: ABORT; -ABORT +ROLLBACK -- Tx should raise a conflict, even though it can't "see" the conflicting key -- due to its isolation level. @@ -522,52 +522,3 @@ ABORT DROP TABLE unique_index_ao_row; DROP - - --------------------------------------------------------------------------------- ------------------------ Smoke tests for ADD CONSTRAINT ------------------------ --------------------------------------------------------------------------------- -CREATE TABLE unique_index_ao_row (a INT) USING ao_row DISTRIBUTED REPLICATED; -CREATE -INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 5); -INSERT 5 - -ALTER table unique_index_ao_row ADD CONSTRAINT a_unique UNIQUE(a); -ALTER --- should conflict -INSERT INTO unique_index_ao_row VALUES (1); -DETAIL: Key (a)=(1) already exists. -ERROR: duplicate key value violates unique constraint "a_unique" -ALTER table unique_index_ao_row DROP CONSTRAINT a_unique; -ALTER - -INSERT INTO unique_index_ao_row VALUES (1); -INSERT 1 --- should failed -ALTER table unique_index_ao_row ADD CONSTRAINT a_unique UNIQUE(a); -DETAIL: Key (a)=(1) is duplicated. 
-ERROR: could not create unique index "a_unique" - -DROP TABLE unique_index_ao_row; -DROP - - --------------------------------------------------------------------------------- ------------------------ Smoke tests for Multiple Key --------------------------- --------------------------------------------------------------------------------- -CREATE TABLE unique_index_ao_row (a INT, b INT) USING ao_row DISTRIBUTED REPLICATED; -CREATE -INSERT INTO unique_index_ao_row SELECT i,i FROM generate_series(1, 5) i; -INSERT 5 - -CREATE UNIQUE INDEX a_b_unique ON unique_index_ao_row(a,b); -CREATE --- should not conflict -INSERT INTO unique_index_ao_row VALUES (1,2); -INSERT 1 --- should conflict -INSERT INTO unique_index_ao_row VALUES (1,1); -DETAIL: Key (a, b)=(1, 1) already exists. -ERROR: duplicate key value violates unique constraint "a_b_unique" -DROP TABLE unique_index_ao_row; -DROP diff --git a/src/test/isolation2/expected/aoco_column_rewrite.out b/src/test/isolation2/expected/aoco_column_rewrite.out index 58c20004c57..2d3933ee7eb 100644 --- a/src/test/isolation2/expected/aoco_column_rewrite.out +++ b/src/test/isolation2/expected/aoco_column_rewrite.out @@ -9,3 +9,1709 @@ * So when we cherry-pick the commit 20f39c76f3dd03b0feb8b05011c1e0637df6c77e, please * also bring the changes in 17b0aac07bebc771b2d3a32bbd22cc8318201f57, or we will lost it. 
*/ +-------------------------------------------------------------------------------- +-- Tests for various scenarios with the column rewrite optimization +-- for AT on AOCO tables +-------------------------------------------------------------------------------- + +PREPARE attribute_encoding_check AS SELECT c.relname, a.attname, e.filenum, e.attoptions FROM pg_attribute_encoding e, pg_class c, pg_attribute a WHERE e.attrelid = c.oid AND e.attnum = a.attnum and e.attrelid = a.attrelid AND c.relname LIKE $1; +PREPARE + +CREATE TABLE if not exists relfilenodecheck(segid int, relname text, relfilenodebefore int, relfilenodeafter int, casename text); +CREATE TABLE + +PREPARE capturerelfilenodebefore AS INSERT INTO relfilenodecheck SELECT -1 segid, relname, pg_relation_filenode(relname::text) as relfilenode, NULL::int, $1 as casename FROM pg_class WHERE relname LIKE $2 UNION SELECT gp_segment_id segid, relname, pg_relation_filenode(relname::text) as relfilenode, NULL::int, $1 as casename FROM gp_dist_random('pg_class') WHERE relname LIKE $2 ORDER BY segid; +PREPARE + +PREPARE checkrelfilenodediff AS SELECT a.segid, b.casename, b.relname, (relfilenodebefore != a.relfilenode) rewritten FROM ( SELECT -1 segid, relname, pg_relation_filenode(relname::text) as relfilenode FROM pg_class WHERE relname LIKE $2 UNION SELECT gp_segment_id segid, relname, pg_relation_filenode(relname::text) as relfilenode FROM gp_dist_random('pg_class') WHERE relname LIKE $2 ORDER BY segid )a, relfilenodecheck b WHERE b.casename LIKE $1 and b.relname LIKE $2 and a.segid = b.segid; +PREPARE + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE and ADD COLUMN on AOCO doesn't rewrite the entire table +-------------------------------------------------------------------------------- + +CREATE TABLE alter_type_aoco(a int, b int, c int) using ao_column; +CREATE TABLE +INSERT INTO alter_type_aoco VALUES (20,1,2); +INSERT 0 1 +EXECUTE 
attribute_encoding_check ('alter_type_aoco'); + relname | attname | filenum | attoptions +-----------------+---------+---------+------------------------------------------------------------- + alter_type_aoco | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 1 | 48 | 48 | 1 | 3 | 1 + 1 | 1 | 1 | 129 | 1 | 48 | 48 | 1 | 3 | 1 + 1 | 1 | 2 | 257 | 1 | 48 | 48 | 1 | 3 | 1 +(3 rows) +EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco'); +INSERT 0 4 +SELECT * FROM alter_type_aoco; + a | b | c +----+---+--- + 20 | 1 | 2 +(1 row) + +ALTER TABLE alter_type_aoco ALTER COLUMN b TYPE text; +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_type_aoco'); + relname | attname | filenum | attoptions +-----------------+---------+---------+------------------------------------------------------------- + alter_type_aoco | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 
| 1 | 1 | 48 | 48 | 2 | 3 | 1 + 1 | 1 | 1 | 204929 | 1 | 48 | 48 | 2 | 3 | 1 + 1 | 1 | 2 | 257 | 1 | 48 | 48 | 2 | 3 | 1 +(3 rows) +EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco'); + segid | casename | relname | rewritten +-------+--------------+-----------------+----------- + 0 | alter_column | alter_type_aoco | f + 1 | alter_column | alter_type_aoco | f + -1 | alter_column | alter_type_aoco | f + 2 | alter_column | alter_type_aoco | f +(4 rows) +-- data is intact +SELECT * FROM alter_type_aoco; + a | b | c +----+---+--- + 20 | 1 | 2 +(1 row) +INSERT INTO alter_type_aoco VALUES (20,'1',2); +INSERT 0 1 +-- data is intact +SELECT * FROM alter_type_aoco; + a | b | c +----+---+--- + 20 | 1 | 2 + 20 | 1 | 2 +(2 rows) + +ALTER TABLE alter_type_aoco ADD COLUMN d int; +ALTER TABLE + +INSERT INTO alter_type_aoco VALUES (20,'1',2, 3); +INSERT 0 1 +-- check if we chose correct filenum for newly added column +EXECUTE attribute_encoding_check ('alter_type_aoco'); + relname | attname | filenum | attoptions +-----------------+---------+---------+------------------------------------------------------------- + alter_type_aoco | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco | d | 4 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(4 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 3 | 144 | 144 | 5 | 3 | 1 + 1 | 1 | 1 | 204929 | 3 | 144 | 144 | 5 | 3 | 1 + 1 | 1 | 2 | 257 | 3 | 144 | 144 | 5 | 3 | 1 + 1 | 1 | 3 | 385 | 3 | 144 | 
144 | 5 | 3 | 1 +(4 rows) +DROP TABLE alter_type_aoco; +DROP TABLE +CHECKPOINT; +CHECKPOINT +-- check if all files are dropped correctly +SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_type_aoco'); + gp_segment_id | tablespace | filename +---------------+------------+---------- +(0 rows) + +-------------------------------------------------------------------------------- +-- Test if column rewrite handles deleted rows in blockdirectory correctly for +-- more than 1 minipage + +-- We create a table and its blkdir and insert enough data to have more than one +-- minipage in the block directory, and check if the column rewrite rewrites the +-- blockdirectory correctly +-------------------------------------------------------------------------------- +CREATE TABLE alter_type_aoco_delete(a int, b int, c int) USING ao_column; +CREATE TABLE +CREATE INDEX at_aoco_idx on alter_type_aoco_delete(c); +CREATE INDEX +INSERT INTO alter_type_aoco_delete SELECT 1,i,i FROM generate_series(1,10000)i; +INSERT 0 10000 +DELETE FROM alter_type_aoco_delete WHERE b%3 = 1; +DELETE 3334 +EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco_delete'); +INSERT 0 4 +SELECT count(*) FROM alter_type_aoco_delete; + count +------- + 6666 +(1 row) + +-- test both ALTER COLUMN TYPE and ALTER COLUMN SET ENCODING together +ALTER TABLE alter_type_aoco_delete ALTER COLUMN b TYPE text, ALTER COLUMN c SET ENCODING (compresstype=rle_type, compresslevel=4); +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_type_aoco_delete'); + relname | attname | filenum | attoptions +------------------------+---------+---------+----------------------------------------------------------------- + alter_type_aoco_delete | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_delete | c | 1603 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=4'] + alter_type_aoco_delete | b 
| 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-------+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 10000 | 40088 | 40088 | 3 | 3 | 1 + 1 | 1 | 1 | 204929 | 10000 | 48984 | 48984 | 3 | 3 | 1 + 1 | 1 | 2 | 205057 | 10000 | 88 | 40047 | 3 | 3 | 1 +(3 rows) +SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete')).* FROM gp_dist_random('gp_id'); + gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count +---------------+---------+-------+----------------+----------+--------------+-------------+----------- + 1 | (0,1) | 1 | 0 | 0 | 1 | 0 | 8181 + 1 | (0,1) | 1 | 0 | 1 | 8182 | 32768 | 1819 + 1 | (0,4) | 1 | 1 | 0 | 1 | 0 | 6766 + 1 | (0,4) | 1 | 1 | 1 | 6767 | 32768 | 3234 + 1 | (0,5) | 1 | 2 | 0 | 1 | 0 | 10000 +(5 rows) +EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco_delete'); + segid | casename | relname | rewritten +-------+--------------+------------------------+----------- + 2 | alter_column | alter_type_aoco_delete | f + 0 | alter_column | alter_type_aoco_delete | f + 1 | alter_column | alter_type_aoco_delete | f + -1 | alter_column | alter_type_aoco_delete | f +(4 rows) +SELECT count(b) FROM alter_type_aoco_delete; + count +------- + 6666 +(1 row) +SELECT count(*) FROM alter_type_aoco_delete; + count +------- + 6666 +(1 row) + +-------------------------------------------------------------------------------- +-- Test if column rewrite handles blockdirectory and visimap +-- for deleted rows correctly with multiple blocks in same segfile + +-- Here, we insert data into two different blocks and delete all rows from first +-- block. 
We test if that block is still replicated in the rewritten col +-------------------------------------------------------------------------------- +CREATE TABLE alter_type_aoco_delete1(a int, b int, c int) USING ao_column; +CREATE TABLE +CREATE INDEX at_aoco_idx1 on alter_type_aoco_delete1(c); +CREATE INDEX +INSERT INTO alter_type_aoco_delete1 VALUES (1,2,2); +INSERT 0 1 +INSERT INTO alter_type_aoco_delete1 VALUES (1,3,3); +INSERT 0 1 +DELETE FROM alter_type_aoco_delete1 WHERE b = 2; +DELETE 1 +EXECUTE attribute_encoding_check ('alter_type_aoco_delete1'); + relname | attname | filenum | attoptions +-------------------------+---------+---------+------------------------------------------------------------- + alter_type_aoco_delete1 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_delete1 | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_delete1 | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete1') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 2 | 96 | 96 | 3 | 3 | 1 + 1 | 1 | 1 | 129 | 2 | 96 | 96 | 3 | 3 | 1 + 1 | 1 | 2 | 257 | 2 | 96 | 96 | 3 | 3 | 1 +(3 rows) +SELECT (gp_toolkit.__gp_aovisimap('alter_type_aoco_delete1')).* FROM gp_dist_random('gp_id'); + tid | segno | row_num +--------------+-------+--------- + (33554432,2) | 1 | 1 +(1 row) +SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete1')).* FROM gp_dist_random('gp_id'); + gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count +---------------+---------+-------+----------------+----------+--------------+-------------+----------- + 1 
| (0,4) | 1 | 0 | 0 | 1 | 0 | 1 + 1 | (0,4) | 1 | 0 | 1 | 101 | 48 | 1 + 1 | (0,5) | 1 | 1 | 0 | 1 | 0 | 1 + 1 | (0,5) | 1 | 1 | 1 | 101 | 48 | 1 + 1 | (0,6) | 1 | 2 | 0 | 1 | 0 | 1 + 1 | (0,6) | 1 | 2 | 1 | 101 | 48 | 1 +(6 rows) +EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco_delete1'); +INSERT 0 4 +SELECT * FROM alter_type_aoco_delete1; + a | b | c +---+---+--- + 1 | 3 | 3 +(1 row) + +-- test both ALTER COLUMN TYPE and ALTER COLUMN SET ENCODING together +ALTER TABLE alter_type_aoco_delete1 ALTER COLUMN b TYPE text, ALTER COLUMN c SET ENCODING (compresstype=rle_type, compresslevel=4); +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_type_aoco_delete1'); + relname | attname | filenum | attoptions +-------------------------+---------+---------+----------------------------------------------------------------- + alter_type_aoco_delete1 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_delete1 | c | 1603 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=4'] + alter_type_aoco_delete1 | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete1') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 2 | 96 | 96 | 4 | 3 | 1 + 1 | 1 | 1 | 204929 | 2 | 96 | 96 | 4 | 3 | 1 + 1 | 1 | 2 | 205057 | 2 | 96 | 96 | 4 | 3 | 1 +(3 rows) +SELECT (gp_toolkit.__gp_aovisimap('alter_type_aoco_delete1')).* FROM gp_dist_random('gp_id'); + tid | segno | row_num +--------------+-------+--------- + (33554432,2) | 1 | 1 +(1 row) +SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete1')).* FROM gp_dist_random('gp_id'); + gp_segment_id | tupleid | segno | columngroup_no | 
entry_no | first_row_no | file_offset | row_count +---------------+---------+-------+----------------+----------+--------------+-------------+----------- + 1 | (0,4) | 1 | 0 | 0 | 1 | 0 | 1 + 1 | (0,4) | 1 | 0 | 1 | 101 | 48 | 1 + 1 | (0,7) | 1 | 1 | 0 | 1 | 0 | 1 + 1 | (0,7) | 1 | 1 | 1 | 101 | 48 | 1 + 1 | (0,8) | 1 | 2 | 0 | 1 | 0 | 1 + 1 | (0,8) | 1 | 2 | 1 | 101 | 48 | 1 +(6 rows) +EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco_delete1'); + segid | casename | relname | rewritten +-------+--------------+-------------------------+----------- + 2 | alter_column | alter_type_aoco_delete1 | f + -1 | alter_column | alter_type_aoco_delete1 | f + 0 | alter_column | alter_type_aoco_delete1 | f + 1 | alter_column | alter_type_aoco_delete1 | f +(4 rows) +SELECT b FROM alter_type_aoco_delete1; + b +--- + 3 +(1 row) +SELECT * FROM alter_type_aoco_delete1; + a | b | c +---+---+--- + 1 | 3 | 3 +(1 row) + +-------------------------------------------------------------------------------- +-- Test if column rewrite handles blockdirectory and visimap +-- for deleted rows correctly with multiple blocks in same segfile + +-- Here, we insert data into two different blocks and delete all rows from second +-- block. 
We test if that block is still replicated in the rewritten col +-------------------------------------------------------------------------------- +CREATE TABLE alter_type_aoco_delete2(a int, b int, c int) USING ao_column; +CREATE TABLE +CREATE INDEX at_aoco_idx2 on alter_type_aoco_delete2(c); +CREATE INDEX +INSERT INTO alter_type_aoco_delete2 VALUES (1,2,2); +INSERT 0 1 +INSERT INTO alter_type_aoco_delete2 VALUES (1,3,3); +INSERT 0 1 +DELETE FROM alter_type_aoco_delete2 WHERE b = 3; +DELETE 1 +EXECUTE attribute_encoding_check ('alter_type_aoco_delete2'); + relname | attname | filenum | attoptions +-------------------------+---------+---------+------------------------------------------------------------- + alter_type_aoco_delete2 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_delete2 | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_delete2 | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete2') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 2 | 96 | 96 | 3 | 3 | 1 + 1 | 1 | 1 | 129 | 2 | 96 | 96 | 3 | 3 | 1 + 1 | 1 | 2 | 257 | 2 | 96 | 96 | 3 | 3 | 1 +(3 rows) +SELECT (gp_toolkit.__gp_aovisimap('alter_type_aoco_delete2')).* FROM gp_dist_random('gp_id'); + tid | segno | row_num +----------------+-------+--------- + (33554432,102) | 1 | 101 +(1 row) +SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete2')).* FROM gp_dist_random('gp_id'); + gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count 
+---------------+---------+-------+----------------+----------+--------------+-------------+----------- + 1 | (0,4) | 1 | 0 | 0 | 1 | 0 | 1 + 1 | (0,4) | 1 | 0 | 1 | 101 | 48 | 1 + 1 | (0,5) | 1 | 1 | 0 | 1 | 0 | 1 + 1 | (0,5) | 1 | 1 | 1 | 101 | 48 | 1 + 1 | (0,6) | 1 | 2 | 0 | 1 | 0 | 1 + 1 | (0,6) | 1 | 2 | 1 | 101 | 48 | 1 +(6 rows) +EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco_delete2'); +INSERT 0 4 +SELECT * FROM alter_type_aoco_delete2; + a | b | c +---+---+--- + 1 | 2 | 2 +(1 row) + +-- test both ALTER COLUMN TYPE and ALTER COLUMN SET ENCODING together +ALTER TABLE alter_type_aoco_delete2 ALTER COLUMN b TYPE text, ALTER COLUMN c SET ENCODING (compresstype=rle_type, compresslevel=4); +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_type_aoco_delete2'); + relname | attname | filenum | attoptions +-------------------------+---------+---------+----------------------------------------------------------------- + alter_type_aoco_delete2 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_delete2 | c | 1603 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=4'] + alter_type_aoco_delete2 | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete2') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 2 | 96 | 96 | 4 | 3 | 1 + 1 | 1 | 1 | 204929 | 2 | 96 | 96 | 4 | 3 | 1 + 1 | 1 | 2 | 205057 | 2 | 96 | 96 | 4 | 3 | 1 +(3 rows) +SELECT (gp_toolkit.__gp_aovisimap('alter_type_aoco_delete2')).* FROM gp_dist_random('gp_id'); + tid | segno | row_num +----------------+-------+--------- + (33554432,102) | 1 | 101 +(1 row) +SELECT gp_segment_id, 
(gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete2')).* FROM gp_dist_random('gp_id'); + gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count +---------------+---------+-------+----------------+----------+--------------+-------------+----------- + 1 | (0,4) | 1 | 0 | 0 | 1 | 0 | 1 + 1 | (0,4) | 1 | 0 | 1 | 101 | 48 | 1 + 1 | (0,7) | 1 | 1 | 0 | 1 | 0 | 1 + 1 | (0,7) | 1 | 1 | 1 | 101 | 48 | 1 + 1 | (0,8) | 1 | 2 | 0 | 1 | 0 | 1 + 1 | (0,8) | 1 | 2 | 1 | 101 | 48 | 1 +(6 rows) +EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco_delete2'); + segid | casename | relname | rewritten +-------+--------------+-------------------------+----------- + 0 | alter_column | alter_type_aoco_delete2 | f + 1 | alter_column | alter_type_aoco_delete2 | f + -1 | alter_column | alter_type_aoco_delete2 | f + 2 | alter_column | alter_type_aoco_delete2 | f +(4 rows) +SELECT b FROM alter_type_aoco_delete2; + b +--- + 2 +(1 row) +SELECT * FROM alter_type_aoco_delete2; + a | b | c +---+---+--- + 1 | 2 | 2 +(1 row) + +-------------------------------------------------------------------------------- +-- Test if AT ALTER COLUMN TYPE works fine when we need a full table rewrite. 
+ +-- We perform a AT subcmd which requires a full table rewrite, and check results +-- for the AT ALTER COLUMN TYPE after the table is fully rewritten +-------------------------------------------------------------------------------- + + +CREATE TABLE alter_type_aoco_fullrewrite(a int, b int, c int) using ao_column; +CREATE TABLE +INSERT INTO alter_type_aoco_fullrewrite VALUES (20,1,2); +INSERT 0 1 +EXECUTE attribute_encoding_check ('alter_type_aoco_fullrewrite'); + relname | attname | filenum | attoptions +-----------------------------+---------+---------+------------------------------------------------------------- + alter_type_aoco_fullrewrite | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_fullrewrite | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_fullrewrite | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_fullrewrite') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 1 | 48 | 48 | 1 | 3 | 1 + 1 | 1 | 1 | 129 | 1 | 48 | 48 | 1 | 3 | 1 + 1 | 1 | 2 | 257 | 1 | 48 | 48 | 1 | 3 | 1 +(3 rows) +EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco_fullrewrite'); +INSERT 0 4 +SELECT * FROM alter_type_aoco_fullrewrite; + a | b | c +----+---+--- + 20 | 1 | 2 +(1 row) + +ALTER TABLE alter_type_aoco_fullrewrite ALTER COLUMN b TYPE text, SET UNLOGGED; +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_type_aoco_fullrewrite'); + relname | attname | filenum | attoptions +-----------------------------+---------+---------+------------------------------------------------------------- + alter_type_aoco_fullrewrite | a | 1 | ['compresstype=none', 
'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_fullrewrite | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_type_aoco_fullrewrite | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_fullrewrite') ORDER BY segment_id, column_num; + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 0 | 0 | 0 | 1 | 48 | 48 | 1 | 3 | 1 + 1 | 0 | 1 | 128 | 1 | 48 | 48 | 1 | 3 | 1 + 1 | 0 | 2 | 256 | 1 | 48 | 48 | 1 | 3 | 1 +(3 rows) +EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco_fullrewrite'); + segid | casename | relname | rewritten +-------+--------------+-----------------------------+----------- + 0 | alter_column | alter_type_aoco_fullrewrite | t + 1 | alter_column | alter_type_aoco_fullrewrite | t + 2 | alter_column | alter_type_aoco_fullrewrite | t + -1 | alter_column | alter_type_aoco_fullrewrite | t +(4 rows) +-- data is intact +SELECT * FROM alter_type_aoco_fullrewrite; + a | b | c +----+---+--- + 20 | 1 | 2 +(1 row) +INSERT INTO alter_type_aoco_fullrewrite VALUES (20,'1',2); +INSERT 0 1 +-- data is intact +SELECT * FROM alter_type_aoco_fullrewrite; + a | b | c +----+---+--- + 20 | 1 | 2 + 20 | 1 | 2 +(2 rows) + +-------------------------------------------------------------------------------- +-- Test if AT ALTER COLUMN TYPE reindexes rewrite-affected indexes + +-- We create indexes on columns and test if these indexes are rewritten +-- when any of the columns are rewritten on which the indexes depend on +-- but other indexes are unaffected +-------------------------------------------------------------------------------- + +CREATE TABLE alter_type_aoco(a int, b int, c int, d int) using ao_column; +CREATE TABLE + +INSERT INTO alter_type_aoco 
VALUES (20, 1, 2, 3); +INSERT 0 1 + +CREATE UNIQUE INDEX idx1 on alter_type_aoco(a,b); +CREATE INDEX +CREATE INDEX idx2 on alter_type_aoco using btree(c); +CREATE INDEX +CREATE INDEX idx3 on alter_type_aoco using bitmap(a,b,c,d); +CREATE INDEX + +EXECUTE capturerelfilenodebefore ('alter_column_b', 'idx1'); +INSERT 0 4 +EXECUTE capturerelfilenodebefore ('alter_column_b', 'idx2'); +INSERT 0 4 +EXECUTE capturerelfilenodebefore ('alter_column_b', 'idx3'); +INSERT 0 4 + +ALTER TABLE alter_type_aoco ALTER COLUMN b TYPE text; +ALTER TABLE + +EXECUTE checkrelfilenodediff ('alter_column_b', 'idx1'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + 2 | alter_column_b | idx1 | t + -1 | alter_column_b | idx1 | t + 0 | alter_column_b | idx1 | t + 1 | alter_column_b | idx1 | t +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_b', 'idx2'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + 2 | alter_column_b | idx2 | f + 0 | alter_column_b | idx2 | f + 1 | alter_column_b | idx2 | f + -1 | alter_column_b | idx2 | f +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_b', 'idx3'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + 2 | alter_column_b | idx3 | t + -1 | alter_column_b | idx3 | t + 0 | alter_column_b | idx3 | t + 1 | alter_column_b | idx3 | t +(4 rows) +INSERT INTO alter_type_aoco VALUES (20, '2', 3, 4); +INSERT 0 1 +EXECUTE capturerelfilenodebefore ('alter_column_c', 'idx1'); +INSERT 0 4 +EXECUTE capturerelfilenodebefore ('alter_column_c', 'idx2'); +INSERT 0 4 +EXECUTE capturerelfilenodebefore ('alter_column_c', 'idx3'); +INSERT 0 4 + +ALTER TABLE alter_type_aoco ALTER COLUMN c TYPE text; +ALTER TABLE + +EXECUTE checkrelfilenodediff ('alter_column_c', 'idx1'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + -1 | alter_column_c | idx1 | f + 2 | alter_column_c | idx1 | f + 0 | alter_column_c | idx1 | 
f + 1 | alter_column_c | idx1 | f +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_c', 'idx2'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + 2 | alter_column_c | idx2 | t + 0 | alter_column_c | idx2 | t + 1 | alter_column_c | idx2 | t + -1 | alter_column_c | idx2 | t +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_c', 'idx3'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + 0 | alter_column_c | idx3 | t + 1 | alter_column_c | idx3 | t + -1 | alter_column_c | idx3 | t + 2 | alter_column_c | idx3 | t +(4 rows) +INSERT INTO alter_type_aoco VALUES (20, '3', '4', 5); +INSERT 0 1 +EXECUTE capturerelfilenodebefore ('alter_column_d', 'idx1'); +INSERT 0 4 +EXECUTE capturerelfilenodebefore ('alter_column_d', 'idx2'); +INSERT 0 4 +EXECUTE capturerelfilenodebefore ('alter_column_d', 'idx3'); +INSERT 0 4 + +ALTER TABLE alter_type_aoco ALTER COLUMN d TYPE text; +ALTER TABLE + +EXECUTE checkrelfilenodediff ('alter_column_d', 'idx1'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + 2 | alter_column_d | idx1 | f + 0 | alter_column_d | idx1 | f + 1 | alter_column_d | idx1 | f + -1 | alter_column_d | idx1 | f +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_d', 'idx2'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + 0 | alter_column_d | idx2 | f + 1 | alter_column_d | idx2 | f + 2 | alter_column_d | idx2 | f + -1 | alter_column_d | idx2 | f +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_d', 'idx3'); + segid | casename | relname | rewritten +-------+----------------+---------+----------- + 0 | alter_column_d | idx3 | t + 1 | alter_column_d | idx3 | t + -1 | alter_column_d | idx3 | t + 2 | alter_column_d | idx3 | t +(4 rows) +INSERT INTO alter_type_aoco VALUES (20, '4', '5', '6'); +INSERT 0 1 +-- data is intact +SELECT * FROM alter_type_aoco; + a | b | c | d +----+---+---+--- + 20 | 1 | 
2 | 3 + 20 | 2 | 3 | 4 + 20 | 3 | 4 | 5 + 20 | 4 | 5 | 6 +(4 rows) + + +-------------------------------------------------------------------------------- +-- Test if AT ALTER COLUMN TYPE for partitioned table + +-- create 2 level partitions with same schema (regular case) and create index on some column +-- alter column on the partition table root and on the partitions and we check rewrite status and data status +-- filenum for partition roots +-------------------------------------------------------------------------------- +CREATE TABLE part_alter_col(a int, b int, c int) PARTITION BY RANGE (A) (partition aa start (1) end (5) every (1)) USING ao_column; +CREATE TABLE +INSERT INTO part_alter_col VALUES (1,2,3); +INSERT 0 1 +CREATE INDEX part_alter_col_idx1 on part_alter_col(b); +CREATE INDEX +CREATE INDEX part_alter_col_idx2 on part_alter_col(c); +CREATE INDEX +EXECUTE capturerelfilenodebefore ('alter_column_b', 'part_alter_col_1_prt_aa_1'); +INSERT 0 4 +EXECUTE capturerelfilenodebefore ('alter_column_b', 'part_alter_col_1_prt_aa_1_b_idx'); +INSERT 0 4 +EXECUTE capturerelfilenodebefore ('alter_column_b', 'part_alter_col_1_prt_aa_1_c_idx'); +INSERT 0 4 +EXECUTE attribute_encoding_check ('part_alter_col'); + relname | attname | filenum | attoptions +----------------+---------+---------+------------------------------------------------------------- + part_alter_col | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + part_alter_col | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + part_alter_col | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) + +ALTER TABLE part_alter_col ALTER COLUMN b TYPE text; +ALTER TABLE + +EXECUTE attribute_encoding_check ('part_alter_col'); + relname | attname | filenum | attoptions +----------------+---------+---------+------------------------------------------------------------- + part_alter_col | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + 
part_alter_col | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + part_alter_col | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +EXECUTE checkrelfilenodediff ('alter_column_b', 'part_alter_col_1_prt_aa_1'); + segid | casename | relname | rewritten +-------+----------------+---------------------------+----------- + 2 | alter_column_b | part_alter_col_1_prt_aa_1 | f + -1 | alter_column_b | part_alter_col_1_prt_aa_1 | f + 0 | alter_column_b | part_alter_col_1_prt_aa_1 | f + 1 | alter_column_b | part_alter_col_1_prt_aa_1 | f +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_b', 'part_alter_col_1_prt_aa_1_b_idx'); + segid | casename | relname | rewritten +-------+----------------+---------------------------------+----------- + 2 | alter_column_b | part_alter_col_1_prt_aa_1_b_idx | t + -1 | alter_column_b | part_alter_col_1_prt_aa_1_b_idx | t + 0 | alter_column_b | part_alter_col_1_prt_aa_1_b_idx | t + 1 | alter_column_b | part_alter_col_1_prt_aa_1_b_idx | t +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_b', 'part_alter_col_1_prt_aa_1_c_idx'); + segid | casename | relname | rewritten +-------+----------------+---------------------------------+----------- + -1 | alter_column_b | part_alter_col_1_prt_aa_1_c_idx | f + 2 | alter_column_b | part_alter_col_1_prt_aa_1_c_idx | f + 0 | alter_column_b | part_alter_col_1_prt_aa_1_c_idx | f + 1 | alter_column_b | part_alter_col_1_prt_aa_1_c_idx | f +(4 rows) +SELECT * FROM part_alter_col; + a | b | c +---+---+--- + 1 | 2 | 3 +(1 row) +DROP TABLE part_alter_col; +DROP TABLE +CHECKPOINT; +CHECKPOINT +-- check if all files are dropped correctly +SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'part_alter_col'); + gp_segment_id | tablespace | filename +---------------+------------+---------- +(0 rows) +-------------------------------------------------------------------------------- +-- 
Test if column rewrite works when AT ALTER COLUMN TYPE for a column +-- and then alter it back to the original type + +-- Check reloptions, pg_attribute_encoding, visimap, blkdirectory alongside the rewrite +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_back(a int, b int ENCODING (compresstype='zlib', compresslevel=5), c int) using ao_column with (compresstype='zlib', compresslevel=2); +CREATE TABLE +INSERT INTO alter_column_back VALUES (1,2,3), (1,2,4), (1,2,5); +INSERT 0 3 +CREATE INDEX alter_column_back_idx1 ON alter_column_back(a,c); +CREATE INDEX +DELETE FROM alter_column_back WHERE c=5; +DELETE 1 +EXECUTE capturerelfilenodebefore ('alter_column', 'alter_column_back'); +INSERT 0 4 +SELECT atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_back'::regclass AND attname='b'; + atttypid +---------- + integer +(1 row) + +ALTER TABLE alter_column_back ALTER COLUMN b TYPE text; +ALTER TABLE + +SELECT c.relname, c.reloptions FROM pg_class c WHERE c.relname LIKE 'alter_column_back'; + relname | reloptions +-------------------+------------------------------------------------------------------------------ + alter_column_back | ['compresstype=zlib', 'compresslevel=2', 'blocksize=32768', 'checksum=true'] +(1 row) +EXECUTE checkrelfilenodediff ('alter_column', 'alter_column_back'); + segid | casename | relname | rewritten +-------+--------------+-------------------+----------- + 2 | alter_column | alter_column_back | f + 0 | alter_column | alter_column_back | f + 1 | alter_column | alter_column_back | f + -1 | alter_column | alter_column_back | f +(4 rows) +SELECT atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_back'::regclass AND attname='b'; + atttypid +---------- + text +(1 row) +INSERT INTO alter_column_back VALUES (1,'2',3); +INSERT 0 1 +DELETE FROM alter_column_back where c=4; +DELETE 1 +EXECUTE capturerelfilenodebefore ('alter_column_back', 'alter_column_back'); +INSERT 0 4 + 
+ALTER TABLE alter_column_back ALTER COLUMN b TYPE int using b::int; +ALTER TABLE + +SELECT c.relname, c.reloptions FROM pg_class c WHERE c.relname LIKE 'alter_column_back'; + relname | reloptions +-------------------+------------------------------------------------------------------------------ + alter_column_back | ['compresstype=zlib', 'compresslevel=2', 'blocksize=32768', 'checksum=true'] +(1 row) +EXECUTE attribute_encoding_check ('alter_column_back'); + relname | attname | filenum | attoptions +-------------------+---------+---------+------------------------------------------------------------- + alter_column_back | a | 1 | ['compresstype=zlib', 'compresslevel=2', 'blocksize=32768'] + alter_column_back | c | 3 | ['compresstype=zlib', 'compresslevel=2', 'blocksize=32768'] + alter_column_back | b | 2 | ['compresstype=zlib', 'compresslevel=5', 'blocksize=32768'] +(3 rows) +SELECT (gp_toolkit.__gp_aovisimap('alter_column_back')).* FROM gp_dist_random('gp_id'); + tid | segno | row_num +--------------+-------+--------- + (33554432,3) | 1 | 2 + (33554432,4) | 1 | 3 +(2 rows) +SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_column_back')).* FROM gp_dist_random('gp_id'); + gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count +---------------+---------+-------+----------------+----------+--------------+-------------+----------- + 1 | (0,5) | 1 | 0 | 0 | 1 | 0 | 3 + 1 | (0,5) | 1 | 0 | 1 | 101 | 48 | 1 + 1 | (0,7) | 1 | 2 | 0 | 1 | 0 | 3 + 1 | (0,7) | 1 | 2 | 1 | 101 | 48 | 1 + 1 | (0,8) | 1 | 1 | 0 | 1 | 0 | 3 + 1 | (0,8) | 1 | 1 | 1 | 101 | 48 | 1 +(6 rows) +EXECUTE checkrelfilenodediff ('alter_column_back', 'alter_column_back'); + segid | casename | relname | rewritten +-------+-------------------+-------------------+----------- + 2 | alter_column_back | alter_column_back | f + 0 | alter_column_back | alter_column_back | f + 1 | alter_column_back | alter_column_back | f + -1 | alter_column_back | 
alter_column_back | f +(4 rows) +SELECT atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_back'::regclass AND attname='b'; + atttypid +---------- + integer +(1 row) +SELECT * FROM alter_column_back; + a | b | c +---+---+--- + 1 | 2 | 3 + 1 | 2 | 3 +(2 rows) +DROP TABLE alter_column_back; +DROP TABLE +CHECKPOINT; +CHECKPOINT +-- check if all files are dropped correctly +SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_back'); + gp_segment_id | tablespace | filename +---------------+------------+---------- +(0 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE and SET ACCESS METHOD can be done in the same command +-- Verify if we rewrite the table +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_set_am(a int, b int, c int) using ao_column; +CREATE TABLE +INSERT INTO alter_column_set_am VALUES (1,2,3); +INSERT 0 1 +EXECUTE capturerelfilenodebefore ('alter_column_set_am_aorow', 'alter_column_set_am'); +INSERT 0 4 +EXECUTE attribute_encoding_check ('alter_column_set_am'); + relname | attname | filenum | attoptions +---------------------+---------+---------+------------------------------------------------------------- + alter_column_set_am | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_set_am | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_set_am | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) + +ALTER TABLE alter_column_set_am SET ACCESS METHOD ao_row, ALTER COLUMN b TYPE text; +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_column_set_am'); + relname | attname | filenum | attoptions +---------+---------+---------+------------ +(0 rows) +EXECUTE checkrelfilenodediff ('alter_column_set_am_aorow', 'alter_column_set_am'); 
+ segid | casename | relname | rewritten +-------+---------------------------+---------------------+----------- + -1 | alter_column_set_am_aorow | alter_column_set_am | t + 2 | alter_column_set_am_aorow | alter_column_set_am | t + 0 | alter_column_set_am_aorow | alter_column_set_am | t + 1 | alter_column_set_am_aorow | alter_column_set_am | t +(4 rows) +SELECT * FROM alter_column_set_am; + a | b | c +---+---+--- + 1 | 2 | 3 +(1 row) +INSERT INTO alter_column_set_am VALUES (1,'2',3); +INSERT 0 1 +EXECUTE capturerelfilenodebefore ('alter_column_set_am_aocol', 'alter_column_set_am'); +INSERT 0 4 +EXECUTE attribute_encoding_check ('alter_column_set_am'); + relname | attname | filenum | attoptions +---------+---------+---------+------------ +(0 rows) + +ALTER TABLE alter_column_set_am SET ACCESS METHOD ao_column, ALTER COLUMN c TYPE text; +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_column_set_am'); + relname | attname | filenum | attoptions +---------------------+---------+---------+------------------------------------------------------------- + alter_column_set_am | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_set_am | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_set_am | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +EXECUTE checkrelfilenodediff ('alter_column_set_am_aocol', 'alter_column_set_am'); + segid | casename | relname | rewritten +-------+---------------------------+---------------------+----------- + -1 | alter_column_set_am_aocol | alter_column_set_am | t + 2 | alter_column_set_am_aocol | alter_column_set_am | t + 0 | alter_column_set_am_aocol | alter_column_set_am | t + 1 | alter_column_set_am_aocol | alter_column_set_am | t +(4 rows) +SELECT * FROM alter_column_set_am; + a | b | c +---+---+--- + 1 | 2 | 3 + 1 | 2 | 3 +(2 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN 
TYPE and ADD COLUMN can be done in the same command +-- Verify if we don't rewrite the table +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_add_col(a int, b int, c int) using ao_column; +CREATE TABLE +INSERT INTO alter_column_add_col VALUES (1,2,3); +INSERT 0 1 +EXECUTE capturerelfilenodebefore ('alter_col_add_col', 'alter_column_add_col'); +INSERT 0 4 +EXECUTE attribute_encoding_check ('alter_column_add_col'); + relname | attname | filenum | attoptions +----------------------+---------+---------+------------------------------------------------------------- + alter_column_add_col | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_add_col | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_add_col | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) + +ALTER TABLE alter_column_add_col ADD COLUMN d int, ALTER COLUMN b TYPE text; +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_column_add_col'); + relname | attname | filenum | attoptions +----------------------+---------+---------+------------------------------------------------------------- + alter_column_add_col | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_add_col | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_add_col | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_add_col | d | 4 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(4 rows) +EXECUTE checkrelfilenodediff ('alter_column_add_col', 'alter_column_add_col'); + segid | casename | relname | rewritten +-------+----------+---------+----------- +(0 rows) +SELECT * FROM alter_column_add_col; + a | b | c | d +---+---+---+--- + 1 | 2 | 3 | +(1 row) +INSERT INTO alter_column_add_col VALUES (1,'2',3, 4); +INSERT 0 1 +SELECT * FROM alter_column_add_col; + a | b | c | d 
+---+---+---+--- + 1 | 2 | 3 | + 1 | 2 | 3 | 4 +(2 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE and other AT commands can be done in the same command +-- Verify if we rewrite the table +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_other(a int, b int, c int) using ao_column; +CREATE TABLE +INSERT INTO alter_column_other VALUES (1,2,3); +INSERT 0 1 +EXECUTE capturerelfilenodebefore ('alter_column_other', 'alter_column_other'); +INSERT 0 4 +EXECUTE attribute_encoding_check ('alter_column_other'); + relname | attname | filenum | attoptions +--------------------+---------+---------+------------------------------------------------------------- + alter_column_other | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_other | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_other | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) + +ALTER TABLE alter_column_other ALTER COLUMN b TYPE text, ALTER COLUMN c SET DEFAULT 5; +ALTER TABLE + +EXECUTE attribute_encoding_check ('alter_column_other'); + relname | attname | filenum | attoptions +--------------------+---------+---------+------------------------------------------------------------- + alter_column_other | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_other | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + alter_column_other | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +EXECUTE checkrelfilenodediff ('alter_column_other', 'alter_column_other'); + segid | casename | relname | rewritten +-------+--------------------+--------------------+----------- + 2 | alter_column_other | alter_column_other | t + -1 | alter_column_other | alter_column_other | t + 0 | alter_column_other | alter_column_other | t + 1 | 
alter_column_other | alter_column_other | t +(4 rows) +SELECT * FROM alter_column_other; + a | b | c +---+---+--- + 1 | 2 | 3 +(1 row) +INSERT INTO alter_column_other VALUES (1,'2'); +INSERT 0 1 +SELECT * FROM alter_column_other; + a | b | c +---+---+--- + 1 | 2 | 3 + 1 | 2 | 5 +(2 rows) + +-------------------------------------------------------------------------------- +-- Test if column rewrite works after vacuum on deleted rows +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_vacuum(a int, b int) using ao_column; +CREATE TABLE +INSERT INTO alter_column_vacuum SELECT 1,i FROM generate_series(1,1000)i; +INSERT 0 1000 +DELETE FROM alter_column_vacuum WHERE b>10; +DELETE 990 +VACUUM alter_column_vacuum; +VACUUM +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_vacuum'); + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 3 | 1 + 1 | 1 | 1 | 129 | 0 | 0 | 0 | 2 | 3 | 1 + 1 | 2 | 0 | 2 | 10 | 80 | 80 | 0 | 3 | 1 + 1 | 2 | 1 | 130 | 10 | 80 | 80 | 0 | 3 | 1 +(4 rows) +-- should succeed +ALTER TABLE alter_column_vacuum ALTER COLUMN b TYPE text; +ALTER TABLE +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_vacuum'); + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 3 | 1 + 1 | 1 | 1 | 204929 | 0 | 0 | 0 | 2 | 3 | 1 + 1 | 2 | 0 | 2 | 10 | 80 | 80 | 1 | 3 | 1 + 1 | 2 | 1 | 204930 | 10 | 64 | 64 | 1 | 3 | 1 +(4 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE works correctly when constraints 
are involved +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_constraints(a int, b int check (b > 0)) USING ao_column; +CREATE TABLE +INSERT INTO alter_column_constraints SELECT i,i FROM generate_series(1,10)i; +INSERT 0 10 +-- should error +ALTER TABLE alter_column_constraints ALTER COLUMN b TYPE text; +ERROR: operator does not exist: text > integer +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +-- should succeed, and constraint remains +EXECUTE capturerelfilenodebefore ('alter_column_constraints_col_rewrite', 'alter_column_constraints'); +INSERT 0 4 +ALTER TABLE alter_column_constraints ALTER COLUMN b TYPE bigint; +ALTER TABLE +EXECUTE checkrelfilenodediff ('alter_column_constraints_col_rewrite', 'alter_column_constraints'); + segid | casename | relname | rewritten +-------+--------------------------------------+--------------------------+----------- + 2 | alter_column_constraints_col_rewrite | alter_column_constraints | f + -1 | alter_column_constraints_col_rewrite | alter_column_constraints | f + 0 | alter_column_constraints_col_rewrite | alter_column_constraints | f + 1 | alter_column_constraints_col_rewrite | alter_column_constraints | f +(4 rows) + +EXECUTE capturerelfilenodebefore ('alter_column_constraints_fullrewrite', 'alter_column_constraints'); +INSERT 0 4 +-- should succeed and relfile changed (not using the column rewrite optimization because there's other command) +ALTER TABLE alter_column_constraints ADD CONSTRAINT checkb2 CHECK (b < 100), ALTER COLUMN b TYPE int; +ALTER TABLE +EXECUTE checkrelfilenodediff ('alter_column_constraints_fullrewrite', 'alter_column_constraints'); + segid | casename | relname | rewritten +-------+--------------------------------------+--------------------------+----------- + -1 | alter_column_constraints_fullrewrite | alter_column_constraints | t + 0 | alter_column_constraints_fullrewrite | 
alter_column_constraints | t + 1 | alter_column_constraints_fullrewrite | alter_column_constraints | t + 2 | alter_column_constraints_fullrewrite | alter_column_constraints | t +(4 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE works correctly when seg0 has some data +-- Check if we handle rewrite on seg0 +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_seg0(a int, b int) USING ao_column; +CREATE TABLE +1: BEGIN; +BEGIN +1: ALTER TABLE alter_column_seg0 ADD COLUMN c int; +ALTER TABLE +1: INSERT INTO alter_column_seg0 SELECT 1,i,i FROM generate_series(1,10)i; +INSERT 0 10 +1: COMMIT; +COMMIT +INSERT INTO alter_column_seg0 SELECT 1,i,i FROM generate_series(1,10)i; +INSERT 0 10 +ALTER TABLE alter_column_seg0 ALTER COLUMN b TYPE text; +ALTER TABLE +SELECT count(*) FROM alter_column_seg0; + count +------- + 20 +(1 row) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_seg0'); + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 0 | 0 | 0 | 10 | 80 | 80 | 2 | 3 | 1 + 1 | 0 | 1 | 204928 | 10 | 64 | 64 | 2 | 3 | 1 + 1 | 0 | 2 | 256 | 10 | 80 | 80 | 2 | 3 | 1 + 1 | 1 | 0 | 1 | 10 | 80 | 80 | 2 | 3 | 1 + 1 | 1 | 1 | 204929 | 10 | 64 | 64 | 2 | 3 | 1 + 1 | 1 | 2 | 257 | 10 | 80 | 80 | 2 | 3 | 1 +(6 rows) +DROP TABLE alter_column_seg0; +DROP TABLE +CHECKPOINT; +CHECKPOINT +SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_seg0'); + gp_segment_id | tablespace | filename +---------------+------------+---------- +(0 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE works correctly multiple 
segfiles are created +-- due to multiple concurrency +-- Check if we handle rewrite on each segfile correctly +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_multiple_concurrency(a int, b int) USING ao_column; +CREATE TABLE +1: BEGIN; +BEGIN +2: BEGIN; +BEGIN +1: INSERT INTO alter_column_multiple_concurrency SELECT 1,i FROM generate_series(1,10)i; +INSERT 0 10 +2: INSERT INTO alter_column_multiple_concurrency SELECT 1,i FROM generate_series(1,10)i; +INSERT 0 10 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +ALTER TABLE alter_column_multiple_concurrency ALTER COLUMN b TYPE text; +ALTER TABLE +SELECT count(*) FROM alter_column_multiple_concurrency; + count +------- + 20 +(1 row) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_multiple_concurrency'); + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 10 | 80 | 80 | 2 | 3 | 1 + 1 | 1 | 1 | 204929 | 10 | 64 | 64 | 2 | 3 | 1 + 1 | 2 | 0 | 2 | 10 | 80 | 80 | 2 | 3 | 1 + 1 | 2 | 1 | 204930 | 10 | 64 | 64 | 2 | 3 | 1 +(4 rows) +DROP TABLE alter_column_multiple_concurrency; +DROP TABLE +CHECKPOINT; +CHECKPOINT +SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_multiple_concurrency'); + gp_segment_id | tablespace | filename +---------------+------------+---------- +(0 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE works correctly when a segfile is in AWAITING_DROP state +-- Check if we handle rewrite on each segfile correctly +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_awaiting_drop(a int, b int) USING ao_column; +CREATE TABLE 
+1: BEGIN; +BEGIN +2: BEGIN; +BEGIN +1: INSERT INTO alter_column_awaiting_drop SELECT 1,i FROM generate_series(1,10)i; +INSERT 0 10 +2: INSERT INTO alter_column_awaiting_drop SELECT 1,i FROM generate_series(11,20)i; +INSERT 0 10 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +DELETE FROM alter_column_awaiting_drop WHERE b > 10; +DELETE 10 +VACUUM alter_column_awaiting_drop; +VACUUM +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_awaiting_drop'); + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 10 | 80 | 80 | 2 | 3 | 1 + 1 | 1 | 1 | 129 | 10 | 80 | 80 | 2 | 3 | 1 + 1 | 2 | 0 | 2 | 0 | 0 | 0 | 1 | 3 | 1 + 1 | 2 | 1 | 130 | 0 | 0 | 0 | 1 | 3 | 1 + 1 | 3 | 0 | 3 | 0 | 0 | 0 | 0 | 3 | 1 + 1 | 3 | 1 | 131 | 0 | 0 | 0 | 0 | 3 | 1 +(6 rows) +ALTER TABLE alter_column_awaiting_drop ALTER COLUMN b TYPE text; +ALTER TABLE +SELECT count(*) FROM alter_column_awaiting_drop; + count +------- + 10 +(1 row) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_awaiting_drop'); + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 10 | 80 | 80 | 3 | 3 | 1 + 1 | 1 | 1 | 204929 | 10 | 64 | 64 | 3 | 3 | 1 + 1 | 2 | 0 | 2 | 0 | 0 | 0 | 1 | 3 | 1 + 1 | 2 | 1 | 204930 | 0 | 0 | 0 | 1 | 3 | 1 + 1 | 3 | 0 | 3 | 0 | 0 | 0 | 0 | 3 | 1 + 1 | 3 | 1 | 204931 | 0 | 0 | 0 | 0 | 3 | 1 +(6 rows) +DROP TABLE alter_column_awaiting_drop; +DROP TABLE +CHECKPOINT; +CHECKPOINT +SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_awaiting_drop'); + gp_segment_id | tablespace | filename 
+---------------+------------+---------- +(0 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE works correctly for 0 inserted rows +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_zero_tupcount(a int, b int) USING ao_column; +CREATE TABLE +1: BEGIN; +BEGIN +2: BEGIN; +BEGIN +1: INSERT INTO alter_column_zero_tupcount SELECT 1,i FROM generate_series(1,10)i; +INSERT 0 10 +2: INSERT INTO alter_column_zero_tupcount SELECT 1,i FROM generate_series(1,10)i; +INSERT 0 10 +1: ABORT; +ROLLBACK +2: ABORT; +ROLLBACK +ALTER TABLE alter_column_zero_tupcount ALTER COLUMN b TYPE text; +ALTER TABLE +SELECT count(*) FROM alter_column_zero_tupcount; + count +------- + 0 +(1 row) +SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_zero_tupcount'); + segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state +------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+------- + 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 3 | 1 + 1 | 1 | 1 | 204929 | 0 | 0 | 0 | 0 | 3 | 1 + 1 | 2 | 0 | 2 | 0 | 0 | 0 | 0 | 3 | 1 + 1 | 2 | 1 | 204930 | 0 | 0 | 0 | 0 | 3 | 1 +(4 rows) +DROP TABLE alter_column_zero_tupcount; +DROP TABLE +CHECKPOINT; +CHECKPOINT +SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_zero_tupcount'); + gp_segment_id | tablespace | filename +---------------+------------+---------- +(0 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE works correctly for generated columns. 
+-- Check if we error out on ALTERing type columns that have dependent generated columns +-------------------------------------------------------------------------------- +CREATE TABLE alter_column_generated_cols(a int, b int, c int GENERATED ALWAYS AS (a+b) STORED, d int GENERATED ALWAYS AS (tableoid::regclass) STORED) USING ao_column; +CREATE TABLE +INSERT INTO alter_column_generated_cols SELECT 1,i FROM generate_series(1,5)i; +INSERT 0 5 +SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_generated_cols'::regclass and attname in ('b','c','d'); + attname | atttypid +---------+---------- + b | integer + c | integer + d | integer +(3 rows) +-- b shouldn't be allowed for alter type +ALTER TABLE alter_column_generated_cols ALTER COLUMN b TYPE text; +ERROR: cannot alter type of a column used by a generated column +DETAIL: Column "b" is used by generated column "c". +SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_generated_cols'::regclass and attname in ('b','c','d'); + attname | atttypid +---------+---------- + b | integer + c | integer + d | integer +(3 rows) +ALTER TABLE alter_column_generated_cols ALTER COLUMN c TYPE text; +ALTER TABLE +SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_generated_cols'::regclass and attname in ('b','c','d'); + attname | atttypid +---------+---------- + b | integer + c | text + d | integer +(3 rows) +ALTER TABLE alter_column_generated_cols ALTER COLUMN d TYPE text; +ALTER TABLE +SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_generated_cols'::regclass and attname in ('b','c','d'); + attname | atttypid +---------+---------- + b | integer + c | text + d | text +(3 rows) + +-------------------------------------------------------------------------------- +-- Test if ALTER COLUMN TYPE blocks concurrent INSERT, and vice versa +-------------------------------------------------------------------------------- +CREATE 
TABLE aoco_concurrent_inserts(a int, b int, c int) USING ao_column; +CREATE TABLE +INSERT INTO aoco_concurrent_inserts SELECT i,i,i FROM generate_series(1,10)i; +INSERT 0 10 +1: BEGIN; +BEGIN +1: INSERT INTO aoco_concurrent_inserts SELECT i,i,i FROM generate_series(1,10)i; +INSERT 0 10 +2&: ALTER TABLE aoco_concurrent_inserts ALTER COLUMN b TYPE text; +1: END; +COMMIT +2<: <... completed> +ALTER TABLE +-- should see 20 rows +SELECT count(*) FROM aoco_concurrent_inserts; + count +------- + 20 +(1 row) +1: BEGIN; +BEGIN +1: ALTER TABLE aoco_concurrent_inserts ALTER COLUMN c TYPE text; +ALTER TABLE +2&: INSERT INTO aoco_concurrent_inserts SELECT i,i,i FROM generate_series(1,10)i; +1: END; +COMMIT +2<: <... completed> +INSERT 0 10 +-- should see 30 rows +SELECT count(*) FROM aoco_concurrent_inserts; + count +------- + 30 +(1 row) + +-------------------------------------------------------------------------------- +-- Tests for ALTER COLUMN SET ENCODING +-------------------------------------------------------------------------------- + +-- +-- Basic testing +-- +create table atsetenc(c1 int, c2 int) using ao_column distributed replicated; +CREATE TABLE +-- first check an empty table +-- check the initial encoding settings +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+---------+---------+------------------------------------------------------------- + atsetenc | c1 | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + atsetenc | c2 | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(2 rows) +-- no table rewrite +execute capturerelfilenodebefore('set encoding - empty', 'atsetenc'); +INSERT 0 4 +alter table atsetenc alter column c1 set encoding (compresstype=zlib,compresslevel=9); +ALTER TABLE +execute checkrelfilenodediff('set encoding - empty', 'atsetenc'); + segid | casename | relname | rewritten +-------+----------------------+----------+----------- + 0 | set encoding - empty | atsetenc 
| f + 1 | set encoding - empty | atsetenc | f + 2 | set encoding - empty | atsetenc | f + -1 | set encoding - empty | atsetenc | f +(4 rows) +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+---------+---------+------------------------------------------------------------- + atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(2 rows) +select * from atsetenc; + c1 | c2 +----+---- +(0 rows) + +-- now insert some data and check +insert into atsetenc values(1,2); +INSERT 0 1 +-- no table rewrite setting encoding +execute capturerelfilenodebefore('set encoding - basic', 'atsetenc'); +INSERT 0 4 +alter table atsetenc alter column c2 set encoding (compresstype=zlib,compresslevel=9); +ALTER TABLE +-- result intact +select * from atsetenc; + c1 | c2 +----+---- + 1 | 2 +(1 row) +execute checkrelfilenodediff('set encoding - basic', 'atsetenc'); + segid | casename | relname | rewritten +-------+----------------------+----------+----------- + 0 | set encoding - basic | atsetenc | f + 1 | set encoding - basic | atsetenc | f + -1 | set encoding - basic | atsetenc | f + 2 | set encoding - basic | atsetenc | f +(4 rows) +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+---------+---------+------------------------------------------------------------- + atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] +(2 rows) + +-- check if the encoding takes actual effect +alter table atsetenc add column c3 text default 'a'; +ALTER TABLE +insert into atsetenc values (1,2,repeat('a',10000)); +INSERT 0 1 +-- before alter encoding, no compression by default +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions 
+----------+---------+---------+------------------------------------------------------------- + atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c3 | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(3 rows) +select relname, attnum, size, compression_ratio from gp_toolkit.gp_column_size where relid::regclass::text = 'atsetenc' and gp_segment_id = 0 and attnum = 3; + relname | attnum | size | compression_ratio +----------+--------+-------+------------------- + atsetenc | 3 | 10096 | 1.00 +(1 row) +execute capturerelfilenodebefore('set encoding - compress effect', 'atsetenc'); +INSERT 0 4 +alter table atsetenc alter column c3 set encoding (compresstype=zlib,compresslevel=9); +ALTER TABLE +execute capturerelfilenodebefore('set encoding - compress effect', 'atsetenc'); +INSERT 0 4 +-- after alter encoding, size is reduced +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+---------+---------+------------------------------------------------------------- + atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c3 | 1603 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] +(3 rows) +select relname,attnum,size,compression_ratio from gp_toolkit.gp_column_size where relid::regclass::text = 'atsetenc' and gp_segment_id = 0 and attnum = 3; + relname | attnum | size | compression_ratio +----------+--------+------+------------------- + atsetenc | 3 | 120 | 84.13 +(1 row) +select length(c3) from atsetenc; + length +-------- + 1 + 10000 +(2 rows) + +-- check if we'll re-index the index for the rewritten column, and not others +create index atsetenc_idx2 on atsetenc(c2); +CREATE INDEX +create index atsetenc_idx3 on atsetenc(c3); +CREATE INDEX +execute 
capturerelfilenodebefore ('alter_column_c2', 'atsetenc_idx2'); +INSERT 0 4 +execute capturerelfilenodebefore ('alter_column_c2', 'atsetenc_idx3'); +INSERT 0 4 +alter table atsetenc alter column c2 set encoding (compresstype=zlib,compresslevel=1); +ALTER TABLE +execute checkrelfilenodediff('alter_column_c2', 'atsetenc_idx2'); + segid | casename | relname | rewritten +-------+-----------------+---------------+----------- + -1 | alter_column_c2 | atsetenc_idx2 | t + 2 | alter_column_c2 | atsetenc_idx2 | t + 0 | alter_column_c2 | atsetenc_idx2 | t + 1 | alter_column_c2 | atsetenc_idx2 | t +(4 rows) +execute checkrelfilenodediff('alter_column_c2', 'atsetenc_idx3'); + segid | casename | relname | rewritten +-------+-----------------+---------------+----------- + 2 | alter_column_c2 | atsetenc_idx3 | f + -1 | alter_column_c2 | atsetenc_idx3 | f + 0 | alter_column_c2 | atsetenc_idx3 | f + 1 | alter_column_c2 | atsetenc_idx3 | f +(4 rows) + +-- +-- mixed AT commands +-- +-- 1. with ALTER COLUMN TYPE +alter table atsetenc add column c4 int default 4, add column c5 int default 5; +ALTER TABLE +execute capturerelfilenodebefore('set encoding - withaltercoltype', 'atsetenc'); +INSERT 0 4 +-- alter column type + alter column set encoding. The subcommands' order shouldn't matter. 
+alter table atsetenc alter column c4 type text, alter column c4 set encoding (compresstype=zlib,compresslevel=9); +ALTER TABLE +alter table atsetenc alter column c5 set encoding (compresstype=zlib,compresslevel=9), alter column c5 type text; +ALTER TABLE +-- no rewrite +execute checkrelfilenodediff('set encoding - withaltercoltype', 'atsetenc'); + segid | casename | relname | rewritten +-------+---------------------------------+----------+----------- + 2 | set encoding - withaltercoltype | atsetenc | f + 0 | set encoding - withaltercoltype | atsetenc | f + 1 | set encoding - withaltercoltype | atsetenc | f + -1 | set encoding - withaltercoltype | atsetenc | f +(4 rows) +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+---------+---------+------------------------------------------------------------- + atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | c3 | 1603 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c4 | 1604 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c5 | 1605 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] +(5 rows) +select c4, c5 from atsetenc; + c4 | c5 +----+---- + 4 | 5 + 4 | 5 +(2 rows) + +-- 2. 
with ADD COLUMN +execute capturerelfilenodebefore('set encoding - withaddcol', 'atsetenc'); +INSERT 0 4 +alter table atsetenc add column c6 int default 6, alter column c5 set encoding (compresstype=zlib,compresslevel=1); +ALTER TABLE +-- no rewrite +execute checkrelfilenodediff('set encoding - withaddcol', 'atsetenc'); + segid | casename | relname | rewritten +-------+---------------------------+----------+----------- + 0 | set encoding - withaddcol | atsetenc | f + 1 | set encoding - withaddcol | atsetenc | f + 2 | set encoding - withaddcol | atsetenc | f + -1 | set encoding - withaddcol | atsetenc | f +(4 rows) +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+---------+---------+------------------------------------------------------------- + atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | c3 | 1603 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c4 | 1604 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c5 | 5 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | c6 | 6 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] +(6 rows) +select c5, c6 from atsetenc; + c5 | c6 +----+---- + 5 | 6 + 5 | 6 +(2 rows) + +-- 3. 
with DROP COLUMN +alter table atsetenc add column c7 int default 7; +ALTER TABLE +execute capturerelfilenodebefore('set encoding - withdropcol', 'atsetenc'); +INSERT 0 4 +-- alter and drop the same column, should complaint +alter table atsetenc alter column c7 set encoding (compresstype=zlib,compresslevel=9), drop column c7; +ERROR: column "c7" of relation "atsetenc" does not exist +-- alter and drop different columns, should work and no rewrite +alter table atsetenc alter column c7 set encoding (compresstype=zlib,compresslevel=9), drop column c3; +ALTER TABLE +execute checkrelfilenodediff('set encoding - withdropcol', 'atsetenc'); + segid | casename | relname | rewritten +-------+----------------------------+----------+----------- + 0 | set encoding - withdropcol | atsetenc | f + 1 | set encoding - withdropcol | atsetenc | f + 2 | set encoding - withdropcol | atsetenc | f + -1 | set encoding - withdropcol | atsetenc | f +(4 rows) +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+------------------------------+---------+------------------------------------------------------------- + atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | c4 | 1604 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c5 | 5 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | c6 | 6 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + atsetenc | c7 | 1607 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | ........pg.dropped.3........ | 1603 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] +(7 rows) +-- should error out +select c3 from atsetenc; +ERROR: column "c3" does not exist +LINE 1: select c3 from atsetenc; + ^ +select c7 from atsetenc; + c7 +---- + 7 + 7 +(2 rows) + +-- 4. 
with AT commands that rewrite table +alter table atsetenc add column c8 int default 8; +ALTER TABLE +-- changing to another AM, should complaint +alter table atsetenc set access method heap, alter column c8 set encoding (compresstype=zlib,compresslevel=9); +ERROR: ALTER COLUMN SET ENCODING operation is only applicable to AOCO tables +DETAIL: New access method for "atsetenc" is not ao_column +-- reorganize, should rewrite +execute capturerelfilenodebefore('set encoding - reorg', 'atsetenc'); +INSERT 0 4 +alter table atsetenc set with (reorganize=true), alter column c8 set encoding (compresstype=zlib,compresslevel=9); +ALTER TABLE +execute checkrelfilenodediff('set encoding - reorg', 'atsetenc'); + segid | casename | relname | rewritten +-------+----------------------+----------+----------- + 2 | set encoding - reorg | atsetenc | t + -1 | set encoding - reorg | atsetenc | t + 0 | set encoding - reorg | atsetenc | t + 1 | set encoding - reorg | atsetenc | t +(4 rows) +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+------------------------------+---------+------------------------------------------------------------- + atsetenc | c1 | 1 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | c4 | 4 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c5 | 5 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | c6 | 6 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + atsetenc | c7 | 7 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c8 | 8 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | ........pg.dropped.3........ | 3 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] +(8 rows) + +-- 5. 
multiple SET ENCODING commands +-- not rewrite +execute capturerelfilenodebefore('set encoding - multiple', 'atsetenc'); +INSERT 0 4 +alter table atsetenc alter column c7 set encoding (compresstype=rle_type,compresslevel=3), alter column c8 set encoding (compresstype=rle_type,compresslevel=4); +ALTER TABLE +execute checkrelfilenodediff('set encoding - multiple', 'atsetenc'); + segid | casename | relname | rewritten +-------+-------------------------+----------+----------- + 0 | set encoding - multiple | atsetenc | f + 1 | set encoding - multiple | atsetenc | f + -1 | set encoding - multiple | atsetenc | f + 2 | set encoding - multiple | atsetenc | f +(4 rows) +execute attribute_encoding_check('atsetenc'); + relname | attname | filenum | attoptions +----------+------------------------------+---------+----------------------------------------------------------------- + atsetenc | c1 | 1 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | c4 | 4 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c5 | 5 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] + atsetenc | ........pg.dropped.3........ 
| 3 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] + atsetenc | c6 | 6 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + atsetenc | c7 | 1607 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=3'] + atsetenc | c8 | 1608 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=4'] +(8 rows) + +-- results all good +select * from atsetenc; + c1 | c2 | c4 | c5 | c6 | c7 | c8 +----+----+----+----+----+----+---- + 1 | 2 | 4 | 5 | 6 | 7 | 8 + 1 | 2 | 4 | 5 | 6 | 7 | 8 +(2 rows) + +-- +-- partition table +-- +create table atsetencpart (a int, b int) using ao_column partition by range(b); +CREATE TABLE +create table atsetencpart_p1 partition of atsetencpart for values from (0) to (10); +CREATE TABLE +create table atsetencpart_p2 partition of atsetencpart for values from (10) to (20); +CREATE TABLE +create table atsetencpart_def partition of atsetencpart default; +CREATE TABLE +insert into atsetencpart select 1,i from generate_series(1,100)i; +INSERT 0 100 +execute capturerelfilenodebefore('set enc', 'atsetencpart_p1'); +INSERT 0 4 +execute capturerelfilenodebefore('set enc', 'atsetencpart_p2'); +INSERT 0 4 +execute capturerelfilenodebefore('set enc', 'atsetencpart_def'); +INSERT 0 4 +-- alter root table will alter all children +alter table atsetencpart alter column b set encoding (compresstype=zlib,compresslevel=9); +ALTER TABLE +-- alter a child partition just alter that partition +alter table atsetencpart_p2 alter column b set encoding (compresslevel=1); +ALTER TABLE +-- no table rewrite and the options are changed +execute checkrelfilenodediff('set enc', 'atsetencpart_p1'); + segid | casename | relname | rewritten +-------+----------+-----------------+----------- + 2 | set enc | atsetencpart_p1 | f + -1 | set enc | atsetencpart_p1 | f + 0 | set enc | atsetencpart_p1 | f + 1 | set enc | atsetencpart_p1 | f +(4 rows) +execute checkrelfilenodediff('set enc', 'atsetencpart_p2'); + segid | casename | relname | rewritten 
+-------+----------+-----------------+----------- + 2 | set enc | atsetencpart_p2 | f + -1 | set enc | atsetencpart_p2 | f + 0 | set enc | atsetencpart_p2 | f + 1 | set enc | atsetencpart_p2 | f +(4 rows) +execute checkrelfilenodediff('set enc', 'atsetencpart_def'); + segid | casename | relname | rewritten +-------+----------+------------------+----------- + 2 | set enc | atsetencpart_def | f + -1 | set enc | atsetencpart_def | f + 0 | set enc | atsetencpart_def | f + 1 | set enc | atsetencpart_def | f +(4 rows) +execute attribute_encoding_check('atsetencpart_p1'); + relname | attname | filenum | attoptions +-----------------+---------+---------+------------------------------------------------------------- + atsetencpart_p1 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + atsetencpart_p1 | b | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] +(2 rows) +execute attribute_encoding_check('atsetencpart_p2'); + relname | attname | filenum | attoptions +-----------------+---------+---------+------------------------------------------------------------- + atsetencpart_p2 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + atsetencpart_p2 | b | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1'] +(2 rows) +execute attribute_encoding_check('atsetencpart_def'); + relname | attname | filenum | attoptions +------------------+---------+---------+------------------------------------------------------------- + atsetencpart_def | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0'] + atsetencpart_def | b | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9'] +(2 rows) +-- results are expected +select sum(a), sum(b) from atsetencpart; + sum | sum +-----+------ + 100 | 5050 +(1 row) + diff --git a/src/test/isolation2/expected/aocs_unique_index.out b/src/test/isolation2/expected/aocs_unique_index.out index 88a81e3220a..25cd89a8575 100644 --- 
a/src/test/isolation2/expected/aocs_unique_index.out +++ b/src/test/isolation2/expected/aocs_unique_index.out @@ -8,9 +8,9 @@ -- Case 1: Conflict with committed transaction---------------------------------- CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491); -INSERT 658491 +INSERT 0 658491 -- should conflict INSERT INTO unique_index_ao_column VALUES (1); ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg0 192.168.0.148:7002 pid=721860) @@ -20,73 +20,73 @@ ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a DETAIL: Key (a)=(658491) already exists. -- should not conflict INSERT INTO unique_index_ao_column VALUES (658492); -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -- Case 2: Conflict within the same transaction--------------------------------- CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE BEGIN; BEGIN INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491); -INSERT 658491 +INSERT 0 658491 -- should conflict INSERT INTO unique_index_ao_column VALUES (1); ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg1 192.168.0.148:7003 pid=721861) DETAIL: Key (a)=(1) already exists. END; -END +ROLLBACK DROP TABLE unique_index_ao_column; -DROP +DROP TABLE CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE BEGIN; BEGIN INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491); -INSERT 658491 +INSERT 0 658491 -- should conflict INSERT INTO unique_index_ao_column VALUES (658491); ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg0 192.168.0.148:7002 pid=721860) DETAIL: Key (a)=(658491) already exists. 
END; -END +ROLLBACK DROP TABLE unique_index_ao_column; -DROP +DROP TABLE CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE BEGIN; BEGIN INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491); -INSERT 658491 +INSERT 0 658491 -- should not conflict INSERT INTO unique_index_ao_column VALUES (658492); -INSERT 1 +INSERT 0 1 END; -END +COMMIT DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -- Case 3: Conflict with aborted transaction is not a conflict------------------ CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE BEGIN; BEGIN INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491); -INSERT 658491 +INSERT 0 658491 ABORT; -ABORT +ROLLBACK -- should not conflict INSERT INTO unique_index_ao_column VALUES (1); -INSERT 1 +INSERT 0 1 INSERT INTO unique_index_ao_column VALUES (658491); -INSERT 1 +INSERT 0 1 INSERT INTO unique_index_ao_column VALUES (658492); -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -- Case 4: Conflict with to-be-committed transaction---------------------------- -- @@ -106,21 +106,21 @@ DROP -- 10. Tx 1 commits -- CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE 1: BEGIN; BEGIN 1: INSERT INTO unique_index_ao_column VALUES (0); -INSERT 1 +INSERT 0 1 2: BEGIN; BEGIN 2: INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491); -INSERT 658491 +INSERT 0 658491 3&: INSERT INTO unique_index_ao_column VALUES (1); 4&: INSERT INTO unique_index_ao_column VALUES (658490); 5&: INSERT INTO unique_index_ao_column VALUES (658491); -- should succeed immediately 6: INSERT INTO unique_index_ao_column VALUES (658492); -INSERT 1 +INSERT 0 1 2: COMMIT; COMMIT 3<: <... completed> @@ -135,7 +135,7 @@ DETAIL: Key (a)=(658491) already exists. 
1: COMMIT; COMMIT DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -- Case 5: Conflict with to-be-aborted transaction------------------------------ -- @@ -155,37 +155,37 @@ DROP -- 10. Tx 1 commits -- CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE 1: BEGIN; BEGIN 1: INSERT INTO unique_index_ao_column VALUES (0); -INSERT 1 +INSERT 0 1 2: BEGIN; BEGIN 2: INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491); -INSERT 658491 +INSERT 0 658491 3&: INSERT INTO unique_index_ao_column VALUES (1); 4&: INSERT INTO unique_index_ao_column VALUES (658490); 5&: INSERT INTO unique_index_ao_column VALUES (658491); -- should succeed immediately 6: INSERT INTO unique_index_ao_column VALUES (658492); -INSERT 1 +INSERT 0 1 2: ABORT; -ABORT +ROLLBACK 3<: <... completed> -INSERT 1 +INSERT 0 1 4<: <... completed> -INSERT 1 +INSERT 0 1 5<: <... completed> -INSERT 1 +INSERT 0 1 1: COMMIT; COMMIT DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -- Case 6: Conflict with aborted rows following some committed rows ------------ CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE -- 1. Tx 1 commits rows 1-100. -- 2. Tx 2 inserts rows 101-200 and then aborts. -- 3. Tx 3 tries to insert row in range [101,200] and is immediately successful. @@ -193,22 +193,22 @@ CREATE -- constraint violation. -- 5. Tx 5 tries to insert row in range [201, ) and is immediately successful. 
1: INSERT INTO unique_index_ao_column SELECT generate_series(1, 100); -INSERT 100 +INSERT 0 100 2: BEGIN; BEGIN 2: INSERT INTO unique_index_ao_column SELECT generate_series(101, 200); -INSERT 100 +INSERT 0 100 2: ABORT; -ABORT +ROLLBACK 3: INSERT INTO unique_index_ao_column VALUES(102); -INSERT 1 +INSERT 0 1 4: INSERT INTO unique_index_ao_column VALUES(2); ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg0 192.168.0.148:7002 pid=659656) DETAIL: Key (a)=(2) already exists. 5: INSERT INTO unique_index_ao_column VALUES(202); -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -------------------------------------------------------------------------------- ----------------- More concurrent tests with fault injection ------------------ @@ -232,7 +232,7 @@ DROP -- 6. Tx 2 succeeds as Tx 1 aborted. CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE SELECT gp_inject_fault('appendonly_insert', 'suspend', '', '', 'unique_index_ao_column', 4, 4, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; gp_inject_fault ----------------- @@ -252,9 +252,9 @@ SELECT gp_wait_until_triggered_fault('appendonly_insert', 4, dbid) FROM gp_segme (3 rows) 2&: INSERT INTO unique_index_ao_column VALUES(2); 4: INSERT INTO unique_index_ao_column VALUES(11); -INSERT 1 +INSERT 0 1 3: INSERT INTO unique_index_ao_column VALUES(4); -INSERT 1 +INSERT 0 1 SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; gp_inject_fault ----------------- @@ -266,9 +266,9 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg1 192.168.0.148:7003 pid=828519) DETAIL: Key (a)=(4) already exists. 2<: <... 
completed> -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -- Case 8: Conflict with to-be-committed transaction - generalization of case 7 -- where there are multiple minipages (and block directory rows) in play from @@ -293,7 +293,7 @@ DROP -- 7. All blocked Txs succeed. CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE SELECT gp_inject_fault('insert_new_entry_curr_minipage_full', 'suspend', '', '', '', 2, 2, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; gp_inject_fault @@ -349,7 +349,7 @@ SELECT gp_wait_until_triggered_fault('appendonly_insert', 4, dbid) FROM gp_segme 8&: INSERT INTO unique_index_ao_column VALUES(1321071); -- no index entry exists for it, so should not conflict. 9: INSERT INTO unique_index_ao_column VALUES(1321075); -INSERT 1 +INSERT 0 1 SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1; gp_inject_fault @@ -363,29 +363,29 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg2 192.168.0.148:7004 pid=735802) DETAIL: Key (a)=(1321075) already exists. 2<: <... completed> -INSERT 1 +INSERT 0 1 3<: <... completed> -INSERT 1 +INSERT 0 1 4<: <... completed> -INSERT 1 +INSERT 0 1 5<: <... completed> -INSERT 1 +INSERT 0 1 6<: <... completed> -INSERT 1 +INSERT 0 1 7<: <... completed> -INSERT 1 +INSERT 0 1 8<: <... 
completed> -INSERT 1 +INSERT 0 1 DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -------------------------------------------------------------------------------- --------------------------- Smoke tests for COPY ------------------------------- -------------------------------------------------------------------------------- CREATE TABLE unique_index_ao_column (a INT unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE 1: BEGIN; BEGIN @@ -405,23 +405,23 @@ CONTEXT: COPY unique_index_ao_column, line 1 2<: <... completed> COPY 1 1: END; -END +ROLLBACK DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -------------------------------------------------------------------------------- -------------------- Smoke tests for subtransactions --------------------------- -------------------------------------------------------------------------------- CREATE TABLE unique_index_ao_column (a INT unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE 1: BEGIN; BEGIN 1: SAVEPOINT a; SAVEPOINT 1: INSERT INTO unique_index_ao_column VALUES(1); -INSERT 1 +INSERT 0 1 -- concurrent tx inserting conflicting row should block. 2: BEGIN; @@ -429,7 +429,7 @@ BEGIN 2&: INSERT INTO unique_index_ao_column VALUES(1); -- concurrent tx inserting non-conflicting row should be successful. 3: INSERT INTO unique_index_ao_column VALUES(2); -INSERT 1 +INSERT 0 1 -- conflict should be detected within the same subtx. 1: INSERT INTO unique_index_ao_column VALUES(1); @@ -437,15 +437,15 @@ ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a DETAIL: Key (a)=(1) already exists. -- the concurrent tx should now succeed. 2<: <... 
completed> -INSERT 1 +INSERT 0 1 2: ABORT; -ABORT +ROLLBACK -- after rolling back to the savepoint, we should be able to re-insert the key 1: ROLLBACK TO SAVEPOINT a; ROLLBACK 1: INSERT INTO unique_index_ao_column VALUES(1); -INSERT 1 +INSERT 0 1 1: COMMIT; COMMIT @@ -457,7 +457,7 @@ SELECT * FROM unique_index_ao_column; (2 rows) DROP TABLE unique_index_ao_column; -DROP +DROP TABLE -------------------------------------------------------------------------------- -------------------- Smoke tests for repeatable read --------------------------- @@ -467,7 +467,7 @@ DROP -- boundaries. CREATE TABLE unique_index_ao_column (a INT unique) USING ao_column DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE -- Begin two txs with tx level snapshot taken early. 1: BEGIN ISOLATION LEVEL REPEATABLE READ; @@ -487,11 +487,11 @@ BEGIN 3: BEGIN; BEGIN 3: INSERT INTO unique_index_ao_column VALUES(1); -INSERT 1 +INSERT 0 1 -- And another transaction inserts a key and commits. INSERT INTO unique_index_ao_column VALUES(2); -INSERT 1 +INSERT 0 1 -- Tx should block on insert of conflicting key, even though it can't "see" the -- conflicting key due to its isolation level. @@ -502,11 +502,11 @@ INSERT 1 1&: INSERT INTO unique_index_ao_column VALUES(1); 3: ABORT; -ABORT +ROLLBACK 1<: <... completed> -INSERT 1 +INSERT 0 1 1: ABORT; -ABORT +ROLLBACK -- Tx should raise a conflict, even though it can't "see" the conflicting key -- due to its isolation level. 
@@ -522,52 +522,3 @@ ABORT DROP TABLE unique_index_ao_column; DROP - - --------------------------------------------------------------------------------- ------------------------ Smoke tests for ADD CONSTRAINT ------------------------ --------------------------------------------------------------------------------- -CREATE TABLE unique_index_ao_column (a INT) USING ao_column DISTRIBUTED REPLICATED; -CREATE -INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 5); -INSERT 5 - -ALTER table unique_index_ao_column ADD CONSTRAINT a_unique UNIQUE(a); -ALTER --- should conflict -INSERT INTO unique_index_ao_column VALUES (1); -DETAIL: Key (a)=(1) already exists. -ERROR: duplicate key value violates unique constraint "a_unique" -ALTER table unique_index_ao_column DROP CONSTRAINT a_unique; -ALTER - -INSERT INTO unique_index_ao_column VALUES (1); -INSERT 1 --- should failed -ALTER table unique_index_ao_column ADD CONSTRAINT a_unique UNIQUE(a); -DETAIL: Key (a)=(1) is duplicated. -ERROR: could not create unique index "a_unique" - -DROP TABLE unique_index_ao_column; -DROP - - --------------------------------------------------------------------------------- ------------------------ Smoke tests for Multiple Key --------------------------- --------------------------------------------------------------------------------- -CREATE TABLE unique_index_ao_column (a INT, b INT) USING ao_column DISTRIBUTED REPLICATED; -CREATE -INSERT INTO unique_index_ao_column SELECT i,i FROM generate_series(1, 5) i; -INSERT 5 - -CREATE UNIQUE INDEX a_b_unique ON unique_index_ao_column(a,b); -CREATE --- should not conflict -INSERT INTO unique_index_ao_column VALUES (1,2); -INSERT 1 --- should conflict -INSERT INTO unique_index_ao_column VALUES (1,1); -DETAIL: Key (a, b)=(1, 1) already exists. 
-ERROR: duplicate key value violates unique constraint "a_b_unique" -DROP TABLE unique_index_ao_column; -DROP diff --git a/src/test/isolation2/expected/bitmap_index_ao_sparse.out b/src/test/isolation2/expected/bitmap_index_ao_sparse.out index 933aaad5552..8f9a1a15b51 100644 --- a/src/test/isolation2/expected/bitmap_index_ao_sparse.out +++ b/src/test/isolation2/expected/bitmap_index_ao_sparse.out @@ -9,7 +9,7 @@ -- Test AO table. CREATE TABLE ao_sparse (id int) with(appendonly = true) DISTRIBUTED BY (id); -CREATE +CREATE TABLE 1: begin; BEGIN @@ -17,9 +17,9 @@ BEGIN BEGIN 1: INSERT INTO ao_sparse SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 2: INSERT INTO ao_sparse SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 1: commit; COMMIT @@ -34,7 +34,7 @@ SELECT count(*) FROM ao_sparse WHERE id >= 97 and id <= 99 and gp_segment_id = 0 (1 row) CREATE INDEX idx_ao_sparse_id ON ao_sparse USING bitmap (id); -CREATE +CREATE INDEX -- Should generate Bitmap Heap Scan on the bitmap index. @@ -74,7 +74,7 @@ SET -- Test AOCS table. CREATE TABLE aocs_sparse (id int) with(appendonly = true, orientation = COLUMN) DISTRIBUTED BY (id); -CREATE +CREATE TABLE 1: begin; BEGIN @@ -82,9 +82,9 @@ BEGIN BEGIN 1: INSERT INTO aocs_sparse SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 2: INSERT INTO aocs_sparse SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 1: commit; COMMIT diff --git a/src/test/isolation2/expected/bitmap_index_concurrent.out b/src/test/isolation2/expected/bitmap_index_concurrent.out index b87cae7f476..e46b8673437 100644 --- a/src/test/isolation2/expected/bitmap_index_concurrent.out +++ b/src/test/isolation2/expected/bitmap_index_concurrent.out @@ -25,7 +25,7 @@ -- will generate two bitmap pages, and the first page is a full page. -- Use heap table, delete tuples and then vacuum should be the same. 
But it needs huge tuples. CREATE TABLE bmupdate (id int) with(appendonly = true) DISTRIBUTED BY (id); -CREATE +CREATE TABLE 1: begin; BEGIN @@ -73,49 +73,49 @@ BEGIN BEGIN 1: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 2: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 3: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 4: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 5: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 6: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 7: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 8: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 9: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 10: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 11: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 12: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 13: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 14: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 15: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 16: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 17: INSERT INTO bmupdate 
SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 18: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 19: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 20: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 21: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 22: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; -INSERT 1000000 +INSERT 0 1000000 1: commit; COMMIT @@ -170,7 +170,7 @@ SELECT count(*) FROM bmupdate WHERE id = 97; (1 row) CREATE INDEX idx_bmupdate__id ON bmupdate USING bitmap (id); -CREATE +CREATE INDEX -- -- Test 1, run Bitmap Heap Scan on the bitmap index when there's @@ -214,7 +214,7 @@ SET -- The reason it not insert at the end of bitmap LOV is because right now only one -- transaction doing the insert, and it'll insert to small seg file number. 2: INSERT INTO bmupdate VALUES (97); -INSERT 1 +INSERT 0 1 -- Query should read the first page(buffer lock released), and then INSERT insert to -- the first page which will trigger rearrange words. @@ -294,7 +294,7 @@ SET -- The reason it not insert at the end of bitmap LOV is because right now only one -- transaction doing the insert, and it'll insert to small seg file number. 2: INSERT INTO bmupdate VALUES (97); -INSERT 1 +INSERT 0 1 -- Query should read the first page(buffer lock released), and then INSERT insert to -- the first page which will trigger rearrange words. @@ -374,9 +374,9 @@ SELECT gp_inject_fault_infinite('rearrange_word_to_next_bitmap_page', 'skip', db -- transaction doing the insert, and it'll insert to small seg file number. -- Here insert both values to make sure update on full bitmap happens for one LOV. 
2: INSERT INTO bmupdate VALUES (97); -INSERT 1 +INSERT 0 1 2: INSERT INTO bmupdate VALUES (99); -INSERT 1 +INSERT 0 1 -- Query should read the first page(buffer lock released), and then INSERT insert to -- the first page which will trigger rearrange words. @@ -461,9 +461,9 @@ SET -- transaction doing the insert, and it'll insert to small seg file number. -- Here insert both values to make sure update on full bitmap happens for one LOV. 2: INSERT INTO bmupdate SELECT 97 FROM generate_series(1, 1000); -INSERT 1000 +INSERT 0 1000 2: INSERT INTO bmupdate SELECT 99 FROM generate_series(1, 1000); -INSERT 1000 +INSERT 0 1000 -- Query should read the first page(buffer lock released), and then INSERT insert to -- the first page which will trigger rearrange words. @@ -506,7 +506,7 @@ SELECT count(*) FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0; (1 row) DROP TABLE bmupdate; -DROP +DROP TABLE -- Regression test, when large amount of inserts concurrent inserts happen, diff --git a/src/test/isolation2/expected/bitmap_index_crash.out b/src/test/isolation2/expected/bitmap_index_crash.out index 7dce1ae6d31..b49ff50a065 100644 --- a/src/test/isolation2/expected/bitmap_index_crash.out +++ b/src/test/isolation2/expected/bitmap_index_crash.out @@ -5,7 +5,7 @@ -- subsequent flush of the metapage will lead to an inadvertent -- overwrite. 
1:CREATE EXTENSION IF NOT EXISTS gp_inject_fault; -CREATE +CREATE EXTENSION -- skip FTS probes for this test to avoid segment being marked down on restart 1:SELECT gp_inject_fault_infinite('fts_probe', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; gp_inject_fault_infinite @@ -20,9 +20,9 @@ CREATE -- test setup 1:CREATE TABLE bm(a int); -CREATE +CREATE TABLE 1:CREATE INDEX ON bm USING bitmap (a); -CREATE +CREATE INDEX -- pause checkpoint to make sure CRASH RECOVERY happens for bitmap index replay 1:SELECT gp_inject_fault_infinite('checkpoint', 'skip', dbid) FROM gp_segment_configuration WHERE role='p'; gp_inject_fault_infinite @@ -37,7 +37,7 @@ CHECKPOINT -- this insert's WAL we wish to replay 1:insert into bm select generate_series(1, 5000); -INSERT 5000 +INSERT 0 5000 1U:select pg_relation_size(oid)/current_setting('block_size')::bigint from pg_class where relname = 'bm_a_idx'; ?column? ---------- @@ -46,7 +46,7 @@ INSERT 5000 -- set small shared_buffers to make sure META_PAGE of bitmap index evicts out 1U: ALTER SYSTEM set shared_buffers to 20; -ALTER +ALTER SYSTEM 1:SELECT pg_ctl(datadir, 'restart') from gp_segment_configuration where role = 'p' and content = 1; pg_ctl -------- @@ -73,7 +73,7 @@ SET -- teardown cleanup for the test 1Uq: ... 
1U:ALTER SYSTEM reset shared_buffers; -ALTER +ALTER SYSTEM 2:SELECT pg_ctl(datadir, 'restart') from gp_segment_configuration where role = 'p' and content = 1; pg_ctl -------- diff --git a/src/test/isolation2/expected/bitmap_index_inspect.out b/src/test/isolation2/expected/bitmap_index_inspect.out index fa925fcad97..39f3865b035 100644 --- a/src/test/isolation2/expected/bitmap_index_inspect.out +++ b/src/test/isolation2/expected/bitmap_index_inspect.out @@ -3,14 +3,16 @@ -- inspect functions run against a single node, as opposed to the entire GP cluster) -- Setup +1U: CREATE EXTENSION pageinspect; +CREATE EXTENSION 1U: CREATE TABLE bmtest_t1(i int, bmfield int); -CREATE +CREATE TABLE 1U: CREATE INDEX bmtest_i1 ON bmtest_t1 USING bitmap(bmfield); -CREATE +CREATE INDEX 1U: INSERT INTO bmtest_t1 SELECT i,1 FROM generate_series(1, 1000) i; -INSERT 1000 +INSERT 0 1000 1U: INSERT INTO bmtest_t1 SELECT i,2 FROM generate_series(1, 1000) i; -INSERT 1000 +INSERT 0 1000 -- start_matchsubs -- m/bmfuncs.c:\d+/ @@ -61,4 +63,6 @@ ERROR: block 1 is not a bitmap page, it is a LOV item page (bmfuncs.c:507) -- cleanup 1U: DROP TABLE bmtest_t1; -DROP +DROP TABLE +1U: DROP EXTENSION pageinspect; +DROP EXTENSION diff --git a/src/test/isolation2/expected/bitmap_update_words_backup_block.out b/src/test/isolation2/expected/bitmap_update_words_backup_block.out index 950f91d36be..98567439ee1 100644 --- a/src/test/isolation2/expected/bitmap_update_words_backup_block.out +++ b/src/test/isolation2/expected/bitmap_update_words_backup_block.out @@ -1,6 +1,6 @@ -- Setup fault injectors. CREATE EXTENSION IF NOT EXISTS gp_inject_fault; -CREATE +CREATE EXTENSION -- Skip FTS probes for this test to avoid segment being marked down on restart. 
1:SELECT gp_inject_fault_infinite('fts_probe', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; @@ -14,38 +14,38 @@ CREATE t (1 row) CREATE TABLE bm_update_words_backup_block (id int) WITH (appendonly = true); -CREATE +CREATE TABLE 1: BEGIN; BEGIN 2: BEGIN; BEGIN 1: INSERT INTO bm_update_words_backup_block SELECT i%100 FROM generate_series(1, 200) AS i; -INSERT 200 +INSERT 0 200 2: INSERT INTO bm_update_words_backup_block SELECT i%100 FROM generate_series(1, 200) AS i; -INSERT 200 +INSERT 0 200 1: COMMIT; COMMIT 2: COMMIT; COMMIT CREATE INDEX bm_update_words_backup_block_idx ON bm_update_words_backup_block USING bitmap (id); -CREATE +CREATE INDEX -- INSERTs will attempt to add a bitmap page but will cause a word -- expansion and a bitmap page split due to overflow. See bitmap -- function updatesetbit_inpage(). 2: INSERT INTO bm_update_words_backup_block VALUES (97); -INSERT 1 +INSERT 0 1 2: INSERT INTO bm_update_words_backup_block VALUES (97), (99); -INSERT 2 +INSERT 0 2 -- Run a CHECKPOINT to force this next INSERT to add backup blocks of -- the two bitmap pages to its XLOG_BITMAP_UPDATEWORDS record. 2: CHECKPOINT; CHECKPOINT 2: INSERT INTO bm_update_words_backup_block VALUES (97); -INSERT 1 +INSERT 0 1 -- Do an immediate restart to force crash recovery. The above INSERT -- should be replayed with the backup blocks. @@ -55,7 +55,7 @@ INSERT 1 OK (1 row) 3: INSERT INTO bm_update_words_backup_block VALUES (97); -INSERT 1 +INSERT 0 1 -- Turn FTS back on. 
3:SELECT gp_inject_fault('fts_probe', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; diff --git a/src/test/isolation2/expected/brin_heap.out b/src/test/isolation2/expected/brin_heap.out index 83aa84b8e69..e656f3a16f5 100644 --- a/src/test/isolation2/expected/brin_heap.out +++ b/src/test/isolation2/expected/brin_heap.out @@ -2,6 +2,8 @@ -- White-box tests are necessary to ensure that summarization is done -- successfully (to avoid cases where ranges have brin data tuples without -- values or where the range is not covered by the revmap etc) +CREATE EXTENSION pageinspect; +CREATE EXTENSION -- Turn off sequential scans to force usage of BRIN indexes for scans. SET enable_seqscan TO off; @@ -11,9 +13,9 @@ SET -- by another transaction, while summarization was in flight. CREATE TABLE brin_range_extended_heap(i int) USING heap; -CREATE +CREATE TABLE CREATE INDEX ON brin_range_extended_heap USING brin(i) WITH (pages_per_range=5); -CREATE +CREATE INDEX -- Insert 9 blocks of data on 1 QE; 8 blocks full, 1 block with 1 tuple. SELECT populate_pages('brin_range_extended_heap', 1, tid '(8, 0)'); @@ -105,9 +107,9 @@ SELECT gp_inject_fault('summarize_last_partial_range', 'reset', dbid) FROM gp_se -- Test build/summarize with aborted rows. CREATE TABLE brin_abort_heap(i int); -CREATE +CREATE TABLE CREATE INDEX ON brin_abort_heap USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX BEGIN; BEGIN -- Create 3 blocks all on 1 QE, in 1 aoseg: 2 blocks full, 1 block with 1 tuple. @@ -117,7 +119,7 @@ SELECT populate_pages('brin_abort_heap', 1, tid '(2, 0)'); (1 row) ABORT; -ABORT +ROLLBACK -- Sanity: There is 1 revmap page and 1 data page, with 1 range (summarized). -- This first range being summarized highlights a difference with AO/CO tables. @@ -260,9 +262,9 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_ -- Drop and re-create the index to test build. 
DROP INDEX brin_abort_heap_i_idx; -DROP +DROP INDEX CREATE INDEX ON brin_abort_heap USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX -- Sanity: There is 1 revmap page and 1 data page, with 4 ranges. Only the last -- two ranges (covering the committed rows) have non-empty tuples. @@ -315,3 +317,5 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_ RESET enable_seqscan; RESET +DROP EXTENSION pageinspect; +DROP EXTENSION diff --git a/src/test/isolation2/expected/cancel_plpython.out b/src/test/isolation2/expected/cancel_plpython.out index 37964e3b04c..fff46cf807b 100644 --- a/src/test/isolation2/expected/cancel_plpython.out +++ b/src/test/isolation2/expected/cancel_plpython.out @@ -3,25 +3,25 @@ CREATE LANGUAGE plpython3u; CREATE -- end_ignore CREATE OR REPLACE FUNCTION pybusyloop() RETURNS double precision AS $$ import math while True: a = 1 return 1 $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION pysleep() RETURNS double precision AS $$ import time time.sleep(100) return 1 $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION pyspisleep() RETURNS double precision AS $$ # container: plc_python_shared rv = plpy.execute("select pg_sleep(100)") return 1 $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION pynestsleep() RETURNS double precision AS $$ # container: plc_python_shared rv = plpy.execute("select pyspisleep()") return 1 $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION pynestsleep2() RETURNS double precision AS $$ # container: plc_python_shared rv = plpy.execute("select pysleep()") return 1 $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION CREATE TABLE a(i int); -CREATE +CREATE TABLE insert into a values(1),(10),(20),(100); -INSERT 4 +INSERT 0 4 1&: select pybusyloop(); 2&: select pybusyloop() from a; diff --git a/src/test/isolation2/expected/cancel_query.out b/src/test/isolation2/expected/cancel_query.out index 
38bda39fe4e..2833da2dc05 100644 --- a/src/test/isolation2/expected/cancel_query.out +++ b/src/test/isolation2/expected/cancel_query.out @@ -1,14 +1,14 @@ CREATE EXTENSION IF NOT EXISTS gp_inject_fault; -CREATE +CREATE EXTENSION 0:CREATE TABLE a_partition_table_for_analyze_cancellation ( a_date date NOT NULL, a_bigint bigint NOT NULL, b_bigint bigint NOT NULL ) WITH (appendonly='true', orientation='column') DISTRIBUTED BY (a_bigint, b_bigint) PARTITION BY RANGE(a_date) ( PARTITION p1 START ('2018-01-01'::date) END ('2018-12-31'::date) WITH (appendonly='true', orientation='column') COLUMN a_date ENCODING (compresstype=zlib) COLUMN a_bigint ENCODING (compresstype=zlib) COLUMN b_bigint ENCODING (compresstype=zlib), PARTITION p2 START ('2019-01-01'::date) END ('2019-12-31'::date) WITH (appendonly='true', orientation='column') COLUMN a_date ENCODING (compresstype=zlib) COLUMN a_bigint ENCODING (compresstype=zlib) COLUMN b_bigint ENCODING (compresstype=zlib), PARTITION p3 START ('2020-01-01'::date) END ('2020-12-31'::date) WITH (appendonly='true', orientation='column') COLUMN a_date ENCODING (compresstype=zlib) COLUMN a_bigint ENCODING (compresstype=zlib) COLUMN b_bigint ENCODING (compresstype=zlib) ); -CREATE +CREATE TABLE 0:INSERT INTO a_partition_table_for_analyze_cancellation VALUES(timestamp '2018-01-01 10:00:00', 1, 3); -INSERT 1 +INSERT 0 1 0:INSERT INTO a_partition_table_for_analyze_cancellation VALUES(timestamp '2019-01-01 12:00:00', 2, 4); -INSERT 1 +INSERT 0 1 0:INSERT INTO a_partition_table_for_analyze_cancellation VALUES(timestamp '2020-01-01 13:00:00', 3, 5); -INSERT 1 +INSERT 0 1 0: SELECT gp_inject_fault('zlib_decompress_after_decompress_fn', 'sleep', '', '', '', 1, -1, 3600, dbid) FROM gp_segment_configuration WHERE content=1 AND role='p'; gp_inject_fault diff --git a/src/test/isolation2/expected/checkpoint_dtx_info.out b/src/test/isolation2/expected/checkpoint_dtx_info.out index e17e8706631..9e69d64006c 100644 --- 
a/src/test/isolation2/expected/checkpoint_dtx_info.out +++ b/src/test/isolation2/expected/checkpoint_dtx_info.out @@ -34,7 +34,7 @@ 1: begin; BEGIN 1: create table twopcbug(i int, j int); -CREATE +CREATE TABLE 1&: commit; -- wait to make sure the commit is taking place and blocked at start_insertedDistributedCommitted 2: select gp_wait_until_triggered_fault('start_insertedDistributedCommitted', 1, 1); @@ -102,7 +102,7 @@ server closed the connection unexpectedly -- failure on coordinator. The solution is adding the expected length -- in SizeOfXLogRecordDataHeaderLong also, to fixup the missing condition. create table ckpt_xlog_len_tbl(a int, b int); -CREATE +CREATE TABLE -- Need to start at least 18 concurrent sessions to create a long header -- CHECKPOINT WAL record, which size is not less than 256. @@ -118,115 +118,115 @@ CREATE 10: begin; BEGIN 10: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 10&: commit; 11: begin; BEGIN 11: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 11&: commit; 12: begin; BEGIN 12: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 12&: commit; 13: begin; BEGIN 13: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 13&: commit; 14: begin; BEGIN 14: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 14&: commit; 15: begin; BEGIN 15: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 15&: commit; 16: begin; BEGIN 16: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 16&: commit; 17: begin; BEGIN 17: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 17&: commit; 18: begin; BEGIN 18: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 18&: commit; 19: 
begin; BEGIN 19: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 19&: commit; 20: begin; BEGIN 20: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 20&: commit; 21: begin; BEGIN 21: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 21&: commit; 22: begin; BEGIN 22: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 22&: commit; 23: begin; BEGIN 23: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 23&: commit; 24: begin; BEGIN 24: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 24&: commit; 25: begin; BEGIN 25: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 25&: commit; 26: begin; BEGIN 26: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 26&: commit; 27: begin; BEGIN 27: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 27&: commit; 28: begin; BEGIN 28: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 28&: commit; -- wait to make sure the commit is taking place and blocked at start_insertedDistributedCommitted @@ -362,4 +362,8 @@ server closed the connection unexpectedly (1 row) 3: drop table ckpt_xlog_len_tbl; -DROP +DROP TABLE + +-- start_ignore +alter resource group admin_group set concurrency 10; +-- end_ignore diff --git a/src/test/isolation2/expected/commit_transaction_block_checkpoint.out b/src/test/isolation2/expected/commit_transaction_block_checkpoint.out index 0857b3b7a2a..83bd051083a 100644 --- a/src/test/isolation2/expected/commit_transaction_block_checkpoint.out +++ b/src/test/isolation2/expected/commit_transaction_block_checkpoint.out @@ -13,7 +13,7 @@ CHECKPOINT 2: begin; BEGIN 2: create table 
t_commit_transaction_block_checkpoint (c int) distributed by (c); -CREATE +CREATE TABLE 2&: commit; -- wait for the fault to trigger since following checkpoint could be faster @@ -53,7 +53,7 @@ CHECKPOINT 2: begin; BEGIN 2: drop table t_commit_transaction_block_checkpoint; -DROP +DROP TABLE 2&: commit; -- wait for the fault to trigger since following checkpoint could be faster diff --git a/src/test/isolation2/expected/concurrent_drop_truncate_tablespace.out b/src/test/isolation2/expected/concurrent_drop_truncate_tablespace.out index 501f188fc7d..7fa8607eeff 100644 --- a/src/test/isolation2/expected/concurrent_drop_truncate_tablespace.out +++ b/src/test/isolation2/expected/concurrent_drop_truncate_tablespace.out @@ -23,82 +23,8 @@ CREATE TABLESPACE concurrent_tblspace LOCATION '/tmp/concurrent_tblspace'; CREATE --- test 1: --- when creating a table using a tablespace, after the tuple of tablespace --- is locked, the tablespace is not allowed to drop -2: begin; -BEGIN -2: CREATE TABLE t_in_tablespace(a int, b int) TABLESPACE concurrent_tblspace; -CREATE - --- drop tablespace will fail: can't acuqire the lock -DROP TABLESPACE concurrent_tblspace; -ERROR: could not lock tablespace "concurrent_tblspace" -2: rollback; -ROLLBACK - --- test 2: --- if DROP TABLESPACE acquires lock first and rollback, the blocking CREATE --- TABLE will be successful. 
- --- suspend execution after tablespace lock is acquired -SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'suspend', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p'; - gp_inject_fault ------------------ - Success: - Success: - Success: -(3 rows) -1&: DROP TABLESPACE concurrent_tblspace; - --- wait for the fault to be triggered -SELECT gp_wait_until_triggered_fault('drop_tablespace_after_acquire_lock', 1, dbid) from gp_segment_configuration where content <> -1 and role='p'; - gp_wait_until_triggered_fault -------------------------------- - Success: - Success: - Success: -(3 rows) - -2&: CREATE TABLE t_in_tablespace(a int, b int) TABLESPACE concurrent_tblspace; --- inject an error to ensure that the above DROP command will rollback -SELECT gp_inject_fault('after_xlog_tblspc_drop', 'error', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p'; - gp_inject_fault ------------------ - Success: - Success: - Success: -(3 rows) -SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'reset', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p'; - gp_inject_fault ------------------ - Success: - Success: - Success: -(3 rows) --- fail -1<: <... completed> -ERROR: fault triggered, fault name:'after_xlog_tblspc_drop' fault type:'error' --- success -2<: <... completed> -CREATE --- drop the above table, so the tablespace is empty. 
-2: DROP TABLE t_in_tablespace; -DROP -SELECT gp_inject_fault('after_xlog_tblspc_drop', 'reset', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p'; - gp_inject_fault ------------------ - Success: - Success: - Success: -(3 rows) - --- test 3: --- if DROP TABLESPACE acquires lock first and going to drop, any CREATE TABLE --- will fail - --- suspend execution after tablespace lock is acquired -SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'suspend', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p'; +-- suspend execution after TablespaceCreateLock is released +SELECT gp_inject_fault('AfterTablespaceCreateLockRelease', 'suspend', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p'; gp_inject_fault ----------------- Success: @@ -117,9 +43,12 @@ SELECT gp_wait_until_triggered_fault('drop_tablespace_after_acquire_lock', 1, db (3 rows) -- create a table in the same tablespace which is being dropped via a concurrent session -2&:CREATE TABLE drop_tablespace_tbl(a int, b int) TABLESPACE concurrent_tblspace DISTRIBUTED BY (a); --- reset the fault, drop tablespace command will continue -SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'reset', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p'; +CREATE TABLE drop_tablespace_tbl(a int, b int) TABLESPACE concurrent_tblspace DISTRIBUTED BY (a); +CREATE TABLE +INSERT INTO drop_tablespace_tbl SELECT i, i FROM generate_series(1,100)i; +INSERT 0 100 +-- reset the fault, drop tablespace command will not delete the data files on the tablespace +SELECT gp_inject_fault('AfterTablespaceCreateLockRelease', 'reset', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p'; gp_inject_fault ----------------- Success: @@ -128,7 +57,18 @@ SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'reset', dbid) FROM (3 rows) -- success 1<: <... completed> -DROP --- fail -2<: <... 
completed> -ERROR: could not create directory "pg_tblspc/33175/GPDB_1_302501601/32799": No such file or directory +DROP TABLESPACE +-- check data exists +SELECT count(*) FROM drop_tablespace_tbl; + count +------- + 100 +(1 row) +-- move to another tablespace and check the data. +ALTER TABLE drop_tablespace_tbl SET TABLESPACE pg_default; +ALTER TABLE +SELECT count(*) FROM drop_tablespace_tbl; + count +------- + 100 +(1 row) diff --git a/src/test/isolation2/expected/concurrent_index_creation_should_not_deadlock.out b/src/test/isolation2/expected/concurrent_index_creation_should_not_deadlock.out index ebcc4248a48..0491f0ce3d9 100644 --- a/src/test/isolation2/expected/concurrent_index_creation_should_not_deadlock.out +++ b/src/test/isolation2/expected/concurrent_index_creation_should_not_deadlock.out @@ -1,9 +1,7 @@ -- Test to make sure non-first concurrent index creations don't deadlock -- Create an append only table, popluated with data CREATE TABLE index_deadlocking_test_table (value int) WITH (appendonly=true); -CREATE -CREATE INDEX index_deadlocking_test_table_initial_index on index_deadlocking_test_table (value); -CREATE +CREATE TABLE -- Setup a fault to ensure that both sessions pauses while creating an index, -- ensuring a concurrent index creation. @@ -29,6 +27,6 @@ SELECT gp_inject_fault('defineindex_before_acquire_lock', 'reset', 1); -- Both index creation attempts should succeed 1<: <... completed> -CREATE +CREATE INDEX 2<: <... 
completed> -CREATE +CREATE INDEX diff --git a/src/test/isolation2/expected/crash_recovery.out b/src/test/isolation2/expected/crash_recovery.out index a5078dd4657..7882e3f7149 100644 --- a/src/test/isolation2/expected/crash_recovery.out +++ b/src/test/isolation2/expected/crash_recovery.out @@ -1,5 +1,5 @@ 1:CREATE TABLE crash_test_table(c1 int); -CREATE +CREATE TABLE 1:SELECT role, preferred_role, content, status FROM gp_segment_configuration; role | preferred_role | content | status diff --git a/src/test/isolation2/expected/crash_recovery_dtm.out b/src/test/isolation2/expected/crash_recovery_dtm.out index 31b1e86391c..153ee28e91f 100644 --- a/src/test/isolation2/expected/crash_recovery_dtm.out +++ b/src/test/isolation2/expected/crash_recovery_dtm.out @@ -15,7 +15,7 @@ -- Make the test faster and also make some queries fail as expected after -- 2pc retry PANIC (do not finish earlier before PANIC happens). alter system set dtx_phase2_retry_second to 5; -ALTER +ALTER SYSTEM select pg_reload_conf(); pg_reload_conf ---------------- @@ -83,7 +83,7 @@ server closed the connection unexpectedly ---+--- (0 rows) 4: INSERT INTO commit_phase1_panic select i,i from generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 4: SELECT count(*) from commit_phase1_panic; count ------- @@ -184,9 +184,9 @@ LINE 1: SELECT count(*) from abort_fatal_fault_test_table; -- should cause master to broadcast abort and QEs handle the abort in -- DTX_CONTEXT_LOCAL_ONLY context. 11: CREATE TABLE QE_panic_test_table(a int, b int); -CREATE +CREATE TABLE 11: INSERT INTO QE_panic_test_table SELECT * from generate_series(0, 9); -INSERT 10 +INSERT 0 10 -- To help speedy recovery 11: CHECKPOINT; CHECKPOINT @@ -194,7 +194,7 @@ CHECKPOINT -- system is required to set the GUC and can't be set on session level -- as session reset happens for every abort retry. 
11: alter system set dtx_phase2_retry_second to 600; -ALTER +ALTER SYSTEM 11: select pg_reload_conf(); pg_reload_conf ---------------- @@ -269,7 +269,7 @@ ERROR: Error on receive from seg0 127.0.0.1:7002 pid=5600: server closed the co Success: (1 row) 13: alter system reset dtx_phase2_retry_second; -ALTER +ALTER SYSTEM 13: select pg_reload_conf(); pg_reload_conf ---------------- @@ -278,8 +278,8 @@ ALTER -- Scenario 5: QD panics when a QE process is doing prepare but not yet finished. -- This should cause dtx recovery finally aborts the orphaned prepared transaction. -15: CREATE TABLE master_reset(a int); -CREATE +15: CREATE TABLE coordinator_reset(a int); +CREATE TABLE 15: SELECT gp_inject_fault_infinite('before_xlog_xact_prepare', 'suspend', dbid) from gp_segment_configuration where role = 'p' and content = 1; gp_inject_fault_infinite -------------------------- @@ -299,9 +299,9 @@ CREATE -- set gucs to speed up testing 15: ALTER SYSTEM SET gp_dtx_recovery_prepared_period to 0; -ALTER +ALTER SYSTEM 15: ALTER SYSTEM SET gp_dtx_recovery_interval to 5; -ALTER +ALTER SYSTEM 15: SELECT pg_reload_conf(); pg_reload_conf ---------------- @@ -385,28 +385,12 @@ server closed the connection unexpectedly -------------------------- Success: (1 row) --- verify orphaned prepared transacion is aborted -19: SELECT gp_wait_until_triggered_fault('after_orphaned_check', 1, dbid) from gp_segment_configuration where role = 'p' and content = -1; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) -19: select * from gp_stat_progress_dtx_recovery; - phase | recover_commited_dtx_total | recover_commited_dtx_completed | in_doubt_tx_total | in_doubt_tx_in_progress | in_doubt_tx_aborted ------------------------------------------+----------------------------+--------------------------------+-------------------+-------------------------+--------------------- - managing in-doubt orphaned transactions | 0 | 0 | 1 | 0 | 1 -(1 row) -19: SELECT 
gp_inject_fault_infinite('after_orphaned_check', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = -1; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -19: DROP TABLE master_reset; -DROP +19: DROP TABLE coordinator_reset; +DROP TABLE 19: ALTER SYSTEM RESET gp_dtx_recovery_interval; -ALTER +ALTER SYSTEM 19: ALTER SYSTEM RESET gp_dtx_recovery_prepared_period; -ALTER +ALTER SYSTEM 19: SELECT pg_reload_conf(); pg_reload_conf ---------------- @@ -424,11 +408,11 @@ ALTER -- speed up testing by setting some gucs. 20: ALTER SYSTEM SET gp_dtx_recovery_prepared_period to 0; -ALTER +ALTER SYSTEM 20: ALTER SYSTEM SET gp_dtx_recovery_interval to 5; -ALTER +ALTER SYSTEM 20: ALTER SYSTEM SET dtx_phase2_retry_second to 5; -ALTER +ALTER SYSTEM 20: SELECT pg_reload_conf(); pg_reload_conf ---------------- @@ -436,7 +420,7 @@ ALTER (1 row) 20: CREATE TABLE test_retry_abort(a int); -CREATE +CREATE TABLE -- master: set fault to trigger abort prepare -- primary 0: set fault so that retry prepared abort fails. 
@@ -535,11 +519,11 @@ ERROR: fault triggered, fault name:'dtm_broadcast_prepare' fault type:'error' -- cleanup 20: ALTER SYSTEM RESET gp_dtx_recovery_interval; -ALTER +ALTER SYSTEM 20: ALTER SYSTEM RESET gp_dtx_recovery_prepared_period; -ALTER +ALTER SYSTEM 20: ALTER SYSTEM RESET dtx_phase2_retry_second; -ALTER +ALTER SYSTEM 20: SELECT pg_reload_conf(); pg_reload_conf ---------------- @@ -556,4 +540,4 @@ ALTER Success: (1 row) 20: DROP TABLE test_retry_abort; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/crash_recovery_redundant_dtx.out b/src/test/isolation2/expected/crash_recovery_redundant_dtx.out index 3db508d527f..b78e362f44f 100644 --- a/src/test/isolation2/expected/crash_recovery_redundant_dtx.out +++ b/src/test/isolation2/expected/crash_recovery_redundant_dtx.out @@ -1,5 +1,5 @@ 1:CREATE TABLE crash_test_redundant(c1 int); -CREATE +CREATE TABLE 1:SELECT role, preferred_role, content, status FROM gp_segment_configuration; role | preferred_role | content | status diff --git a/src/test/isolation2/expected/create_memory_accounting_tables.out b/src/test/isolation2/expected/create_memory_accounting_tables.out index f311164b13b..3867fd2f886 100644 --- a/src/test/isolation2/expected/create_memory_accounting_tables.out +++ b/src/test/isolation2/expected/create_memory_accounting_tables.out @@ -1,413 +1,413 @@ CREATE TABLE lineitem ( l_orderkey INT8 NOT NULL, l_partkey INTEGER NOT NULL, l_suppkey INTEGER NOT NULL, l_linenumber INTEGER NOT NULL, l_quantity DECIMAL(15, 2) NOT NULL, l_extendedprice DECIMAL(15, 2) NOT NULL, l_discount DECIMAL(15, 2) NOT NULL, l_tax DECIMAL(15, 2) NOT NULL, l_returnflag CHAR(1) NOT NULL, l_linestatus CHAR(1) NOT NULL, l_shipdate DATE NOT NULL, l_commitdate DATE NOT NULL, l_receiptdate DATE NOT NULL, l_shipinstruct CHAR(25) NOT NULL, l_shipmode CHAR(10) NOT NULL, l_comment VARCHAR(44) NOT NULL )PARTITION by range(l_shipdate) (partition p1 start('1992-01-01') end('1998-12-02') every(interval '1 month')); -CREATE +CREATE TABLE 
CREATE TABLE nation ( n_nationkey INTEGER, n_name CHAR(25), n_regionkey INTEGER, n_comment VARCHAR(152) ); -CREATE +CREATE TABLE CREATE TABLE Customer ( C_CUSTKEY INTEGER , C_NAME VARCHAR(25) , C_ADDRESS VARCHAR(40) , C_NATIONKEY INTEGER , C_PHONE CHAR(15) , C_ACCTBAL DECIMAL(15,2) , C_MKTSEGMENT CHAR(10) , C_COMMENT VARCHAR(117) ); -CREATE +CREATE TABLE CREATE TABLE region ( r_regionkey INTEGER, r_name CHAR(25), r_comment VARCHAR(152) ); -CREATE +CREATE TABLE CREATE TABLE orders ( o_orderkey INTEGER, o_custkey INTEGER, o_orderstatus CHAR(1), o_totalprice DECIMAL(15, 2), o_orderdate DATE, o_orderpriority CHAR(15), o_clerk CHAR(15), o_shippriority INTEGER, o_comment VARCHAR(79) ) ; -CREATE +CREATE TABLE CREATE TABLE supplier ( s_suppkey INTEGER, s_name CHAR(25), s_address VARCHAR(40), s_nationkey INTEGER, s_phone CHAR(15), s_acctbal DECIMAL(15, 2), s_comment VARCHAR(101) ); -CREATE +CREATE TABLE CREATE TABLE partsupp ( ps_partkey INTEGER, ps_suppkey INTEGER, ps_availqty INTEGER, ps_supplycost DECIMAL(15, 2), ps_comment VARCHAR(199) ) ; -CREATE +CREATE TABLE INSERT INTO lineitem VALUES (2949,695,89,2,50,79784.50,0.05,0.04,'A','F','1994-08-04','1994-06-23','1994-08-17','TAKE BACK RETURN','FOB','gular courts cajole across t'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2949,1795,80,3,38,64478.02,0.02,0.06,'R','F','1994-05-22','1994-05-25','1994-05-27','COLLECT COD','REG AIR','se slyly requests. 
carefull'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2950,1295,96,1,32,38281.28,0.01,0.05,'N','O','1997-09-21','1997-08-25','1997-10-08','DELIVER IN PERSON','REG AIR','its wake carefully slyly final ideas.'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2950,658,59,2,18,28055.70,0.10,0.01,'N','O','1997-07-19','1997-08-29','1997-08-17','COLLECT COD','TRUCK','uests cajole furio'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2950,527,28,3,14,19985.28,0.01,0.02,'N','O','1997-07-29','1997-08-05','1997-07-31','TAKE BACK RETURN','MAIL','ccounts haggle carefully according'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2950,1864,65,4,45,79463.70,0.08,0.00,'N','O','1997-09-05','1997-09-23','1997-09-11','NONE','FOB','ides the b'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2950,610,11,5,46,69488.06,0.02,0.05,'N','O','1997-07-15','1997-09-30','1997-07-25','COLLECT COD','RAIL','to the regular accounts are slyly carefu'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2950,1736,37,6,27,44218.71,0.01,0.03,'N','O','1997-10-01','1997-09-13','1997-10-08','NONE','TRUCK','are alongside of the carefully silent'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2951,21,72,1,5,4605.10,0.03,0.03,'N','O','1996-03-27','1996-04-16','1996-03-30','NONE','REG AIR','to beans wake ac'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2951,1360,99,2,24,30272.64,0.07,0.03,'N','O','1996-03-24','1996-04-16','1996-04-08','NONE','SHIP','ironic multipliers. 
express, regular'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2951,1861,91,3,40,70514.40,0.02,0.07,'N','O','1996-05-03','1996-04-20','1996-05-22','COLLECT COD','REG AIR','ial deposits wake fluffily about th'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2951,722,55,4,21,34077.12,0.06,0.08,'N','O','1996-04-12','1996-04-27','1996-04-14','DELIVER IN PERSON','REG AIR','nt instructions toward the f'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2951,502,63,5,15,21037.50,0.07,0.00,'N','O','1996-03-25','1996-04-23','1996-03-27','COLLECT COD','REG AIR','inal account'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2951,1371,86,6,18,22902.66,0.06,0.00,'N','O','1996-04-04','1996-04-27','1996-04-06','COLLECT COD','FOB','ep about the final, even package'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2976,86,37,1,32,31554.56,0.06,0.00,'A','F','1994-01-26','1994-02-13','1994-02-10','NONE','MAIL','nding, ironic deposits sleep f'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2976,31,32,2,24,22344.72,0.00,0.03,'A','F','1994-03-19','1994-01-26','1994-04-18','COLLECT COD','TRUCK','ronic pinto beans. slyly bol'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2976,98,49,3,35,34933.15,0.10,0.07,'R','F','1993-12-19','1994-02-14','1994-01-11','NONE','RAIL','boost slyly about the regular, regular re'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2976,811,78,4,22,37659.82,0.00,0.04,'A','F','1994-02-08','1994-03-03','1994-02-12','TAKE BACK RETURN','FOB','ncies kindle furiously. carefull'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2976,1333,10,5,13,16046.29,0.00,0.06,'A','F','1994-02-06','1994-02-02','1994-02-19','NONE','FOB','furiously final courts boost'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2976,1084,20,6,30,29552.40,0.08,0.03,'R','F','1994-03-27','1994-02-01','1994-04-26','TAKE BACK RETURN','RAIL','c ideas! 
unusual'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2977,698,92,1,25,39967.25,0.03,0.07,'N','O','1996-09-21','1996-10-06','1996-10-13','TAKE BACK RETURN','RAIL','furiously pe'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2978,897,98,1,29,52138.81,0.00,0.08,'A','F','1995-06-03','1995-07-25','1995-06-06','NONE','SHIP','ecial ideas promise slyly'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2978,1270,8,2,42,49193.34,0.01,0.06,'N','O','1995-08-19','1995-07-18','1995-09-07','DELIVER IN PERSON','MAIL','ial requests nag blithely alongside of th'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2978,430,18,3,26,34591.18,0.07,0.05,'N','O','1995-07-29','1995-07-22','1995-08-20','COLLECT COD','REG AIR','as haggle against the carefully express dep'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2978,271,53,4,7,8198.89,0.00,0.00,'N','O','1995-07-18','1995-07-03','1995-07-23','NONE','FOB','. final ideas are blithe'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2978,285,67,5,33,39114.24,0.09,0.03,'R','F','1995-05-06','1995-07-23','1995-05-16','COLLECT COD','FOB','s. 
blithely unusual pack'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2978,1671,13,6,4,6290.68,0.08,0.04,'N','O','1995-07-06','1995-07-31','1995-07-19','COLLECT COD','AIR','ffily unusual'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2979,81,57,1,8,7848.64,0.00,0.08,'N','O','1996-06-18','1996-05-21','1996-07-06','COLLECT COD','REG AIR','st blithely; blithely regular gifts dazz'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2979,107,8,2,47,47333.70,0.05,0.00,'N','O','1996-03-25','1996-05-13','1996-04-04','TAKE BACK RETURN','SHIP','iously unusual dependencies wake across'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2979,1879,9,3,35,62330.45,0.04,0.03,'N','O','1996-05-25','1996-06-11','1996-06-24','DELIVER IN PERSON','MAIL','old ideas beneath the blit'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2979,1641,83,4,28,43193.92,0.05,0.08,'N','O','1996-06-04','1996-04-23','1996-06-24','DELIVER IN PERSON','FOB','ing, regular pinto beans. blithel'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2980,364,93,1,2,2528.72,0.09,0.03,'N','O','1996-11-18','1996-10-22','1996-11-27','TAKE BACK RETURN','SHIP','enly across the special, pending packag'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2980,96,72,2,48,47812.32,0.04,0.05,'N','O','1996-09-25','1996-12-09','1996-10-12','NONE','REG AIR','totes. regular pinto'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2980,1321,60,3,27,33002.64,0.08,0.08,'N','O','1996-12-08','1996-12-03','1996-12-14','NONE','REG AIR','theodolites cajole blithely sl'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2980,247,75,4,49,56214.76,0.03,0.02,'N','O','1996-10-04','1996-12-04','1996-10-06','NONE','RAIL','hy packages sleep quic'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2980,1861,62,5,24,42308.64,0.05,0.04,'N','O','1997-01-12','1996-10-27','1997-01-14','NONE','MAIL','elets. 
fluffily regular in'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2980,1087,58,6,43,42487.44,0.01,0.01,'N','O','1996-12-07','1996-11-10','1997-01-02','COLLECT COD','AIR','sts. slyly regu'); -INSERT 1 +INSERT 0 1 INSERT INTO lineitem VALUES (2981,136,15,1,17,17614.21,0.03,0.05,'N','O','1998-10-17','1998-10-02','1998-10-21','DELIVER IN PERSON','RAIL',', unusual packages x-ray. furious'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (1,'Supplier#000000001','N kD4on9OM Ipw3,gf0JBoQDd7tgrzrddZ',17,'27-918-335-1736',5755.94,'each slyly above the careful'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (2,'Supplier#000000002','89eJ5ksX3ImxJQBvxObC,',5,'15-679-861-2259',4032.68,'slyly bold instructions. idle dependen'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (3,'Supplier#000000003','q1,G3Pj6OjIuUYfUoH18BFTKP5aU9bEV3',1,'11-383-516-1199',4192.40,'blithely silent requests after the express dependencies are sl'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (4,'Supplier#000000004','Bk7ah4CK8SYQTepEmvMkkgMwg',15,'25-843-787-7479',4641.08,'riously even requests above the exp'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (5,'Supplier#000000005','Gcdm2rJRzl5qlTVzc',11,'21-151-690-3663',-283.84,'. slyly regular pinto bea'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (6,'Supplier#000000006','tQxuVm7s7CnK',14,'24-696-997-4969',1365.79,'final accounts. regular dolphins use against the furiously ironic decoys.'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (7,'Supplier#000000007','s,4TicNGB4uO6PaSqNBUq',23,'33-990-965-2201',6820.35,'s unwind silently furiously regular courts. final requests are deposits. requests wake quietly blit'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (8,'Supplier#000000008','9Sq4bBH2FQEmaFOocY45sRTxo6yuoG',17,'27-498-742-3860',7627.85,'al pinto beans. 
asymptotes haggl'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (9,'Supplier#000000009','1KhUgZegwM3ua7dsYmekYBsK',10,'20-403-398-8662',5302.37,'s. unusual, even requests along the furiously regular pac'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (10,'Supplier#000000010','Saygah3gYWMp72i PY',24,'34-852-489-8585',3891.91,'ing waters. regular requests ar'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (11,'Supplier#000000011','JfwTs,LZrV, M,9C',18,'28-613-996-1505',3393.08,'y ironic packages. slyly ironic accounts affix furiously; ironically unusual excuses across the flu'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (12,'Supplier#000000012','aLIW q0HYd',8,'18-179-925-7181',1432.69,'al packages nag alongside of the bold instructions. express, daring accounts'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (13,'Supplier#000000013','HK71HQyWoqRWOX8GI FpgAifW,2PoH',3,'13-727-620-7813',9107.22,'requests engage regularly instructions. furiously special requests ar'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (14,'Supplier#000000014','EXsnO5pTNj4iZRm',15,'25-656-247-5058',9189.82,'l accounts boost. fluffily bold warhorses wake'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (15,'Supplier#000000015','olXVbNBfVzRqgokr1T,Ie',8,'18-453-357-6394',308.56,'across the furiously regular platelets wake even deposits. quickly express she'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (16,'Supplier#000000016','YjP5C55zHDXL7LalK27zfQnwejdpin4AMpvh',22,'32-822-502-4215',2972.26,'ously express ideas haggle quickly dugouts? fu'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (17,'Supplier#000000017','c2d,ESHRSkK3WYnxpgw6aOqN0q',19,'29-601-884-9219',1687.81,'eep against the furiously bold ideas. 
fluffily bold packa'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (18,'Supplier#000000018','PGGVE5PWAMwKDZw',16,'26-729-551-1115',7040.82,'accounts snooze slyly furiously bold'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (19,'Supplier#000000019','edZT3es,nBFD8lBXTGeTl',24,'34-278-310-2731',6150.38,'refully final foxes across the dogged theodolites sleep slyly abou'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (20,'Supplier#000000020','iybAE,RmTymrZVYaFZva2SH,j',3,'13-715-945-6730',530.82,'n, ironic ideas would nag blithely about the slyly regular accounts. silent, expr'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (21,'Supplier#000000021','81CavellcrJ0PQ3CPBID0Z0JwyJm0ka5igEs',2,'12-253-590-5816',9365.80,'d. instructions integrate sometimes slyly pending instructions. accounts nag among the'); -INSERT 1 +INSERT 0 1 INSERT INTO supplier VALUES (22,'Supplier#000000022','okiiQFk 8lm6EVX6Q0,bEcO',4,'14-144-830-2814',-966.20,'ironically among the deposits. closely expre'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (1,370,'O',172799.49,'1996-01-02','5-LOW','Clerk#000000951',0,'nstructions sleep furiously among'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (2,781,'O',38426.09,'1996-12-01','1-URGENT','Clerk#000000880',0,'foxes. pending accounts at the pending, silent asymptot'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (3,1234,'F',205654.30,'1993-10-14','5-LOW','Clerk#000000955',0,'sly final accounts boost. carefully regular ideas cajole carefully. depos'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (4,1369,'O',56000.91,'1995-10-11','5-LOW','Clerk#000000124',0,'sits. slyly regular warthogs cajole. regular, regular theodolites acro'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (5,445,'F',105367.67,'1994-07-30','5-LOW','Clerk#000000925',0,'quickly. bold deposits sleep slyly. 
packages use slyly'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (6,557,'F',45523.10,'1992-02-21','4-NOT SPECIFIED','Clerk#000000058',0,'ggle. special, final requests are against the furiously specia'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (7,392,'O',271885.66,'1996-01-10','2-HIGH','Clerk#000000470',0,'ly special requests'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (32,1301,'O',198665.57,'1995-07-16','2-HIGH','Clerk#000000616',0,'ise blithely bold, regular requests. quickly unusual dep'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (33,670,'F',146567.24,'1993-10-27','3-MEDIUM','Clerk#000000409',0,'uriously. furiously final request'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (34,611,'O',73315.48,'1998-07-21','3-MEDIUM','Clerk#000000223',0,'ly final packages. fluffily final deposits wake blithely ideas. spe'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (35,1276,'O',194641.93,'1995-10-23','4-NOT SPECIFIED','Clerk#000000259',0,'zzle. carefully enticing deposits nag furio'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (36,1153,'O',42011.04,'1995-11-03','1-URGENT','Clerk#000000358',0,'quick packages are blithely. slyly silent accounts wake qu'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (37,862,'F',131896.49,'1992-06-03','3-MEDIUM','Clerk#000000456',0,'kly regular pinto beans. carefully unusual waters cajole never'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (38,1249,'O',71553.08,'1996-08-21','4-NOT SPECIFIED','Clerk#000000604',0,'haggle blithely. furiously express ideas haggle blithely furiously regular re'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (39,818,'O',326565.37,'1996-09-20','3-MEDIUM','Clerk#000000659',0,'ole express, ironic requests: ir'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (64,322,'F',35831.73,'1994-07-16','3-MEDIUM','Clerk#000000661',0,'wake fluffily. 
sometimes ironic pinto beans about the dolphin'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (65,163,'P',95469.44,'1995-03-18','1-URGENT','Clerk#000000632',0,'ular requests are blithely pending orbits-- even requests against the deposit'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (66,1292,'F',104190.66,'1994-01-20','5-LOW','Clerk#000000743',0,'y pending requests integrate'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (67,568,'O',182481.16,'1996-12-19','4-NOT SPECIFIED','Clerk#000000547',0,'symptotes haggle slyly around the furiously iron'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (68,286,'O',301968.79,'1998-04-18','3-MEDIUM','Clerk#000000440',0,'pinto beans sleep carefully. blithely ironic deposits haggle furiously acro'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (69,845,'F',204110.73,'1994-06-04','4-NOT SPECIFIED','Clerk#000000330',0,'depths atop the slyly thin deposits detect among the furiously silent accou'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (70,644,'F',125705.32,'1993-12-18','5-LOW','Clerk#000000322',0,'carefully ironic request'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (71,34,'O',260603.38,'1998-01-24','4-NOT SPECIFIED','Clerk#000000271',0,'express deposits along the blithely regul'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (96,1078,'F',64364.30,'1994-04-17','2-HIGH','Clerk#000000395',0,'oost furiously. pinto'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (97,211,'F',100572.55,'1993-01-29','3-MEDIUM','Clerk#000000547',0,'hang blithely along the regular accounts. furiously even ideas after the'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (98,1045,'F',71721.40,'1994-09-25','1-URGENT','Clerk#000000448',0,'c asymptotes. quickly regular packages should have to nag re'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (99,890,'F',108594.87,'1994-03-13','4-NOT SPECIFIED','Clerk#000000973',0,'e carefully ironic packages. 
pending'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (100,1471,'O',198978.27,'1998-02-28','4-NOT SPECIFIED','Clerk#000000577',0,'heodolites detect slyly alongside of the ent'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (101,280,'O',118448.39,'1996-03-17','3-MEDIUM','Clerk#000000419',0,'ding accounts above the slyly final asymptote'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (102,8,'O',184806.58,'1997-05-09','2-HIGH','Clerk#000000596',0,'slyly according to the asymptotes. carefully final packages integrate furious'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (103,292,'O',118745.16,'1996-06-20','4-NOT SPECIFIED','Clerk#000000090',0,'ges. carefully unusual instructions haggle quickly regular f'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (128,740,'F',34997.04,'1992-06-15','1-URGENT','Clerk#000000385',0,'ns integrate fluffily. ironic asymptotes after the regular excuses nag around'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (129,712,'F',254281.41,'1992-11-19','5-LOW','Clerk#000000859',0,'ing tithes. carefully pending deposits boost about the silently express'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (130,370,'F',140213.54,'1992-05-08','2-HIGH','Clerk#000000036',0,'le slyly unusual, regular packages? express deposits det'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (131,928,'F',140726.47,'1994-06-08','3-MEDIUM','Clerk#000000625',0,'after the fluffily special foxes integrate s'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (132,265,'F',133485.89,'1993-06-11','3-MEDIUM','Clerk#000000488',0,'sits are daringly accounts. 
carefully regular foxes sleep slyly about the'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (133,440,'O',95971.06,'1997-11-29','1-URGENT','Clerk#000000738',0,'usly final asymptotes'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (134,62,'F',208201.46,'1992-05-01','4-NOT SPECIFIED','Clerk#000000711',0,'lar theodolites boos'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (135,605,'O',230472.84,'1995-10-21','4-NOT SPECIFIED','Clerk#000000804',0,'l platelets use according t'); -INSERT 1 +INSERT 0 1 INSERT INTO orders VALUES (160,826,'O',114742.32,'1996-12-19','4-NOT SPECIFIED','Clerk#000000342',0,'thely special sauternes wake slyly of t'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (1,'Customer#000000001','IVhzIApeRb ot,c,E',15,'25-989-741-2988',711.56,'BUILDING','to the even, regular platelets. regular, ironic epitaphs nag e'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (2,'Customer#000000002','XSTf4,NCwDVaWNe6tEgvwfmRchLXak',13,'23-768-687-3665',121.65,'AUTOMOBILE','l accounts. blithely ironic theodolites integrate boldly: caref'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (3,'Customer#000000003','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (4,'Customer#000000004','XxVSJsLAGtn',4,'14-128-190-5944',2866.83,'MACHINERY','requests. final, regular ideas sleep final accou'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (5,'Customer#000000005','KvpyuHCplrB84WgAiGV6sYpZq7Tj',3,'13-750-942-6364',794.47,'HOUSEHOLD','n accounts will have to unwind. foxes cajole accor'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (6,'Customer#000000006','sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn',20,'30-114-968-4951',7638.57,'AUTOMOBILE','tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. 
furious'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (7,'Customer#000000007','TcGe5gaZNgVePxU5kRrvXBfkasDTea',18,'28-190-982-9759',9561.95,'AUTOMOBILE','ainst the ironic, express theodolites. express, even pinto beans among the exp'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (8,'Customer#000000008','I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5',17,'27-147-574-9335',6819.74,'BUILDING','among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (9,'Customer#000000009','xKiAFTjUsCuxfeleNqefumTrjS',8,'18-338-906-3675',8324.07,'FURNITURE','r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (10,'Customer#000000010','6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2',5,'15-741-346-9870',2753.54,'HOUSEHOLD','es regular deposits haggle. fur'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (11,'Customer#000000011','PkWS 3HlXqwTuzrKg633BEi',23,'33-464-151-3439',-272.60,'BUILDING','ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans.'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (12,'Customer#000000012','9PWKuhzT4Zr1Q',13,'23-791-276-1263',3396.49,'HOUSEHOLD','to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (13,'Customer#000000013','nsXQu0oVjD7PM659uC3SRSp',3,'13-761-547-5974',3857.34,'BUILDING','ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. 
blithely'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (14,'Customer#000000014','KXkletMlL2JQEA',1,'11-845-129-3851',5266.30,'FURNITURE',', ironic packages across the unus'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (15,'Customer#000000015','YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn',23,'33-687-542-7601',2788.52,'HOUSEHOLD','platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (16,'Customer#000000016','cYiaeMLZSMAOQ2 d0W,',10,'20-781-609-3107',4681.03,'FURNITURE','kly silent courts. thinly regular theodolites sleep fluffily after'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (17,'Customer#000000017','izrh 6jdqtp2eqdtbkswDD8SG4SzXruMfIXyR7',2,'12-970-682-3487',6.34,'AUTOMOBILE','packages wake! blithely even pint'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (18,'Customer#000000018','3txGO AiuFux3zT0Z9NYaFRnZt',6,'16-155-215-1315',5494.43,'BUILDING','s sleep. carefully even instructions nag furiously alongside of t'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (19,'Customer#000000019','uc,3bHIx84H,wdrmLOjVsiqXCq2tr',18,'28-396-526-5053',8914.71,'HOUSEHOLD','nag. furiously careful packages are slyly at the accounts. 
furiously regular in'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (20,'Customer#000000020','JrPk8Pqplj4Ne',22,'32-957-234-8742',7603.40,'FURNITURE','g alongside of the special excuses-- fluffily enticing packages wake'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (21,'Customer#000000021','XYmVpr9yAHDEn',8,'18-902-614-8344',1428.25,'MACHINERY','quickly final accounts integrate blithely furiously u'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (22,'Customer#000000022','QI6p41,FNs5k7RZoCCVPUTkUdYpB',3,'13-806-545-9701',591.98,'MACHINERY','s nod furiously above the furiously ironic ideas.'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (23,'Customer#000000023','OdY W13N7Be3OC5MpgfmcYss0Wn6TKT',3,'13-312-472-8245',3332.02,'HOUSEHOLD','deposits. special deposits cajole slyly. fluffily special deposits about the furiously'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (24,'Customer#000000024','HXAFgIAyjxtdqwimt13Y3OZO 4xeLe7U8PqG',13,'23-127-851-8031',9255.67,'MACHINERY','into beans. fluffily final ideas haggle fluffily'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (25,'Customer#000000025','Hp8GyFQgGHFYSilH5tBfe',12,'22-603-468-3533',7133.70,'FURNITURE','y. accounts sleep ruthlessly according to the regular theodolites. unusual instructions sleep. ironic, final'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (26,'Customer#000000026','8ljrc5ZeMl7UciP',22,'32-363-455-4837',5182.05,'AUTOMOBILE','c requests use furiously ironic requests. slyly ironic dependencies us'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (27,'Customer#000000027','IS8GIyxpBrLpMT0u7',3,'13-137-193-2709',5679.84,'BUILDING','about the carefully ironic pinto beans. accoun'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (28,'Customer#000000028','iVyg0daQ,Tha8x2WPWA9m2529m',8,'18-774-241-1462',1007.18,'FURNITURE','along the regular deposits. 
furiously final pac'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (29,'Customer#000000029','sJ5adtfyAkCK63df2,vF25zyQMVYE34uh',0,'10-773-203-7342',7618.27,'FURNITURE','its after the carefully final platelets x-ray against'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (30,'Customer#000000030','nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY',1,'11-764-165-5076',9321.01,'BUILDING','lithely final requests. furiously unusual account'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (31,'Customer#000000031','LUACbO0viaAv6eXOAebryDB xjVst',23,'33-197-837-7094',5236.89,'HOUSEHOLD','s use among the blithely pending depo'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (32,'Customer#000000032','jD2xZzi UmId,DCtNBLXKj9q0Tlp2iQ6ZcO3J',15,'25-430-914-2194',3471.53,'BUILDING','cial ideas. final, furious requests across the e'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (33,'Customer#000000033','qFSlMuLucBmx9xnn5ib2csWUweg D',17,'27-375-391-1280',-78.56,'AUTOMOBILE','s. slyly regular accounts are furiously. carefully pending requests'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (34,'Customer#000000034','Q6G9wZ6dnczmtOx509xgE,M2KV',15,'25-344-968-5422',8589.70,'HOUSEHOLD','nder against the even, pending accounts. even'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (35,'Customer#000000035','TEjWGE4nBzJL2',17,'27-566-888-7431',1228.24,'HOUSEHOLD','requests. special, express requests nag slyly furiousl'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (36,'Customer#000000036','3TvCzjuPzpJ0,DdJ8kW5U',21,'31-704-669-5769',4987.27,'BUILDING','haggle. enticing, quiet platelets grow quickly bold sheaves. carefully regular acc'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (37,'Customer#000000037','7EV4Pwh,3SboctTWt',8,'18-385-235-7162',-917.75,'FURNITURE','ilent packages are carefully among the deposits. 
furiousl'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (38,'Customer#000000038','a5Ee5e9568R8RLP 2ap7',12,'22-306-880-7212',6345.11,'HOUSEHOLD','lar excuses. closely even asymptotes cajole blithely excuses. carefully silent pinto beans sleep carefully fin'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (39,'Customer#000000039','nnbRg,Pvy33dfkorYE FdeZ60',2,'12-387-467-6509',6264.31,'AUTOMOBILE','tions. slyly silent excuses slee'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (40,'Customer#000000040','gOnGWAyhSV1ofv',3,'13-652-915-8939',1335.30,'BUILDING','rges impress after the slyly ironic courts. foxes are. blithely'); -INSERT 1 +INSERT 0 1 INSERT INTO customer VALUES (41,'Customer#000000041','IM9mzmyoxeBmvNw8lA7G3Ydska2nkZF',10,'20-917-711-4011',270.95,'HOUSEHOLD','ly regular accounts hang bold, silent packages. unusual foxes haggle slyly above the special, final depo'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (0,'ALGERIA',0,'haggle. carefully final deposits detect slyly agai'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (1,'ARGENTINA',1,'al foxes promise slyly according to the regular accounts. bold requests alon'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (2,'BRAZIL',1,'y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (3,'CANADA',1,'eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (4,'EGYPT',4,'y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (5,'ETHIOPIA',0,'ven packages wake quickly. regu'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (6,'FRANCE',3,'refully final requests. regular, ironi'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (7,'GERMANY',3,'l platelets. 
regular accounts x-ray: unusual, regular acco'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (8,'INDIA',2,'ss excuses cajole slyly across the packages. deposits print aroun'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (9,'INDONESIA',2,'slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (10,'IRAN',4,'efully alongside of the slyly final dependencies.'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (11,'IRAQ',4,'nic deposits boost atop the quickly final requests? quickly regula'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (12,'JAPAN',2,'ously. final, express gifts cajole a'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (13,'JORDAN',4,'ic deposits are blithely about the carefully regular pa'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (14,'KENYA',0,'pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (15,'MOROCCO',0,'rns. blithely bold courts among the closely regular packages use furiously bold platelets?'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (16,'MOZAMBIQUE',0,'s. ironic, unusual asymptotes wake blithely r'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (17,'PERU',1,'platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (18,'CHINA',2,'c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (19,'ROMANIA',3,'ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (20,'SAUDI ARABIA',4,'ts. silent requests haggle. 
closely express packages sleep across the blithely'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (21,'VIETNAM',2,'hely enticingly express accounts. even, final'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (22,'RUSSIA',3,'requests against the platelets use never according to the quickly regular pint'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (23,'UNITED KINGDOM',3,'eans boost carefully special requests. accounts are. carefull'); -INSERT 1 +INSERT 0 1 INSERT INTO nation VALUES (24,'UNITED STATES',1,'y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (1,2,3325,771.64,', even theodolites. regular, final theodolites eat after the carefully pending foxes. furiously regular deposits sleep slyly. carefully bold realms above the ironic dependencies haggle careful'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (1,27,8076,993.49,'ven ideas. quickly even packages print. pending multipliers must have to are fluff'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (1,52,3956,337.09,'after the fluffily ironic deposits? blithely special dependencies integrate furiously even excuses. blithely silent theodolites could have to haggle pending, express requests; fu'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (1,77,4069,357.84,'al, regular dependencies serve carefully after the quickly final pinto beans. furiously even deposits sleep quickly final, silent pinto beans. fluffily reg'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (2,3,8895,378.49,'nic accounts. final accounts sleep furiously about the ironic, bold packages. regular, regular accounts'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (2,28,4969,915.27,'ptotes. quickly pending dependencies integrate furiously. fluffily ironic ideas impress blithely above the express accounts. 
furiously even epitaphs need to wak'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (2,53,8539,438.37,'blithely bold ideas. furiously stealthy packages sleep fluffily. slyly special deposits snooze furiously carefully regular accounts. regular deposits according to the accounts nag carefully slyl'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (2,78,3025,306.39,'olites. deposits wake carefully. even, express requests cajole. carefully regular ex'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (3,4,4651,920.92,'ilent foxes affix furiously quickly unusual requests. even packages across the carefully even theodolites nag above the sp'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (3,29,4093,498.13,'ending dependencies haggle fluffily. regular deposits boost quickly carefully regular requests. deposits affix furiously around the pinto beans. ironic, unusual platelets across the p'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (3,54,3917,645.40,'of the blithely regular theodolites. final theodolites haggle blithely carefully unusual ideas. blithely even f'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (3,79,9942,191.92,'unusual, ironic foxes according to the ideas detect furiously alongside of the even, express requests. blithely regular the'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (4,5,1339,113.97,'carefully unusual ideas. packages use slyly. blithely final pinto beans cajole along the furiously express requests. regular orbits haggle carefully. care'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (4,30,6377,591.18,'ly final courts haggle carefully regular accounts. carefully regular accounts could integrate slyly. slyly express packages about the accounts wake slyly'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (4,55,2694,51.37,'g, regular deposits: quick instructions run across the carefully ironic theodolites-- final dependencies haggle into the dependencies. 
f'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (4,80,2480,444.37,'requests sleep quickly regular accounts. theodolites detect. carefully final depths w'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (5,6,3735,255.88,'arefully even requests. ironic requests cajole carefully even dolphin'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (5,31,9653,50.52,'y stealthy deposits. furiously final pinto beans wake furiou'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (5,56,1329,219.83,'iously regular deposits wake deposits. pending pinto beans promise ironic dependencies. even, regular pinto beans integrate'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (5,81,6925,537.98,'sits. quickly fluffy packages wake quickly beyond the blithely regular requests. pending requests cajole among the final pinto beans. carefully busy theodolites affix quickly stealthily'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (6,7,8851,130.72,'usly final packages. slyly ironic accounts poach across the even, sly requests. carefully pending request'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (6,32,1627,424.25,'quick packages. ironic deposits print. furiously silent platelets across the carefully final requests are slyly along the furiously even instructi'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (6,57,3336,642.13,'final instructions. courts wake packages. blithely unusual realms along the multipliers nag'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (6,82,6451,175.32,'accounts alongside of the slyly even accounts wake carefully final instructions-- ruthless platelets wake carefully ideas. even deposits are quickly final,'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (7,8,7454,763.98,'y express tithes haggle furiously even foxes. furiously ironic deposits sleep toward the furiously unusual'); -INSERT 1 +INSERT 0 1 INSERT INTO partsupp VALUES (7,33,2770,149.66,'hould have to nag after the blithely final asymptotes. 
fluffily spe'); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/expected/deadlock_under_entry_db_singleton.out b/src/test/isolation2/expected/deadlock_under_entry_db_singleton.out index d097b84db6e..8fa6bb422b9 100644 --- a/src/test/isolation2/expected/deadlock_under_entry_db_singleton.out +++ b/src/test/isolation2/expected/deadlock_under_entry_db_singleton.out @@ -22,9 +22,9 @@ -- the table to ao table here. CREATE TABLE deadlock_entry_db_singleton_table (c int, d int) WITH (appendonly=true); -CREATE +CREATE TABLE INSERT INTO deadlock_entry_db_singleton_table select i, i+1 from generate_series(1,10) i; -INSERT 10 +INSERT 0 10 -- Function that needs ExclusiveLock on a table. Use a non-SQL -- language for this function so that parser cannot understand its @@ -32,7 +32,7 @@ INSERT 10 -- of the function. If the lock is acquired during plan generation of -- the calling SQL statement, we don't get the deadlock. CREATE FUNCTION function_volatile(x int) RETURNS int AS $$ /*in func*/ BEGIN /*in func*/ UPDATE deadlock_entry_db_singleton_table SET d = d + 1 WHERE c = $1; /*in func*/ RETURN $1 + 1; /*in func*/ END $$ /*in func*/ LANGUAGE plpgsql VOLATILE MODIFIES SQL DATA; -CREATE +CREATE FUNCTION -- inject fault on QD select gp_inject_fault('transaction_start_under_entry_db_singleton', 'reset', 1); diff --git a/src/test/isolation2/expected/disable_autovacuum.out b/src/test/isolation2/expected/disable_autovacuum.out index 409e40bbd2b..382fe3cac90 100644 --- a/src/test/isolation2/expected/disable_autovacuum.out +++ b/src/test/isolation2/expected/disable_autovacuum.out @@ -1,5 +1,5 @@ alter system set autovacuum = off; -ALTER +ALTER SYSTEM select gp_segment_id, pg_reload_conf() from gp_id union select gp_segment_id, pg_reload_conf() from gp_dist_random('gp_id'); gp_segment_id | pg_reload_conf ---------------+---------------- diff --git a/src/test/isolation2/expected/distributed_transactions.out b/src/test/isolation2/expected/distributed_transactions.out index 
1135a126d13..d2278385273 100644 --- a/src/test/isolation2/expected/distributed_transactions.out +++ b/src/test/isolation2/expected/distributed_transactions.out @@ -9,7 +9,7 @@ SELECT gp_inject_fault( 'abort_after_procarray_end', 'error', 1); BEGIN; BEGIN CREATE TABLE test_xact_abort_failure(a int); -CREATE +CREATE TABLE ABORT; ERROR: fault triggered, fault name:'abort_after_procarray_end' fault type:'error' SELECT gp_inject_fault( 'abort_after_procarray_end', 'reset', 1); @@ -27,9 +27,9 @@ SELECT gp_inject_fault( 'abort_after_procarray_end', 'error', dbid) from gp_segm BEGIN; BEGIN CREATE TABLE test_xact_abort_failure(a int); -CREATE +CREATE TABLE ABORT; -ABORT +ROLLBACK SELECT gp_inject_fault( 'abort_after_procarray_end', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = 0; gp_inject_fault ----------------- @@ -45,7 +45,7 @@ SELECT gp_inject_fault( 'abort_after_procarray_end', 'error', dbid) from gp_segm 0U: BEGIN; BEGIN 0U: CREATE TABLE test_xact_abort_failure(a int); -CREATE +CREATE TABLE 0U: ABORT; ERROR: fault triggered, fault name:'abort_after_procarray_end' fault type:'error' SELECT gp_inject_fault( 'abort_after_procarray_end', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = 0; diff --git a/src/test/isolation2/expected/distributedlog-bug.out b/src/test/isolation2/expected/distributedlog-bug.out index 829c96b3dc5..6b716726a92 100644 --- a/src/test/isolation2/expected/distributedlog-bug.out +++ b/src/test/isolation2/expected/distributedlog-bug.out @@ -6,7 +6,7 @@ -- in-progress. 
-- CREATE TABLE distributed_snapshot_test ( id INTEGER, f FLOAT); -CREATE +CREATE TABLE 1: BEGIN; BEGIN @@ -27,7 +27,7 @@ SET -- Drop table in a transaction 1: drop table distributed_snapshot_test; -DROP +DROP TABLE 3: vacuum pg_class; VACUUM diff --git a/src/test/isolation2/expected/drop_rename.out b/src/test/isolation2/expected/drop_rename.out index af0916cb293..96cdb5bb428 100644 --- a/src/test/isolation2/expected/drop_rename.out +++ b/src/test/isolation2/expected/drop_rename.out @@ -5,17 +5,17 @@ -- relation does not exist error. 1:drop table if exists t1; -DROP +DROP TABLE 1:drop table if exists newt1; -DROP +DROP TABLE 1:create table t1 (a int, b text) distributed by (a); -CREATE +CREATE TABLE 1:insert into t1 select i, 'abc '||i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 1:begin; BEGIN 1:alter table t1 rename to newt1; -ALTER +ALTER TABLE 1:analyze newt1; ANALYZE -- this drop should block to acquire AccessExclusive lock on t1's OID. @@ -32,17 +32,17 @@ ERROR: table "t1" does not exist -- DROP is executed concurrently with ALTER RENAME but not ANALYZE. 1:drop table if exists t2; -DROP +DROP TABLE 1:drop table if exists newt2; -DROP +DROP TABLE 1:create table t2 (a int, b text) distributed by (a); -CREATE +CREATE TABLE 1:insert into t2 select i, 'pqr '||i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 1:begin; BEGIN 1:alter table t2 rename to newt2; -ALTER +ALTER TABLE 2&:drop table t2; 1:commit; COMMIT @@ -57,20 +57,20 @@ ERROR: table "t2" does not exist -- The same, but with DROP IF EXISTS. (We used to have a bug, where the DROP -- command found and drop the relation in the segments, but not in master.) 1:drop table if exists t3; -DROP +DROP TABLE 1:create table t3 (a int, b text) distributed by (a); -CREATE +CREATE TABLE 1:insert into t3 select i, '123 '||i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 1:begin; BEGIN 1:alter table t3 rename to t3_new; -ALTER +ALTER TABLE 2&:drop table if exists t3; 1:commit; COMMIT 2<: <... 
completed> -DROP +DROP TABLE 2:select count(*) from t3; ERROR: relation "t3" does not exist LINE 1: select count(*) from t3; @@ -89,15 +89,15 @@ LINE 1: select count(*) from t3; (3 rows) 1:drop table if exists t3; -DROP +DROP TABLE 1:create table t3 (a int, b text) distributed by (a); -CREATE +CREATE TABLE 1:insert into t3 select i, '123 '||i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 1:begin; BEGIN 1:drop table t3; -DROP +DROP TABLE 2&:drop table if exists t3; 3&:drop table t3; 1:commit; @@ -105,7 +105,7 @@ COMMIT 3<: <... completed> ERROR: table "t3" does not exist 2<: <... completed> -DROP +DROP TABLE 2:select count(*) from t3; ERROR: relation "t3" does not exist LINE 1: select count(*) from t3; diff --git a/src/test/isolation2/expected/enable_autovacuum.out b/src/test/isolation2/expected/enable_autovacuum.out index 90c866cb6dc..c8a4aec3dc4 100644 --- a/src/test/isolation2/expected/enable_autovacuum.out +++ b/src/test/isolation2/expected/enable_autovacuum.out @@ -1,5 +1,5 @@ alter system set autovacuum = on; -ALTER +ALTER SYSTEM select gp_segment_id, pg_reload_conf() from gp_id union select gp_segment_id, pg_reload_conf() from gp_dist_random('gp_id'); gp_segment_id | pg_reload_conf ---------------+---------------- diff --git a/src/test/isolation2/expected/execute_on_utilitymode.out b/src/test/isolation2/expected/execute_on_utilitymode.out index 84c397c7776..8d6d24d823f 100644 --- a/src/test/isolation2/expected/execute_on_utilitymode.out +++ b/src/test/isolation2/expected/execute_on_utilitymode.out @@ -4,17 +4,17 @@ -- First, create test functions with different EXECUTE ON options -create function srf_on_master () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON COORDINATOR; -CREATE +create function srf_on_coordinator () returns setof text as $$ begin /* in func */ return 
next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON COORDINATOR; +CREATE FUNCTION create function srf_on_all_segments () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON ALL SEGMENTS; -CREATE +CREATE FUNCTION create function srf_on_any () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON ANY IMMUTABLE; -CREATE +CREATE FUNCTION create function srf_on_initplan () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON INITPLAN; -CREATE +CREATE FUNCTION -- Now try executing them in utility mode, in the master node and on a -- segment. The expected behavior is that the function runs on the node @@ -23,9 +23,9 @@ CREATE -- Join with a table, to give the planner something more exciting to do -- than just create the FunctionScan plan. 
create table fewrows (t text) distributed by (t); -CREATE +CREATE TABLE insert into fewrows select g from generate_series(1, 10) g; -INSERT 10 +INSERT 0 10 -1U: select * from srf_on_master() as srf (x) left join fewrows on x = t; x | t diff --git a/src/test/isolation2/expected/export_distributed_snapshot.out b/src/test/isolation2/expected/export_distributed_snapshot.out index d9270f20e30..fcd8dd58442 100644 --- a/src/test/isolation2/expected/export_distributed_snapshot.out +++ b/src/test/isolation2/expected/export_distributed_snapshot.out @@ -10,31 +10,31 @@ -- start_ignore DROP FUNCTION IF EXISTS corrupt_snapshot_file(text, text); -DROP +DROP FUNCTION DROP FUNCTION IF EXISTS snapshot_file_ds_fields_exist(text); -DROP -DROP LANGUAGE IF EXISTS plpython3u; -DROP +DROP FUNCTION +DROP LANGUAGE IF EXISTS plpython3u cascade; +DROP LANGUAGE DROP TABLE IF EXISTS export_distributed_snapshot_test1; -DROP +DROP TABLE +-- end_ignore CREATE LANGUAGE plpython3u; -CREATE --- end_ignore +CREATE LANGUAGE -- Corrupt field entry for given snapshot file CREATE OR REPLACE FUNCTION corrupt_snapshot_file(token text, field text) RETURNS integer as $$ import os content = bytearray() query = "select (select datadir from gp_segment_configuration where role='p' and content=-1) || '/pg_snapshots/' as path" rv = plpy.execute(query) abs_path = rv[0]['path'] snapshot_file = abs_path + token if not os.path.isfile(snapshot_file): plpy.info('skipping non-existent file %s' % (snapshot_file)) else: plpy.info('corrupting file %s for field %s' % (snapshot_file, field)) with open(snapshot_file , "rb+") as f: for line in f: l = line.decode() id = l.split(":")[0] if field == id: corrupt = l[:-2] + '*' + l[len(l)-1:] content.extend(corrupt.encode()) else: content.extend(line) f.seek(0) f.truncate f.write(content) f.close() return 0 $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- Determine if field exists for given snapshot file CREATE OR REPLACE FUNCTION snapshot_file_ds_fields_exist(token text) RETURNS 
boolean as $$ import os content = bytearray() query = "select (select datadir from gp_segment_configuration where role='p' and content=-1) || '/pg_snapshots/' as path" rv = plpy.execute(query) abs_path = rv[0]['path'] snapshot_file = abs_path + token if not os.path.isfile(snapshot_file): plpy.info('snapshot file %s does not exist' % (snapshot_file)) return -1 else: plpy.info('checking file %s for ds fields' % (snapshot_file)) with open(snapshot_file , "rb+") as f: for line in f: l = line.decode() if "ds" in l: return True return False $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- INSERT test CREATE TABLE export_distributed_snapshot_test1 (a int); -CREATE +CREATE TABLE INSERT INTO export_distributed_snapshot_test1 values(1); -INSERT 1 +INSERT 0 1 1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN @@ -45,7 +45,7 @@ BEGIN (1 row) INSERT INTO export_distributed_snapshot_test1 values(2); -INSERT 1 +INSERT 0 1 SELECT * FROM export_distributed_snapshot_test1; a --- @@ -81,9 +81,9 @@ COMMIT -- DELETE test CREATE TABLE export_distributed_snapshot_test2 (a int); -CREATE +CREATE TABLE INSERT INTO export_distributed_snapshot_test2 SELECT a FROM generate_series(1,3) a; -INSERT 3 +INSERT 0 3 1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN @@ -134,9 +134,9 @@ COMMIT -- UPDATE test CREATE TABLE export_distributed_snapshot_test3 (a int); -CREATE +CREATE TABLE INSERT INTO export_distributed_snapshot_test3 SELECT a FROM generate_series(1,5) a; -INSERT 5 +INSERT 0 5 1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN @@ -187,7 +187,7 @@ COMMIT -- DROP test CREATE TABLE export_distributed_snapshot_test4 (a int); -CREATE +CREATE TABLE 1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN @@ -209,7 +209,7 @@ BEGIN -- Drop table in transaction 2: DROP TABLE export_distributed_snapshot_test4; -DROP +DROP TABLE 2: COMMIT; COMMIT @@ -263,9 +263,9 @@ ERROR: could not import the requested snapshot DETAIL: The source process with PID 651456 is not 
running anymore. 1: END; -END +COMMIT 2: END; -END +ROLLBACK -- dsxminall @@ -292,9 +292,9 @@ BEGIN SET 1: END; -END +COMMIT 2: END; -END +COMMIT -- dsxmin 1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; @@ -320,9 +320,9 @@ BEGIN ERROR: invalid snapshot data in file "pg_snapshots/00000007-00000006-1" 1: END; -END +COMMIT 2: END; -END +ROLLBACK -- Test export snapshot in utility mode does not export distributed snapshot fields @@ -345,7 +345,7 @@ BEGIN -1Uq: ... 1: END; -END +COMMIT -- Test import snapshot in utility mode fails if distributed snapshot fields exist 1: BEGIN; @@ -367,7 +367,7 @@ HINT: export the snapshot in utility mode -1Uq: ... 1: END; -END +COMMIT -- Test export snapshot in utility mode and import snapshot in utility mode succeeds -1U: @db_name postgres: BEGIN; diff --git a/src/test/isolation2/expected/frozen_insert_crash.out b/src/test/isolation2/expected/frozen_insert_crash.out index bf41eebf750..334f2dbf389 100644 --- a/src/test/isolation2/expected/frozen_insert_crash.out +++ b/src/test/isolation2/expected/frozen_insert_crash.out @@ -13,7 +13,7 @@ -- the WAL record responsible for updating it to frozen. -- After crash recovery, the insert will follow regular MVCC and not be seen. 1: create table tab_fi(a int) with (appendoptimized=true) distributed replicated; -CREATE +CREATE TABLE -- switch WAL on seg0 to reduce flakiness 1: select gp_segment_id, pg_switch_wal() is not null from gp_dist_random('gp_id') where gp_segment_id = 0; @@ -100,12 +100,12 @@ RESET RESET 1: drop table tab_fi; -DROP +DROP TABLE -- Case 2. crash after we have flushed the WAL that updates the row to be frozen. -- After crash recovery, the insert should be seen. 
1: create table tab_fi(a int) with (appendoptimized=true) distributed replicated; -CREATE +CREATE TABLE -- switch WAL on seg0 to reduce flakiness 1: select gp_segment_id, pg_switch_wal() is not null from gp_dist_random('gp_id') where gp_segment_id = 0; @@ -245,20 +245,22 @@ END (2 rows) -- Same set of tests for bitmap LOV insert. +create extension if not exists pageinspect; +CREATE EXTENSION -- Function to check the bitmap lov content regarding the column 'b' -- which is the table column that we will have bitmap created on. -- Basically, we want to see if "SELECT b FROM pg_bitmapindex.pg_bm_xxx" -- returns the same result in seqscan and indexscan. CREATE OR REPLACE FUNCTION insert_bm_lov_res() RETURNS void AS $$ DECLARE lov_table text; /* in func */ sql text; /* in func */ BEGIN /* in func */ drop table if exists bm_lov_res; /* in func */ create temp table bm_lov_res(b int); /* in func */ SELECT c.relname INTO lov_table /* in func */ FROM bm_metap('tab_fi_idx') b /* in func */ JOIN pg_class c ON b.auxrelid = c.oid; /* in func */ sql := format('INSERT INTO bm_lov_res SELECT b FROM pg_bitmapindex.%I', lov_table); /* in func */ EXECUTE sql; /* in func */ END; /* in func */ $$ LANGUAGE plpgsql; -CREATE +CREATE FUNCTION 1: create table tab_fi(a int, b int) with (appendoptimized=true) distributed replicated; -CREATE +CREATE TABLE 1: create index tab_fi_idx on tab_fi using bitmap(b); -CREATE +CREATE INDEX 1: insert into tab_fi values(1, 1); -INSERT 1 +INSERT 0 1 -- switch WAL on seg0 to reduce flakiness 1: select gp_segment_id, pg_switch_wal() is not null from gp_dist_random('gp_id') where gp_segment_id = 0; gp_segment_id | ?column? @@ -345,16 +347,16 @@ SET (1 row) 0Uq: ... 
1: drop table tab_fi; -DROP +DROP TABLE -- case 2: suspend and flush WAL after freezing the tuple 1: create table tab_fi(a int, b int) with (appendoptimized=true) distributed replicated; -CREATE +CREATE TABLE 1: create index tab_fi_idx on tab_fi using bitmap(b); -CREATE +CREATE INDEX 1: insert into tab_fi values(1, 1); -INSERT 1 +INSERT 0 1 -- switch WAL on seg0 to reduce flakiness 1: select gp_segment_id, pg_switch_wal() is not null from gp_dist_random('gp_id') where gp_segment_id = 0; gp_segment_id | ?column? @@ -441,6 +443,50 @@ SET 2 (2 rows) +1: drop extension pageinspect; +DROP EXTENSION + +-- Test for aoseg: suspend the insert into aoseg table before we mark the row frozen. +-- Another session should still be able to choose a different segno. +1: create table tab_aoseg(a int) using ao_row; +CREATE TABLE +1: select gp_inject_fault('insert_aoseg_before_freeze', 'suspend', dbid) from gp_segment_configuration where role = 'p' and content = 0; + gp_inject_fault +----------------- + Success: +(1 row) +1: begin; +BEGIN +1>: insert into tab_aoseg select * from generate_series(1,10); +-- wait until the aoseg record is inserted but not yet frozen +2: select gp_wait_until_triggered_fault('insert_aoseg_before_freeze', 1, dbid) from gp_segment_configuration where role = 'p' and content = 0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) +2: begin; +BEGIN +2>: insert into tab_aoseg select * from generate_series(1,10); +3: select gp_inject_fault('insert_aoseg_before_freeze', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = 0; + gp_inject_fault +----------------- + Success: +(1 row) +1<: <... completed> +INSERT 0 10 +2<: <... 
completed> +INSERT 0 10 +1: end; +COMMIT +2: end; +COMMIT +3: select segment_id, segno, eof from gp_toolkit.__gp_aoseg('tab_aoseg') where segment_id = 0; + segment_id | segno | eof +------------+-------+----- + 0 | 1 | 88 + 0 | 2 | 88 +(2 rows) + -- validate that we've actually tested desired scan method -- for some reason this disrupts the output of subsequent queries so -- validating at the end here diff --git a/src/test/isolation2/expected/fsync_ao.out b/src/test/isolation2/expected/fsync_ao.out index 4f2a466507a..c4dbaae74ae 100644 --- a/src/test/isolation2/expected/fsync_ao.out +++ b/src/test/isolation2/expected/fsync_ao.out @@ -21,17 +21,17 @@ (exited with code 0) create table fsync_ao(a int, b int) with (appendoptimized = true) distributed by (a); -CREATE +CREATE TABLE create table fsync_co(a int, b int) with (appendoptimized = true, orientation = column) distributed by (a); -CREATE +CREATE TABLE -- no fsync requests should ever be registered for unlogged tables create unlogged table ul_fsync_co(a int, b int, c int) using ao_column distributed by (a); -CREATE +CREATE TABLE insert into fsync_ao select i, i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 insert into fsync_co select i, i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 -- Fault to check that mirror has flushed pending fsync requests. select gp_inject_fault_infinite('restartpoint_guts', 'skip', dbid) from gp_segment_configuration where role = 'm' and content = 0; @@ -66,11 +66,11 @@ select gp_inject_fault_infinite('ao_fsync_counter', 'skip', dbid) from gp_segmen -- Write ao and co data files including aoseg & gp_fastsequence. -- These should be fsync-ed by checkpoint & restartpoint. 
insert into fsync_ao select i, i from generate_series(1,20)i; -INSERT 20 +INSERT 0 20 insert into fsync_co select i, i from generate_series(1,20)i; -INSERT 20 +INSERT 0 20 insert into ul_fsync_co select i, i, i from generate_series(1,20)i; -INSERT 20 +INSERT 0 20 checkpoint; CHECKPOINT @@ -103,19 +103,19 @@ select gp_inject_fault('ao_fsync_counter', 'status', dbid) from gp_segment_confi 1: begin; BEGIN 1: insert into fsync_ao select i, i from generate_series(1,20)i; -INSERT 20 +INSERT 0 20 1: insert into fsync_co select i, i from generate_series(1,20)i; -INSERT 20 +INSERT 0 20 1: insert into ul_fsync_co select i, i, i from generate_series(1,20)i; -INSERT 20 +INSERT 0 20 insert into fsync_ao select i, i from generate_series(21,40)i; -INSERT 20 +INSERT 0 20 insert into fsync_co select i, i from generate_series(21,40)i; -INSERT 20 +INSERT 0 20 insert into ul_fsync_co select i, i, i from generate_series(1,40)i; -INSERT 40 +INSERT 0 40 1: end; -END +COMMIT -- Generate some invisible tuples in both the tables so as to trigger -- compaction during vacuum. delete from fsync_ao where a > 20; @@ -207,23 +207,23 @@ select gp_inject_fault('ao_fsync_counter', 'status', dbid) from gp_segment_confi update fsync_co set b = -a; UPDATE 70 drop table fsync_co; -DROP +DROP TABLE update ul_fsync_co set c = -a; UPDATE 23 drop table ul_fsync_co; -DROP +DROP TABLE -- Drop but don't commit the transaction. begin; BEGIN update fsync_ao set b = -a; UPDATE 50 drop table fsync_ao; -DROP +DROP TABLE abort; -ABORT +ROLLBACK -- Fsync request for the following insert should not be forgotten. 
insert into fsync_ao select * from generate_series(41,60)i; -INSERT 20 +INSERT 0 20 checkpoint; CHECKPOINT @@ -244,8 +244,8 @@ select gp_wait_until_triggered_fault('ao_fsync_counter', 13, dbid) from gp_segme select gp_inject_fault('ao_fsync_counter', 'status', dbid) from gp_segment_configuration where content=0 and role='m'; gp_inject_fault ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Success: fault name:'ao_fsync_counter' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'13' + (1 row) -- Reset all faults. diff --git a/src/test/isolation2/expected/fts_errors.out b/src/test/isolation2/expected/fts_errors.out index 69cb7a2a38c..fca1ecaf5d8 100644 --- a/src/test/isolation2/expected/fts_errors.out +++ b/src/test/isolation2/expected/fts_errors.out @@ -27,8 +27,8 @@ (exited with code 0) -- Helper function -CREATE or REPLACE FUNCTION wait_until_segments_are_down(num_segs int) RETURNS bool AS $$ declare retries int; /* in func */ begin /* in func */ retries := 120; /* in func */ loop /* in func */ if (select count(*) = num_segs from gp_segment_configuration where status = 'd') then /* in func */ return true; /* in func */ end if; /* in func */ if retries <= 0 then /* in func */ return false; /* in func */ end if; /* in func */ perform pg_sleep(1); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE or REPLACE FUNCTION wait_until_segments_are_down(num_segs int) RETURNS bool AS $$ declare retries int; /* in func */ begin /* in func */ retries := 1200; /* in func */ loop /* in func */ if (select count(*) = num_segs from gp_segment_configuration where status = 'd') then /* in func */ return true; /* in func 
*/ end if; /* in func */ if retries <= 0 then /* in func */ return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; +CREATE FUNCTION -- no segment down. select count(*) from gp_segment_configuration where status = 'd'; @@ -38,34 +38,34 @@ select count(*) from gp_segment_configuration where status = 'd'; (1 row) drop table if exists fts_errors_test; -DROP +DROP TABLE create table fts_errors_test(a int); -CREATE +CREATE TABLE 1:BEGIN; BEGIN 1:END; -END +COMMIT 2:BEGIN; BEGIN 2:INSERT INTO fts_errors_test SELECT * FROM generate_series(1,100); -INSERT 100 +INSERT 0 100 3:BEGIN; BEGIN 3:CREATE TEMP TABLE tmp3 (c1 int, c2 int); -CREATE +CREATE TABLE 3:DECLARE c1 CURSOR for select * from tmp3; -DECLARE +DECLARE CURSOR 4:CREATE TEMP TABLE tmp4 (c1 int, c2 int); -CREATE +CREATE TABLE 5:BEGIN; BEGIN 5:CREATE TEMP TABLE tmp5 (c1 int, c2 int); -CREATE +CREATE TABLE 5:SAVEPOINT s1; SAVEPOINT 5:CREATE TEMP TABLE tmp51 (c1 int, c2 int); -CREATE +CREATE TABLE -- probe to make sure when we call gp_request_fts_probe_scan() next -- time below, don't overlap with auto-trigger of FTS scans by FTS @@ -144,7 +144,7 @@ select gp_inject_fault('get_dns_cached_address', 'reset', 1); 1:BEGIN; BEGIN 1:END; -END +COMMIT -- session 2: in transaction, gxid is dispatched to writer gang, cann't -- update cdb_component_dbs, following query should fail -- start_ignore @@ -177,7 +177,7 @@ ERROR: gang was lost due to cluster reconfiguration (cdbgang_async.c:98) 5:ROLLBACK TO SAVEPOINT s1; ERROR: Could not rollback to savepoint (ROLLBACK TO SAVEPOINT s1) 5:END; -END +ROLLBACK 1q: ... 2q: ... 3q: ... 
diff --git a/src/test/isolation2/expected/fts_errors_1.out b/src/test/isolation2/expected/fts_errors_1.out index 945b669d7f6..6b99fc282c8 100644 --- a/src/test/isolation2/expected/fts_errors_1.out +++ b/src/test/isolation2/expected/fts_errors_1.out @@ -34,7 +34,7 @@ -- Helper function CREATE or REPLACE FUNCTION wait_until_segments_are_down(num_segs int) RETURNS bool AS $$ declare retries int; /* in func */ begin /* in func */ retries := 1200; /* in func */ loop /* in func */ if (select count(*) = num_segs from gp_segment_configuration where status = 'd') then /* in func */ return true; /* in func */ end if; /* in func */ if retries <= 0 then /* in func */ return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION -- no segment down. select count(*) from gp_segment_configuration where status = 'd'; @@ -44,34 +44,34 @@ select count(*) from gp_segment_configuration where status = 'd'; (1 row) drop table if exists fts_errors_test; -DROP +DROP TABLE create table fts_errors_test(a int); -CREATE +CREATE TABLE 1:BEGIN; BEGIN 1:END; -END +COMMIT 2:BEGIN; BEGIN 2:INSERT INTO fts_errors_test SELECT * FROM generate_series(1,100); -INSERT 100 +INSERT 0 100 3:BEGIN; BEGIN 3:CREATE TEMP TABLE tmp3 (c1 int, c2 int); -CREATE +CREATE TABLE 3:DECLARE c1 CURSOR for select * from tmp3; -DECLARE +DECLARE CURSOR 4:CREATE TEMP TABLE tmp4 (c1 int, c2 int); -CREATE +CREATE TABLE 5:BEGIN; BEGIN 5:CREATE TEMP TABLE tmp5 (c1 int, c2 int); -CREATE +CREATE TABLE 5:SAVEPOINT s1; SAVEPOINT 5:CREATE TEMP TABLE tmp51 (c1 int, c2 int); -CREATE +CREATE TABLE -- probe to make sure when we call gp_request_fts_probe_scan() next -- time below, don't overlap with auto-trigger of FTS scans by FTS @@ -130,7 +130,7 @@ select gp_inject_fault('get_dns_cached_address', 'reset', 1); 1:BEGIN; BEGIN 1:END; -END +COMMIT -- session 2: in transaction, gxid is 
dispatched to writer gang, cann't -- update cdb_component_dbs, following query should fail 2:END; @@ -161,7 +161,7 @@ ERROR: gang was lost due to cluster reconfiguration (cdbgang_async.c:98) 5:ROLLBACK TO SAVEPOINT s1; ERROR: Could not rollback to savepoint (ROLLBACK TO SAVEPOINT s1) 5:END; -END +ROLLBACK 1q: ... 2q: ... 3q: ... diff --git a/src/test/isolation2/expected/fts_segment_reset.out b/src/test/isolation2/expected/fts_segment_reset.out index 141c104f772..2f346219da9 100644 --- a/src/test/isolation2/expected/fts_segment_reset.out +++ b/src/test/isolation2/expected/fts_segment_reset.out @@ -58,7 +58,7 @@ select gp_inject_fault_infinite('postmaster_server_loop_no_sigkill', 'skip', dbi 3:set gp_gang_creation_retry_timer = 10000; SET 3:create table fts_reset_t3(a int); -CREATE +CREATE TABLE 1<: <... completed> ERROR: fault triggered, fault name:'start_prepare' fault type:'panic' @@ -105,7 +105,7 @@ select pg_reload_conf(); -- The only table that should have been created successfully drop table fts_reset_t3; -DROP +DROP TABLE -- In case anything goes wrong, we don't want to affect other tests. So rebalance the cluster anyway. 
!\retcode gprecoverseg -aF !\retcode gprecoverseg -ar \ No newline at end of file diff --git a/src/test/isolation2/expected/fts_session_reset.out b/src/test/isolation2/expected/fts_session_reset.out index afeef05ed74..e667f4c546d 100644 --- a/src/test/isolation2/expected/fts_session_reset.out +++ b/src/test/isolation2/expected/fts_session_reset.out @@ -6,15 +6,24 @@ -- set these values purely to cut down test time, as default ts trigger is -- every min and 5 retries +alter system set gp_fts_probe_interval to 10; +ALTER SYSTEM +alter system set gp_fts_probe_retries to 0; +ALTER SYSTEM +select pg_reload_conf(); + pg_reload_conf +---------------- + t +(1 row) create table test_fts_session_reset(c1 int); -CREATE +CREATE TABLE 1:BEGIN; BEGIN -- let the dispatcher create a gang 1:insert into test_fts_session_reset select * from generate_series(1,20); -INSERT 20 +INSERT 0 20 -- this injected fault can make dispatcher think the primary is down 2:select gp_inject_fault_infinite('fts_conn_startup_packet', 'error', dbid) from gp_segment_configuration where role='p' and content=0; gp_inject_fault_infinite @@ -44,12 +53,7 @@ ERROR: gang was lost due to cluster reconfiguration (cdbgang_async.c:98) 1:select count(*) from test_fts_session_reset; ERROR: current transaction is aborted, commands ignored until end of transaction block 1:END; -END -1:select pg_sleep(30); - pg_sleep ----------- - -(1 row) +ROLLBACK 1:select count(*) from test_fts_session_reset; count ------- @@ -117,3 +121,14 @@ select count(*) from gp_segment_configuration where status = 'd'; ------- 0 (1 row) + +alter system reset gp_fts_probe_interval; +ALTER SYSTEM +alter system reset gp_fts_probe_retries; +ALTER SYSTEM +select pg_reload_conf(); + pg_reload_conf +---------------- + t +(1 row) + diff --git a/src/test/isolation2/expected/gdd/avoid-qd-deadlock.out b/src/test/isolation2/expected/gdd/avoid-qd-deadlock.out index 81825036cbc..6d064de62d2 100644 --- a/src/test/isolation2/expected/gdd/avoid-qd-deadlock.out 
+++ b/src/test/isolation2/expected/gdd/avoid-qd-deadlock.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS tsudf; -DROP +DROP TABLE CREATE TABLE tsudf (c int, d int); -CREATE +CREATE TABLE CREATE OR REPLACE FUNCTION func1(int) RETURNS int AS $$ BEGIN UPDATE tsudf SET d = d+1 WHERE c = $1; /* in func */ RETURN $1; /* in func */ END; /* in func */ $$ LANGUAGE plpgsql; -CREATE +CREATE FUNCTION INSERT INTO tsudf select i, i+1 from generate_series(1,10) i; -INSERT 10 +INSERT 0 10 SELECT gp_inject_fault('upgrade_row_lock', 'reset', 1); gp_inject_fault diff --git a/src/test/isolation2/expected/gdd/concurrent_update.out b/src/test/isolation2/expected/gdd/concurrent_update.out index 761e3f681a1..a109c0b6a0e 100644 --- a/src/test/isolation2/expected/gdd/concurrent_update.out +++ b/src/test/isolation2/expected/gdd/concurrent_update.out @@ -1,8 +1,8 @@ -- Test concurrent update a table with a varying length type CREATE TABLE t_concurrent_update(a int, b int, c char(84)); -CREATE +CREATE TABLE INSERT INTO t_concurrent_update VALUES(1,1,'test'); -INSERT 1 +INSERT 0 1 1: BEGIN; BEGIN @@ -14,7 +14,7 @@ UPDATE 1 SET 2&: UPDATE t_concurrent_update SET b=b+10 WHERE a=1; 1: END; -END +COMMIT 2<: <... completed> UPDATE 1 1: SELECT * FROM t_concurrent_update; @@ -26,13 +26,13 @@ UPDATE 1 2q: ... DROP TABLE t_concurrent_update; -DROP +DROP TABLE -- Test the concurrent update transaction order on the segment is reflected on master 1: CREATE TABLE t_concurrent_update(a int, b int); -CREATE +CREATE TABLE 1: INSERT INTO t_concurrent_update VALUES(1,1); -INSERT 1 +INSERT 0 1 2: BEGIN; BEGIN @@ -76,9 +76,9 @@ UPDATE 1 Success: (1 row) 2<: <... completed> -END +COMMIT 3<: <... completed> -END +COMMIT 2q: ... 3q: ... @@ -117,7 +117,7 @@ SET Success: (1 row) 4: END; -END +COMMIT 4: SELECT gp_inject_fault('before_get_distributed_xid', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=1; gp_inject_fault ----------------- @@ -127,14 +127,14 @@ END 5<: <... 
completed> UPDATE 1 5: END; -END +COMMIT 6: SELECT * FROM t_concurrent_update; a | b ---+---- 1 | 41 (1 row) 6: DROP TABLE t_concurrent_update; -DROP +DROP TABLE 4q: ... 5q: ... 6q: ... @@ -152,9 +152,9 @@ DROP -- distribution keys is updated. 0: create table tab_update_hashcol (c1 int, c2 int) distributed by(c1); -CREATE +CREATE TABLE 0: insert into tab_update_hashcol values(1,1); -INSERT 1 +INSERT 0 1 0: select * from tab_update_hashcol; c1 | c2 ----+---- @@ -169,18 +169,18 @@ BEGIN UPDATE 1 2&: update tab_update_hashcol set c1 = c1 + 1 where c1 = 1; 1: end; -END +COMMIT 2<: <... completed> ERROR: EvalPlanQual can not handle subPlan with Motion node (seg1 127.0.1.1:7003 pid=34629) 2: end; -END +ROLLBACK 0: select * from tab_update_hashcol; c1 | c2 ----+---- 2 | 1 (1 row) 0: drop table tab_update_hashcol; -DROP +DROP TABLE -- Test EvalplanQual -- If we enable the GDD, then the lock maybe downgrade to @@ -192,13 +192,13 @@ DROP -- GDD is enabled and EvalPlanQual is tiggered. 0: create table tab_update_epq1 (c1 int, c2 int) distributed randomly; -CREATE +CREATE TABLE 0: create table tab_update_epq2 (c1 int, c2 int) distributed randomly; -CREATE +CREATE TABLE 0: insert into tab_update_epq1 values(1,1); -INSERT 1 +INSERT 0 1 0: insert into tab_update_epq2 values(1,1); -INSERT 1 +INSERT 0 1 0: select * from tab_update_epq1; c1 | c2 ----+---- @@ -223,11 +223,11 @@ BEGIN UPDATE 1 2&: update tab_update_epq1 set c1 = tab_update_epq1.c1 + 1 from tab_update_epq2 where tab_update_epq1.c2 = tab_update_epq2.c2; 1: end; -END +COMMIT 2<: <... completed> ERROR: EvalPlanQual can not handle subPlan with Motion node (seg0 127.0.1.1:7002 pid=108407) 2: end; -END +ROLLBACK 0: select * from tab_update_epq1; c1 | c2 @@ -235,16 +235,16 @@ END 2 | 1 (1 row) 0: drop table tab_update_epq1; -DROP +DROP TABLE 0: drop table tab_update_epq2; -DROP +DROP TABLE 0q: ... 1q: ... 2q: ... 
-- check that orca concurrent delete transaction won't delete tuple, updated in other transaction (which doesn't match predicate anymore) create table test as select 0 as i distributed randomly; -CREATE 1 +SELECT 1 1: begin; BEGIN 1: update test set i = i + 1; @@ -253,17 +253,17 @@ UPDATE 1 -- the following SQL will hang due to XID lock 2&: delete from test where i = 0; 1: end; -END +COMMIT 2<: <... completed> DELETE 0 drop table test; -DROP +DROP TABLE 1q: ... 2q: ... -- check that orca concurrent delete transaction will delete tuple, updated in other transaction (which still matches predicate) create table test as select 0 as i distributed randomly; -CREATE 1 +SELECT 1 1: begin; BEGIN 1: update test set i = i; @@ -272,19 +272,19 @@ UPDATE 1 -- the following SQL will hang due to XID lock 2&: delete from test where i = 0; 1: end; -END +COMMIT 2<: <... completed> DELETE 1 drop table test; -DROP +DROP TABLE 1q: ... 2q: ... -- test ORCA partition table create table test(a int, b int, c int) partition by range(b) (start (1) end (7) every (3)); -CREATE +CREATE TABLE insert into test values (1, 1, 1); -INSERT 1 +INSERT 0 1 1: begin; BEGIN 1: delete from test where b = 1; @@ -293,7 +293,7 @@ DELETE 1 -- the following SQL will hang due to XID lock 2&: update test set b = 1; 1: end; -END +COMMIT 2<: <... completed> UPDATE 0 @@ -302,7 +302,7 @@ UPDATE 0 ---+---+--- (0 rows) 0: drop table test; -DROP +DROP TABLE 0q: ... 1q: ... 2q: ... @@ -310,9 +310,9 @@ DROP -- test ORCA partition table -- related github issue https://github.com/greenplum-db/gpdb/issues/14935 create table test(a int, b int, c int) partition by range(b) (start (1) end (7) every (3)); -CREATE +CREATE TABLE insert into test values (1, 1, 1), (1, 2, 1); -INSERT 2 +INSERT 0 2 1: begin; BEGIN 1: update test set c = 1; @@ -321,7 +321,7 @@ UPDATE 2 -- the following SQL will hang due to XID lock 2&: update test set c = 1; 1: end; -END +COMMIT 2<: <... 
completed> UPDATE 2 @@ -332,7 +332,7 @@ UPDATE 2 1 | 2 | 1 (2 rows) 0: drop table test; -DROP +DROP TABLE 0q: ... 1q: ... 2q: ... @@ -346,9 +346,9 @@ DROP -- See github issue: https://github.com/greenplum-db/gpdb/issues/8919 0:create table t_splitupdate_raise_error (a int, b int) distributed by (a); -CREATE +CREATE TABLE 0:insert into t_splitupdate_raise_error values (1, 1); -INSERT 1 +INSERT 0 1 -- test delete will throw error 1: begin; @@ -361,12 +361,12 @@ BEGIN 2&: delete from t_splitupdate_raise_error; 1: end; -END +COMMIT 2<: <... completed> ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg1 127.0.1.1:7003 pid=34629) 2: abort; -ABORT +ROLLBACK 1q: ... 2q: ... @@ -381,12 +381,12 @@ BEGIN 2&: update t_splitupdate_raise_error set b = 999; 1: end; -END +COMMIT 2<: <... completed> ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg0 127.0.1.1:7002 pid=43842) 2: abort; -ABORT +ROLLBACK 1q: ... 2q: ... @@ -409,14 +409,14 @@ SET 2&: select * from t_splitupdate_raise_error for update; 1: end; -END +COMMIT 2<: <... completed> ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg0 slice1 127.0.1.1:7002 pid=43866) 2: abort; -ABORT +ROLLBACK 1q: ... 2q: ... 
0:drop table t_splitupdate_raise_error; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/gdd/concurrent_update_optimizer.out b/src/test/isolation2/expected/gdd/concurrent_update_optimizer.out index 4a04cd86a21..2642b507f7d 100644 --- a/src/test/isolation2/expected/gdd/concurrent_update_optimizer.out +++ b/src/test/isolation2/expected/gdd/concurrent_update_optimizer.out @@ -1,8 +1,8 @@ -- Test concurrent update a table with a varying length type CREATE TABLE t_concurrent_update(a int, b int, c char(84)); -CREATE +CREATE TABLE INSERT INTO t_concurrent_update VALUES(1,1,'test'); -INSERT 1 +INSERT 0 1 1: BEGIN; BEGIN @@ -14,7 +14,7 @@ UPDATE 1 SET 2&: UPDATE t_concurrent_update SET b=b+10 WHERE a=1; 1: END; -END +COMMIT 2<: <... completed> UPDATE 1 1: SELECT * FROM t_concurrent_update; @@ -26,13 +26,13 @@ UPDATE 1 2q: ... DROP TABLE t_concurrent_update; -DROP +DROP TABLE -- Test the concurrent update transaction order on the segment is reflected on master 1: CREATE TABLE t_concurrent_update(a int, b int); -CREATE +CREATE TABLE 1: INSERT INTO t_concurrent_update VALUES(1,1); -INSERT 1 +INSERT 0 1 2: BEGIN; BEGIN @@ -76,9 +76,9 @@ UPDATE 1 Success: (1 row) 2<: <... completed> -END +COMMIT 3<: <... completed> -END +COMMIT 2q: ... 3q: ... @@ -117,7 +117,7 @@ SET Success: (1 row) 4: END; -END +COMMIT 4: SELECT gp_inject_fault('before_get_distributed_xid', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=1; gp_inject_fault ----------------- @@ -127,14 +127,14 @@ END 5<: <... completed> UPDATE 1 5: END; -END +COMMIT 6: SELECT * FROM t_concurrent_update; a | b ---+---- 1 | 41 (1 row) 6: DROP TABLE t_concurrent_update; -DROP +DROP TABLE 4q: ... 5q: ... 6q: ... @@ -152,9 +152,9 @@ DROP -- distribution keys is updated. 
0: create table tab_update_hashcol (c1 int, c2 int) distributed by(c1); -CREATE +CREATE TABLE 0: insert into tab_update_hashcol values(1,1); -INSERT 1 +INSERT 0 1 0: select * from tab_update_hashcol; c1 | c2 ----+---- @@ -169,18 +169,18 @@ BEGIN UPDATE 1 2&: update tab_update_hashcol set c1 = c1 + 1 where c1 = 1; 1: end; -END +COMMIT 2<: <... completed> ERROR: EvalPlanQual can not handle subPlan with Motion node 2: end; -END +ROLLBACK 0: select * from tab_update_hashcol; c1 | c2 ----+---- 2 | 1 (1 row) 0: drop table tab_update_hashcol; -DROP +DROP TABLE -- Test EvalplanQual -- If we enable the GDD, then the lock maybe downgrade to @@ -192,13 +192,13 @@ DROP -- GDD is enabled and EvalPlanQual is tiggered. 0: create table tab_update_epq1 (c1 int, c2 int) distributed randomly; -CREATE +CREATE TABLE 0: create table tab_update_epq2 (c1 int, c2 int) distributed randomly; -CREATE +CREATE TABLE 0: insert into tab_update_epq1 values(1,1); -INSERT 1 +INSERT 0 1 0: insert into tab_update_epq2 values(1,1); -INSERT 1 +INSERT 0 1 0: select * from tab_update_epq1; c1 | c2 ----+---- @@ -223,11 +223,11 @@ BEGIN UPDATE 1 2&: update tab_update_epq1 set c1 = tab_update_epq1.c1 + 1 from tab_update_epq2 where tab_update_epq1.c2 = tab_update_epq2.c2; 1: end; -END +COMMIT 2<: <... completed> ERROR: EvalPlanQual can not handle subPlan with Motion node (seg1 127.0.1.1:6003 pid=76275) 2: end; -END +ROLLBACK 0: select * from tab_update_epq1; c1 | c2 @@ -235,16 +235,16 @@ END 2 | 1 (1 row) 0: drop table tab_update_epq1; -DROP +DROP TABLE 0: drop table tab_update_epq2; -DROP +DROP TABLE 0q: ... 1q: ... 2q: ... 
-- check that orca concurrent delete transaction won't delete tuple, updated in other transaction (which doesn't match predicate anymore) create table test as select 0 as i distributed randomly; -CREATE 1 +SELECT 1 1: begin; BEGIN 1: update test set i = i + 1; @@ -253,17 +253,17 @@ UPDATE 1 -- the following SQL will hang due to XID lock 2&: delete from test where i = 0; 1: end; -END +COMMIT 2<: <... completed> DELETE 0 drop table test; -DROP +DROP TABLE 1q: ... 2q: ... -- check that orca concurrent delete transaction will delete tuple, updated in other transaction (which still matches predicate) create table test as select 0 as i distributed randomly; -CREATE 1 +SELECT 1 1: begin; BEGIN 1: update test set i = i; @@ -272,19 +272,19 @@ UPDATE 1 -- the following SQL will hang due to XID lock 2&: delete from test where i = 0; 1: end; -END +COMMIT 2<: <... completed> DELETE 1 drop table test; -DROP +DROP TABLE 1q: ... 2q: ... -- test ORCA partition table create table test(a int, b int, c int) partition by range(b) (start (1) end (7) every (3)); -CREATE +CREATE TABLE insert into test values (1, 1, 1); -INSERT 1 +INSERT 0 1 1: begin; BEGIN 1: delete from test where b = 1; @@ -293,7 +293,7 @@ DELETE 1 -- the following SQL will hang due to XID lock 2&: update test set b = 1; 1: end; -END +COMMIT 2<: <... completed> UPDATE 0 @@ -302,7 +302,7 @@ UPDATE 0 ---+---+--- (0 rows) 0: drop table test; -DROP +DROP TABLE 0q: ... 1q: ... 2q: ... @@ -310,9 +310,9 @@ DROP -- test ORCA partition table -- related github issue https://github.com/greenplum-db/gpdb/issues/14935 create table test(a int, b int, c int) partition by range(b) (start (1) end (7) every (3)); -CREATE +CREATE TABLE insert into test values (1, 1, 1), (1, 2, 1); -INSERT 2 +INSERT 0 2 1: begin; BEGIN 1: update test set c = 1; @@ -321,7 +321,7 @@ UPDATE 2 -- the following SQL will hang due to XID lock 2&: update test set c = 1; 1: end; -END +COMMIT 2<: <... 
completed> UPDATE 2 @@ -332,7 +332,7 @@ UPDATE 2 1 | 2 | 1 (2 rows) 0: drop table test; -DROP +DROP TABLE 0q: ... 1q: ... 2q: ... @@ -346,9 +346,9 @@ DROP -- See github issue: https://github.com/greenplum-db/gpdb/issues/8919 0:create table t_splitupdate_raise_error (a int, b int) distributed by (a); -CREATE +CREATE TABLE 0:insert into t_splitupdate_raise_error values (1, 1); -INSERT 1 +INSERT 0 1 -- test delete will throw error 1: begin; @@ -361,12 +361,12 @@ BEGIN 2&: delete from t_splitupdate_raise_error; 1: end; -END +COMMIT 2<: <... completed> ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg1 127.0.1.1:6003 pid=76275) 2: abort; -ABORT +ROLLBACK 1q: ... 2q: ... @@ -381,12 +381,12 @@ BEGIN 2&: update t_splitupdate_raise_error set b = 999; 1: end; -END +COMMIT 2<: <... completed> ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg0 127.0.1.1:6002 pid=76337) 2: abort; -ABORT +ROLLBACK 1q: ... 2q: ... @@ -409,14 +409,14 @@ SET 2&: select * from t_splitupdate_raise_error for update; 1: end; -END +COMMIT 2<: <... completed> ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg0 slice1 127.0.1.1:7002 pid=43866) 2: abort; -ABORT +ROLLBACK 1q: ... 2q: ... 
0:drop table t_splitupdate_raise_error; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/gdd/delete-deadlock-root-leaf-concurrent-op.out b/src/test/isolation2/expected/gdd/delete-deadlock-root-leaf-concurrent-op.out index 1b4dd43bf0a..d21de9145b0 100644 --- a/src/test/isolation2/expected/gdd/delete-deadlock-root-leaf-concurrent-op.out +++ b/src/test/isolation2/expected/gdd/delete-deadlock-root-leaf-concurrent-op.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS part_tbl; -DROP +DROP TABLE CREATE TABLE part_tbl (a int, b int, c int) PARTITION BY RANGE(b) (START(1) END(2) EVERY(1)); -CREATE +CREATE TABLE INSERT INTO part_tbl SELECT i, 1, i FROM generate_series(1,100)i; -INSERT 100 +INSERT 0 100 -- check gdd is enabled show gp_enable_global_deadlock_detector; @@ -44,4 +44,4 @@ ROLLBACK 2:ROLLBACK; ROLLBACK DROP TABLE IF EXISTS part_tbl; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-01.out b/src/test/isolation2/expected/gdd/dist-deadlock-01.out index 3cedc30f509..b8b46b715db 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-01.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-01.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS t01; -DROP +DROP TABLE CREATE TABLE t01 (id int, val int); -CREATE +CREATE TABLE INSERT INTO t01 (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-04.out b/src/test/isolation2/expected/gdd/dist-deadlock-04.out index c393a69b54b..f497faa08f3 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-04.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-04.out @@ -1,12 +1,12 @@ DROP TABLE IF EXISTS t04a; -DROP +DROP TABLE CREATE TABLE t04a (id int, val int); -CREATE +CREATE TABLE INSERT INTO t04a (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 DROP TABLE IF EXISTS t04b; -DROP +DROP TABLE 
-- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; @@ -22,7 +22,7 @@ BEGIN 10: UPDATE t04a SET val=val WHERE id=segid(0,1); UPDATE 1 10: CREATE TABLE t04b (id int); -CREATE +CREATE TABLE 20: UPDATE t04a SET val=val WHERE id=segid(1,1); UPDATE 1 diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-05.out b/src/test/isolation2/expected/gdd/dist-deadlock-05.out index 57a8dced788..0611482c312 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-05.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-05.out @@ -1,7 +1,7 @@ DROP TABLE IF EXISTS t05; -DROP +DROP TABLE CREATE TABLE t05 (id int primary key); -CREATE +CREATE TABLE -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; @@ -15,10 +15,10 @@ BEGIN BEGIN 10: INSERT INTO t05 VALUES(segid(0,1)); -INSERT 1 +INSERT 0 1 20: INSERT INTO t05 VALUES(segid(1,1)); -INSERT 1 +INSERT 0 1 -- seg 0: con20 ==> con10, xid lock 20&: INSERT INTO t05 VALUES(segid(0,1)); @@ -32,5 +32,5 @@ ERROR: canceling statement due to user request: "cancelled by global deadlock d -- no more deadlock 10<: <... completed> -INSERT 1 +INSERT 0 1 10q: ... 
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-06.out b/src/test/isolation2/expected/gdd/dist-deadlock-06.out index 08202fcc758..45a116ce075 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-06.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-06.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS t06; -DROP +DROP TABLE CREATE TABLE t06 (id int, val int); -CREATE +CREATE TABLE INSERT INTO t06 (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-07.out b/src/test/isolation2/expected/gdd/dist-deadlock-07.out index 40e2fa8253d..37cde3b837c 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-07.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-07.out @@ -1,15 +1,15 @@ DROP TABLE IF EXISTS t07a; -DROP +DROP TABLE DROP TABLE IF EXISTS t07b; -DROP +DROP TABLE CREATE TABLE t07a (c1 int, c2 int); -CREATE +CREATE TABLE CREATE TABLE t07b (c1 int, c2 int); -CREATE +CREATE TABLE INSERT INTO t07a (c1, c2) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 INSERT INTO t07b (c1, c2) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; @@ -37,7 +37,7 @@ UPDATE 1 UPDATE 1 30: LOCK t07a; -LOCK +LOCK TABLE -- seg 0: con30 ==> con10, xid lock 30&: UPDATE t07b SET c2 = 21 WHERE c1 = segid(0,1); diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-102.out b/src/test/isolation2/expected/gdd/dist-deadlock-102.out index c7ecda8664e..fc1603bada2 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-102.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-102.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS t102; -DROP +DROP TABLE CREATE TABLE t102 (id int, val int); -CREATE +CREATE TABLE INSERT INTO t102 (id, val) SELECT i, i FROM 
generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-103.out b/src/test/isolation2/expected/gdd/dist-deadlock-103.out index befd5eaab5d..fe7c5eb2dbf 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-103.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-103.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS t103; -DROP +DROP TABLE CREATE TABLE t103 (id int, val int); -CREATE +CREATE TABLE INSERT INTO t103 (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-104.out b/src/test/isolation2/expected/gdd/dist-deadlock-104.out index 7feeb022c89..c8bd4aa6880 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-104.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-104.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS t104; -DROP +DROP TABLE CREATE TABLE t104 (id int, val int); -CREATE +CREATE TABLE INSERT INTO t104 (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-106.out b/src/test/isolation2/expected/gdd/dist-deadlock-106.out index 4441f8d94ef..1640d6bc857 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-106.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-106.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS t106; -DROP +DROP TABLE CREATE TABLE t106 (id int, val int); -CREATE +CREATE TABLE INSERT INTO t106 (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; diff --git 
a/src/test/isolation2/expected/gdd/dist-deadlock-upsert.out b/src/test/isolation2/expected/gdd/dist-deadlock-upsert.out index db209932428..7a900b6ab96 100644 --- a/src/test/isolation2/expected/gdd/dist-deadlock-upsert.out +++ b/src/test/isolation2/expected/gdd/dist-deadlock-upsert.out @@ -4,13 +4,13 @@ -- global deadlock when GDD is enabled. DROP TABLE IF EXISTS t_upsert; -DROP +DROP TABLE CREATE TABLE t_upsert (id int, val int) distributed by (id); -CREATE +CREATE TABLE CREATE UNIQUE INDEX uidx_t_upsert on t_upsert(id, val); -CREATE +CREATE INDEX INSERT INTO t_upsert (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; @@ -24,10 +24,10 @@ BEGIN BEGIN 10: INSERT INTO t_upsert VALUES (segid(0,1), segid(0,1)) on conflict (id, val) do update set val = 999; -INSERT 1 +INSERT 0 1 20: INSERT INTO t_upsert VALUES (segid(1,1), segid(1,1)) on conflict (id, val) do update set val = 888; -INSERT 1 +INSERT 0 1 select gp_inject_fault('gdd_probe', 'suspend', dbid) from gp_segment_configuration where content=-1 and role='p'; gp_inject_fault @@ -57,5 +57,5 @@ ERROR: canceling statement due to user request: "cancelled by global deadlock d -- no more deadlock 10<: <... completed> -INSERT 1 +INSERT 0 1 10q: ... diff --git a/src/test/isolation2/expected/gdd/end.out b/src/test/isolation2/expected/gdd/end.out index 01a10b8a47a..70986307742 100644 --- a/src/test/isolation2/expected/gdd/end.out +++ b/src/test/isolation2/expected/gdd/end.out @@ -1,7 +1,7 @@ ALTER SYSTEM RESET gp_enable_global_deadlock_detector; -ALTER +ALTER SYSTEM ALTER SYSTEM RESET gp_global_deadlock_detector_period; -ALTER +ALTER SYSTEM -- Use utility session on seg 0 to restart master. 
This way avoids the -- situation where session issuing the restart doesn't disappear diff --git a/src/test/isolation2/expected/gdd/insert_root_partition_truncate_deadlock.out b/src/test/isolation2/expected/gdd/insert_root_partition_truncate_deadlock.out index bf78325fe93..3392152eb54 100644 --- a/src/test/isolation2/expected/gdd/insert_root_partition_truncate_deadlock.out +++ b/src/test/isolation2/expected/gdd/insert_root_partition_truncate_deadlock.out @@ -11,7 +11,7 @@ -- without GDD it is running to show that no deadlock happens. create table rank_13652 (id int, year int) partition by range (year) (start (2006) end (2009) every (1)); -CREATE +CREATE TABLE 1: select gp_inject_fault('func_init_plan_end', 'suspend', dbid, current_setting('gp_session_id')::int) from gp_segment_configuration where content = 0 and role = 'p'; gp_inject_fault @@ -35,7 +35,7 @@ select gp_inject_fault('func_init_plan_end', 'reset', dbid) from gp_segment_conf (1 row) 1<: <... completed> -INSERT 30 +INSERT 0 30 2<: <... completed> ERROR: canceling statement due to user request: "cancelled by global deadlock detector" @@ -43,4 +43,4 @@ ERROR: canceling statement due to user request: "cancelled by global deadlock d 2q: ... drop table rank_13652; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/gdd/local-deadlock-03.out b/src/test/isolation2/expected/gdd/local-deadlock-03.out index 0a0fc49e086..6aa7a02a2a6 100644 --- a/src/test/isolation2/expected/gdd/local-deadlock-03.out +++ b/src/test/isolation2/expected/gdd/local-deadlock-03.out @@ -4,7 +4,7 @@ -- deadlock testcases stable we reset the gdd period to 2min so should -- not be triggered during the local deadlock tests. 
ALTER SYSTEM SET gp_global_deadlock_detector_period to '2min'; -ALTER +ALTER SYSTEM SELECT pg_reload_conf(); pg_reload_conf ---------------- @@ -18,11 +18,11 @@ SELECT pg_reload_conf(); (1 row) DROP TABLE IF EXISTS t03; -DROP +DROP TABLE CREATE TABLE t03 (id int, val int); -CREATE +CREATE TABLE INSERT INTO t03 (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; diff --git a/src/test/isolation2/expected/gdd/non-lock-105.out b/src/test/isolation2/expected/gdd/non-lock-105.out index 781e6d9929d..537f7225ddc 100644 --- a/src/test/isolation2/expected/gdd/non-lock-105.out +++ b/src/test/isolation2/expected/gdd/non-lock-105.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS t105; -DROP +DROP TABLE CREATE TABLE t105 (id int, val int); -CREATE +CREATE TABLE INSERT INTO t105 (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- gang creation order is important, reset any guc to force the creation 10: RESET optimizer; diff --git a/src/test/isolation2/expected/gdd/prepare.out b/src/test/isolation2/expected/gdd/prepare.out index d8940c2d3cf..73af33a0268 100644 --- a/src/test/isolation2/expected/gdd/prepare.out +++ b/src/test/isolation2/expected/gdd/prepare.out @@ -1,17 +1,17 @@ -- t0r is the reference table to provide the data distribution info. DROP TABLE IF EXISTS t0p; -DROP +DROP TABLE CREATE TABLE t0p (id int, val int); -CREATE +CREATE TABLE INSERT INTO t0p (id, val) SELECT i, i FROM generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 DROP TABLE IF EXISTS t0r; -DROP +DROP TABLE CREATE TABLE t0r (id int, val int, segid int) DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE INSERT INTO t0r (id, val, segid) SELECT id, val, gp_segment_id from t0p; -INSERT 100 +INSERT 0 100 -- GDD tests rely on the data distribution, but depends on the number of -- the segments the distribution might be different. 
@@ -19,7 +19,7 @@ INSERT 100 -- * `seg` is the segment id, starts from 0; -- * `idx` is the index on the segment, starts from 1; CREATE OR REPLACE FUNCTION segid(seg int, idx int) RETURNS int AS $$ SELECT id FROM t0r WHERE segid=$1 ORDER BY id LIMIT 1 OFFSET ($2-1) $$ LANGUAGE sql; -CREATE +CREATE FUNCTION -- In some of the testcases the execution order of two background queries -- must be enforced not only on master but also on segments, for example @@ -38,7 +38,7 @@ CREATE -- So we provide this barrier function to ensure the execution order. -- It's implemented with sleep now, but should at least work. CREATE OR REPLACE FUNCTION barrier() RETURNS void AS $$ SELECT pg_sleep(4) $$ LANGUAGE sql; -CREATE +CREATE FUNCTION -- verify the function -- Data distribution is sensitive to the underlying hash algorithm, we need each @@ -63,14 +63,14 @@ SELECT segid(2,10) is not null; -- table to just store the master's data directory path on segment. CREATE TABLE datadir(a int, dir text); -CREATE +CREATE TABLE INSERT INTO datadir select 1,datadir from gp_segment_configuration where role='p' and content=-1; -INSERT 1 +INSERT 0 1 ALTER SYSTEM SET gp_enable_global_deadlock_detector TO on; -ALTER +ALTER SYSTEM ALTER SYSTEM SET gp_global_deadlock_detector_period TO 5; -ALTER +ALTER SYSTEM -- Use utility session on seg 0 to restart master. 
This way avoids the -- situation where session issuing the restart doesn't disappear diff --git a/src/test/isolation2/expected/gdd/update-deadlock-root-leaf-concurrent-op.out b/src/test/isolation2/expected/gdd/update-deadlock-root-leaf-concurrent-op.out index c88976980b4..a8954eae80f 100644 --- a/src/test/isolation2/expected/gdd/update-deadlock-root-leaf-concurrent-op.out +++ b/src/test/isolation2/expected/gdd/update-deadlock-root-leaf-concurrent-op.out @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS part_tbl; -DROP +DROP TABLE CREATE TABLE part_tbl (a int, b int, c int) PARTITION BY RANGE(b) (START(1) END(2) EVERY(1)); -CREATE +CREATE TABLE INSERT INTO part_tbl SELECT i, 1, i FROM generate_series(1,100)i; -INSERT 100 +INSERT 0 100 -- check gdd is enabled show gp_enable_global_deadlock_detector; @@ -44,4 +44,4 @@ ROLLBACK 2:ROLLBACK; ROLLBACK DROP TABLE IF EXISTS part_tbl; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/gp_terminate_mpp_backends.out b/src/test/isolation2/expected/gp_terminate_mpp_backends.out index 941ccef8ddc..6c8ba84db6a 100644 --- a/src/test/isolation2/expected/gp_terminate_mpp_backends.out +++ b/src/test/isolation2/expected/gp_terminate_mpp_backends.out @@ -1,6 +1,6 @@ -- test gp_terminate_mpp_backends 1:create table gp_terminate_mpp_backends_t (a int); -CREATE +CREATE TABLE select gp_terminate_mpp_backends() from gp_dist_random('gp_id'); gp_terminate_mpp_backends diff --git a/src/test/isolation2/expected/gpdispatch.out b/src/test/isolation2/expected/gpdispatch.out index 67f569f65f4..8f6ca9e71a6 100644 --- a/src/test/isolation2/expected/gpdispatch.out +++ b/src/test/isolation2/expected/gpdispatch.out @@ -2,7 +2,7 @@ -- Report on https://github.com/greenplum-db/gpdb/issues/12399 create extension if not exists gp_inject_fault; -CREATE +CREATE EXTENSION 1: select gp_inject_fault('make_dispatch_result_error', 'skip', dbid) from gp_segment_configuration where role = 'p' and content = -1; gp_inject_fault @@ -41,9 +41,9 @@ select 
gp_inject_fault('make_dispatch_result_error', 'reset', dbid) from gp_segm -- create table test_waitevent(i int); -CREATE +CREATE TABLE insert into test_waitevent select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 1: set optimizer = off; SET @@ -59,7 +59,7 @@ SET 1&: WITH a1 as (select * from test_waitevent), a2 as (select * from test_waitevent) SELECT sum(a1.i) FROM a1 INNER JOIN a2 ON a2.i = a1.i UNION ALL SELECT count(a1.i) FROM a1 INNER JOIN a2 ON a2.i = a1.i; -- start_ignore 2: copy (select pg_stat_get_activity(NULL) from gp_dist_random('gp_id') where gp_segment_id=0) to '/tmp/_gpdb_test_output.txt'; -COPY 9 +COPY 10 -- end_ignore 2: select gp_wait_until_triggered_fault('shareinput_writer_notifyready', 1, 2); gp_wait_until_triggered_fault @@ -98,13 +98,13 @@ COPY 9 -- Case for cdbgang_createGang_async 1: create table t_12703(a int); -CREATE +CREATE TABLE 1:begin; BEGIN -- make a cursor so that we have a named portal 1: declare cur12703 cursor for select * from t_12703; -DECLARE +DECLARE CURSOR 2: select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=1), 'stop'); pg_ctl @@ -135,7 +135,7 @@ DECLARE 1: select * from t_12703; ERROR: gang was lost due to cluster reconfiguration (cdbgang_async.c:98) 1: abort; -ABORT +ROLLBACK 1q: ... 2q: ... @@ -143,7 +143,7 @@ ABORT -- Case for cdbCopyEndInternal -- Provide some data to copy in 4: insert into t_12703 select * from generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 4: copy t_12703 to '/tmp/t_12703'; COPY 10 -- make copy in statement hang at the entry point of cdbCopyEndInternal @@ -193,7 +193,7 @@ ERROR: MPP detected 1 segment failures, system is reconnected 1 (1 row) 2: end; -END +COMMIT 2q: ... 
!\retcode gprecoverseg -aF --no-progress; diff --git a/src/test/isolation2/expected/gpdispatch_1.out b/src/test/isolation2/expected/gpdispatch_1.out index edd307d6d14..d54e376acd7 100644 --- a/src/test/isolation2/expected/gpdispatch_1.out +++ b/src/test/isolation2/expected/gpdispatch_1.out @@ -2,7 +2,7 @@ -- Report on https://github.com/greenplum-db/gpdb/issues/12399 create extension if not exists gp_inject_fault; -CREATE +CREATE EXTENSION 1: select gp_inject_fault('make_dispatch_result_error', 'skip', dbid) from gp_segment_configuration where role = 'p' and content = -1; gp_inject_fault @@ -41,9 +41,9 @@ select gp_inject_fault('make_dispatch_result_error', 'reset', dbid) from gp_segm -- create table test_waitevent(i int); -CREATE +CREATE TABLE insert into test_waitevent select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 1: set optimizer = off; SET @@ -98,13 +98,13 @@ COPY 9 -- Case for cdbgang_createGang_async 1: create table t_12703(a int); -CREATE +CREATE TABLE 1:begin; BEGIN -- make a cursor so that we have a named portal 1: declare cur12703 cursor for select * from t_12703; -DECLARE +DECLARE CURSOR 2: select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=1), 'stop'); pg_ctl @@ -127,7 +127,7 @@ ERROR: Error on receive from seg1 slice1 127.0.1.1:7003 pid=58391: server close This probably means the server terminated abnormally before or while processing the request. 1: abort; -ABORT +ROLLBACK 1q: ... 2q: ... @@ -135,7 +135,7 @@ ABORT -- Case for cdbCopyEndInternal -- Provide some data to copy in 4: insert into t_12703 select * from generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 4: copy t_12703 to '/tmp/t_12703'; COPY 10 -- make copy in statement hang at the entry point of cdbCopyEndInternal @@ -180,7 +180,7 @@ ERROR: MPP detected 1 segment failures, system is reconnected 1 (1 row) 2: end; -END +COMMIT 2q: ... 
!\retcode gprecoverseg -aF --no-progress; diff --git a/src/test/isolation2/expected/gpexpand_catalog_lock.out b/src/test/isolation2/expected/gpexpand_catalog_lock.out index b060a23cfcc..580eaaf0e7d 100644 --- a/src/test/isolation2/expected/gpexpand_catalog_lock.out +++ b/src/test/isolation2/expected/gpexpand_catalog_lock.out @@ -1,12 +1,12 @@ drop table if exists t20; -DROP +DROP TABLE drop table if exists t30; -DROP +DROP TABLE create table t20 (c1 int, c2 int) distributed by (c1); -CREATE +CREATE TABLE create table t30 (c1 int, c2 int) distributed by (c1); -CREATE +CREATE TABLE -- c10, c11 simulate gpexpand's catalog lock protection -- they will acquire the catalog lock in exclusive mode @@ -34,7 +34,7 @@ BEGIN 11&: select gp_expand_lock_catalog(); 10: end; -END +COMMIT -- c10 released the lock, c11 acquired it now 11<: <... completed> @@ -43,7 +43,7 @@ END (1 row) 11: end; -END +COMMIT -- -- client sessions do not block each other on catalog changes @@ -56,14 +56,14 @@ BEGIN -- c20 and c30 both acquired the catalog lock in shared mode 20: create table t21 (c1 int, c2 int) distributed by (c1); -CREATE +CREATE TABLE 30: create table t31 (c1 int, c2 int) distributed by (c1); -CREATE +CREATE TABLE 20: insert into t21 values (1,1); -INSERT 1 +INSERT 0 1 30: insert into t31 values (1,1); -INSERT 1 +INSERT 0 1 20: rollback; ROLLBACK @@ -83,17 +83,17 @@ BEGIN -- c20 and c30 both acquired the catalog lock in shared mode 20: create table t21 (c1 int, c2 int) distributed by (c1); -CREATE +CREATE TABLE 30: create table t31 (c1 int, c2 int) distributed by (c1); -CREATE +CREATE TABLE -- c10 can not acquire the lock in exclusive mode ... 
10&: select gp_expand_lock_catalog(); 20: insert into t21 values (1,1); -INSERT 1 +INSERT 0 1 30: insert into t31 values (1,1); -INSERT 1 +INSERT 0 1 20: rollback; ROLLBACK @@ -107,7 +107,7 @@ ROLLBACK (1 row) 10: end; -END +COMMIT -- -- the catalog lock can be acquired in order @@ -122,7 +122,7 @@ BEGIN -- c20 acquired the catalog lock in shared mode 20: create table t21 (c1 int, c2 int) distributed by (c1); -CREATE +CREATE TABLE -- c10 has to wait for c20 10&: select gp_expand_lock_catalog(); @@ -135,7 +135,7 @@ ROLLBACK -- c20 can still make catalog changes 20: drop table t21; -DROP +DROP TABLE 20: rollback; ROLLBACK @@ -147,7 +147,7 @@ ROLLBACK (1 row) 10: end; -END +COMMIT -- -- gpexpand does not block DMLs or readonly queries to catalogs @@ -169,7 +169,7 @@ BEGIN -- c20 and c30 can still run DMLs 20: insert into t20 values (1,1); -INSERT 1 +INSERT 0 1 20: select * from t20; c1 | c2 ----+---- @@ -181,7 +181,7 @@ UPDATE 1 DELETE 1 30: insert into t30 values (1,1); -INSERT 1 +INSERT 0 1 30: select * from t30; c1 | c2 ----+---- @@ -210,7 +210,7 @@ ROLLBACK ROLLBACK 10: end; -END +COMMIT -- -- catalog changes are disallowed when gpexpand is in progress @@ -246,4 +246,4 @@ ROLLBACK ROLLBACK 10: end; -END +COMMIT diff --git a/src/test/isolation2/expected/insert_root_partition_truncate_deadlock_without_gdd.out b/src/test/isolation2/expected/insert_root_partition_truncate_deadlock_without_gdd.out index 4724a9cf7ef..25834116f2f 100644 --- a/src/test/isolation2/expected/insert_root_partition_truncate_deadlock_without_gdd.out +++ b/src/test/isolation2/expected/insert_root_partition_truncate_deadlock_without_gdd.out @@ -11,7 +11,7 @@ -- without GDD it is running to show that no deadlock happens. 
create table rank_13652 (id int, year int) partition by range (year) (start (2006) end (2009) every (1)); -CREATE +CREATE TABLE 1: select gp_inject_fault('func_init_plan_end', 'suspend', dbid, current_setting('gp_session_id')::int) from gp_segment_configuration where content = 0 and role = 'p'; gp_inject_fault @@ -35,12 +35,12 @@ select gp_inject_fault('func_init_plan_end', 'reset', dbid) from gp_segment_conf (1 row) 1<: <... completed> -INSERT 30 +INSERT 0 30 2<: <... completed> -TRUNCATE +TRUNCATE TABLE 1q: ... 2q: ... drop table rank_13652; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/instr_in_shmem_terminate.out b/src/test/isolation2/expected/instr_in_shmem_terminate.out index 2bb0a86a6c9..338f5fd9b9a 100644 --- a/src/test/isolation2/expected/instr_in_shmem_terminate.out +++ b/src/test/isolation2/expected/instr_in_shmem_terminate.out @@ -5,34 +5,34 @@ -- slots in shmem should be recycled correctly. DROP SCHEMA IF EXISTS QUERY_METRICS CASCADE; -DROP +DROP SCHEMA CREATE SCHEMA QUERY_METRICS; -CREATE +CREATE SCHEMA SET SEARCH_PATH=QUERY_METRICS; SET CREATE EXTERNAL WEB TABLE __gp_localid ( localid int ) EXECUTE E'echo $GP_SEGMENT_ID' FORMAT 'TEXT'; -CREATE +CREATE EXTERNAL TABLE GRANT SELECT ON TABLE __gp_localid TO public; GRANT -CREATE EXTERNAL WEB TABLE __gp_masterid ( masterid int ) EXECUTE E'echo $GP_SEGMENT_ID' ON COORDINATOR FORMAT 'TEXT'; -CREATE -GRANT SELECT ON TABLE __gp_masterid TO public; +CREATE EXTERNAL WEB TABLE __gp_coordinatorid ( coordinatorid int ) EXECUTE E'echo $GP_SEGMENT_ID' ON COORDINATOR FORMAT 'TEXT'; +CREATE EXTERNAL TABLE +GRANT SELECT ON TABLE __gp_coordinatorid TO public; GRANT CREATE FUNCTION gp_instrument_shmem_detail_f() RETURNS SETOF RECORD AS '$libdir/gp_instrument_shmem', 'gp_instrument_shmem_detail' LANGUAGE C IMMUTABLE; -CREATE +CREATE FUNCTION GRANT EXECUTE ON FUNCTION gp_instrument_shmem_detail_f() TO public; GRANT -CREATE VIEW gp_instrument_shmem_detail AS WITH all_entries AS ( SELECT C.* FROM __gp_localid, 
gp_instrument_shmem_detail_f() as C ( tmid int4,ssid int4,ccnt int2,segid int2,pid int4 ,nid int2,tuplecount int8,nloops int8,ntuples int8 ) UNION ALL SELECT C.* FROM __gp_masterid, gp_instrument_shmem_detail_f() as C ( tmid int4,ssid int4,ccnt int2,segid int2,pid int4 ,nid int2,tuplecount int8,nloops int8,ntuples int8 )) SELECT tmid, ssid, ccnt,segid, pid, nid, tuplecount, nloops, ntuples FROM all_entries ORDER BY segid; -CREATE +CREATE VIEW gp_instrument_shmem_detail AS WITH all_entries AS ( SELECT C.* FROM __gp_localid, gp_instrument_shmem_detail_f() as C ( tmid int4,ssid int4,ccnt int2,segid int2,pid int4 ,nid int2,tuplecount int8,nloops int8,ntuples int8 ) UNION ALL SELECT C.* FROM __gp_coordinatorid, gp_instrument_shmem_detail_f() as C ( tmid int4,ssid int4,ccnt int2,segid int2,pid int4 ,nid int2,tuplecount int8,nloops int8,ntuples int8 )) SELECT tmid, ssid, ccnt,segid, pid, nid, tuplecount, nloops, ntuples FROM all_entries ORDER BY segid; +CREATE VIEW CREATE TABLE a (id int, c char) DISTRIBUTED BY (id); -CREATE +CREATE TABLE INSERT INTO a SELECT *, 'a' FROM generate_series(1, 50); -INSERT 50 +INSERT 0 50 SET OPTIMIZER=OFF; SET ANALYZE a; @@ -48,7 +48,7 @@ SELECT count(*) FROM (SELECT 1 FROM gp_instrument_shmem_detail GROUP BY ssid, cc (1 row) CREATE TABLE foo AS SELECT i a, i b FROM generate_series(1, 10) i; -CREATE 10 +SELECT 10 -- this query will be terminated by 'test pg_terminate_backend' 1&:EXPLAIN ANALYZE CREATE TEMP TABLE t1 AS SELECT count(*) FROM QUERY_METRICS.foo WHERE pg_sleep(200) IS NULL; @@ -156,9 +156,9 @@ SELECT count(*) FROM foo, pg_sleep(2); -- test 4: Merge Append should expose plan_node_id for whole plan tree CREATE TABLE QUERY_METRICS.mergeappend_test (a int, b int, x int) DISTRIBUTED BY (a,b); -CREATE +CREATE TABLE INSERT INTO QUERY_METRICS.mergeappend_test SELECT g/100, g/100, g FROM generate_series(1, 500) g; -INSERT 500 +INSERT 0 500 ANALYZE QUERY_METRICS.mergeappend_test; ANALYZE @@ -270,5 +270,5 @@ SELECT count(*) FROM (SELECT 1 
FROM gp_instrument_shmem_detail GROUP BY ssid, cc -- start_ignore DROP SCHEMA IF EXISTS QUERY_METRICS CASCADE; -DROP +DROP SCHEMA -- end_ignore diff --git a/src/test/isolation2/expected/invalidated_toast_index.out b/src/test/isolation2/expected/invalidated_toast_index.out index 2c7e974813e..fe1351a4d75 100644 --- a/src/test/isolation2/expected/invalidated_toast_index.out +++ b/src/test/isolation2/expected/invalidated_toast_index.out @@ -5,19 +5,19 @@ -- CREATE TABLE toastable_heap(a text, b varchar, c int); -CREATE +CREATE TABLE -- Force external storage for toasted columns. ALTER TABLE toastable_heap ALTER COLUMN a SET STORAGE EXTERNAL; -ALTER +ALTER TABLE ALTER TABLE toastable_heap ALTER COLUMN b SET STORAGE EXTERNAL; -ALTER +ALTER TABLE -- Insert two values that we know will be toasted. INSERT INTO toastable_heap VALUES(repeat('a',100000), repeat('b',100001), 1); -INSERT 1 +INSERT 0 1 INSERT INTO toastable_heap VALUES(repeat('A',100000), repeat('B',100001), 2); -INSERT 1 +INSERT 0 1 -- start_ignore -- @@ -34,11 +34,7 @@ SET SET SET -*U: UPDATE pg_index - SET indisvalid = false - FROM pg_class heap - WHERE indrelid = heap.reltoastrelid - AND heap.oid = 'toastable_heap'::regclass; +*U: UPDATE pg_index SET indisvalid = false FROM pg_class heap WHERE indrelid = heap.reltoastrelid AND heap.oid = 'toastable_heap'::regclass; UPDATE 1 UPDATE 1 @@ -60,4 +56,4 @@ ERROR: no valid index found for toast relation with Oid 107484 (tuptoaster.c:10 -- Don't leave an unusable table in the DB for others to trip over. DROP TABLE toastable_heap; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/lockmodes.out b/src/test/isolation2/expected/lockmodes.out index 16635f9618b..01ce04b0582 100644 --- a/src/test/isolation2/expected/lockmodes.out +++ b/src/test/isolation2/expected/lockmodes.out @@ -1,14 +1,14 @@ -- table to just store the master's data directory path on segment. 
CREATE TABLE lockmodes_datadir(a int, dir text); -CREATE +CREATE TABLE INSERT INTO lockmodes_datadir select 1,datadir from gp_segment_configuration where role='p' and content=-1; -INSERT 1 +INSERT 0 1 1: set optimizer = off; SET create or replace view show_locks_lockmodes as select locktype, mode, granted, relation::regclass from pg_locks where gp_segment_id = -1 and locktype = 'relation' and relation::regclass::text like 't_lockmods%'; -CREATE +CREATE VIEW show gp_enable_global_deadlock_detector; gp_enable_global_deadlock_detector @@ -22,25 +22,25 @@ show gp_enable_global_deadlock_detector; -- 1.1 test for heap tables create table t_lockmods (c int) distributed randomly; -CREATE +CREATE TABLE insert into t_lockmods select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 analyze t_lockmods; ANALYZE create table t_lockmods1 (c int) distributed randomly; -CREATE +CREATE TABLE create table t_lockmods_rep(c int) distributed replicated; -CREATE +CREATE TABLE -- See github issue: https://github.com/greenplum-db/gpdb/issues/9449 -- upsert may lock tuples on segment, so we should upgrade lock level -- on QD if GDD is disabled. 
create table t_lockmods_upsert(a int, b int) distributed by (a); -CREATE +CREATE TABLE create unique index uidx_t_lockmodes_upsert on t_lockmods_upsert(a, b); -CREATE +CREATE INDEX -- add analyze to avoid auto vacuum when executing first insert analyze t_lockmods_upsert; ANALYZE @@ -71,7 +71,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -98,7 +98,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -125,7 +125,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -152,7 +152,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -180,7 +180,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -208,7 +208,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -236,7 +236,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -264,7 +264,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK -- 1.1.2 update | delete should hold ExclusiveLock on result relations 1: begin; @@ -277,7 +277,7 @@ UPDATE 5 relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -289,33 +289,33 @@ DELETE 5 relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK -- 1.1.3 insert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK -- 1.1.4 upsert should hold ExclusiveLock on result relations 1: begin; BEGIN 1: insert into 
t_lockmods_upsert values (1, 1) on conflict(a, b) do update set b = 99; -INSERT 1 +INSERT 0 1 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+------------------- relation | ExclusiveLock | t | t_lockmods_upsert (1 row) 1: abort; -ABORT +ROLLBACK -- 1.1.5 use cached plan should be consistent with no cached plan 1: prepare select_for_update as select * from t_lockmods for update; @@ -352,7 +352,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -371,7 +371,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -390,7 +390,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -409,65 +409,65 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute update_tlockmods; -EXECUTE 5 +UPDATE 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+------------ relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute delete_tlockmods; -EXECUTE 5 +DELETE 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+------------ relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute insert_tlockmods; -EXECUTE 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute upsert_tlockmods; -EXECUTE 1 +INSERT 0 1 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+------------------- relation | ExclusiveLock | t | t_lockmods_upsert (1 row) 1: abort; -ABORT 
+ROLLBACK -- 1.2 test for AO table create table t_lockmods_ao (c int) with (appendonly=true) distributed randomly; -CREATE +CREATE TABLE insert into t_lockmods_ao select * from generate_series(1, 8); -INSERT 8 +INSERT 0 8 analyze t_lockmods_ao; ANALYZE create table t_lockmods_ao1 (c int) with (appendonly=true) distributed randomly; -CREATE +CREATE TABLE -- 1.2.1 select for (update|share|key share|no key update) should hold ExclusiveLock on range tables 1: begin; @@ -498,7 +498,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -528,7 +528,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -558,7 +558,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -588,7 +588,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -616,7 +616,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -644,7 +644,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -672,7 +672,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -700,7 +700,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK -- 1.2.2 update | delete should hold ExclusiveLock on result relations 1: begin; @@ -713,7 +713,7 @@ UPDATE 8 relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -725,20 +725,20 @@ DELETE 8 relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 1.2.3 insert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods_ao select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation 
----------+------------------+---------+--------------- relation | RowExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 1.2.4 use cached plan should be consistent with no cached plan 1: prepare select_for_update_ao as select * from t_lockmods_ao for update; @@ -776,7 +776,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -798,7 +798,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -820,7 +820,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -842,43 +842,43 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute update_tlockmods_ao; -EXECUTE 8 +UPDATE 8 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+--------------- relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute delete_tlockmods_ao; -EXECUTE 8 +DELETE 8 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+--------------- relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute insert_tlockmods_ao; -EXECUTE 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+--------------- relation | RowExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 1.3 With limit clause, such case should -- acquire ExclusiveLock on the whole table and do not generate lockrows node @@ -908,7 +908,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK -- 1.4 For replicated table, we should lock the entire table on ExclusiveLock 1: begin; @@ -931,7 +931,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_rep (2 rows) 1: abort; 
-ABORT +ROLLBACK -- 1.5 test order-by's plan 1: begin; @@ -962,7 +962,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK -- 1.6 select for update NOWAIT/SKIP LOCKED -- NOWAIT/SKIP LOCKED should not affect the table-level lock @@ -979,7 +979,7 @@ BEGIN (5 rows) 2&: select * from t_lockmods for update nowait; 1: abort; -ABORT +ROLLBACK 2<: <... completed> c --- @@ -1003,7 +1003,7 @@ BEGIN (5 rows) 2&: select * from t_lockmods for update skip locked; 1: abort; -ABORT +ROLLBACK 2<: <... completed> c --- @@ -1026,12 +1026,12 @@ ABORT -- Details: https://groups.google.com/a/greenplum.org/g/gpdb-dev/c/wAPKpJzhbpM -- Issue: https://github.com/greenplum-db/gpdb/issues/13652 1:DROP TABLE IF EXISTS t_lockmods_part_tbl_dml; -DROP +DROP TABLE 1:CREATE TABLE t_lockmods_part_tbl_dml (a int, b int, c int) PARTITION BY RANGE(b) (START(1) END(3) EVERY(1)); -CREATE +CREATE TABLE 1:INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i; -INSERT 10 +INSERT 0 10 -- 1: BEGIN; @@ -1052,7 +1052,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i; -INSERT 10 +INSERT 0 10 -- without GDD, it will lock all leaf partitions on QD 1: select * from show_locks_lockmodes; locktype | mode | granted | relation @@ -1134,8 +1134,8 @@ ROLLBACK -- enable gdd ALTER SYSTEM SET gp_enable_global_deadlock_detector TO on; -ALTER --- Use utility session on seg 0 to restart master. This way avoids the +ALTER SYSTEM +-- Use utility session on seg 0 to restart coordinator. This way avoids the -- situation where session issuing the restart doesn't disappear -- itself. 
1U:SELECT pg_ctl(dir, 'restart') from lockmodes_datadir; @@ -1191,7 +1191,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1219,7 +1219,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1247,7 +1247,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1275,7 +1275,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1303,7 +1303,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1331,7 +1331,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1359,7 +1359,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1387,7 +1387,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK -- 2.1.2 update | delete should hold RowExclusiveLock on result relations @@ -1401,7 +1401,7 @@ UPDATE 5 relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1413,33 +1413,33 @@ DELETE 5 relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK -- 2.1.3 insert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK -- 2.1.4 upsert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods_upsert values (1, 1) on conflict(a, b) do update set b = 99; -INSERT 1 +INSERT 0 1 2: select * from show_locks_lockmodes; locktype | mode | granted | relation 
----------+------------------+---------+------------------- relation | RowExclusiveLock | t | t_lockmods_upsert (1 row) 1: abort; -ABORT +ROLLBACK -- 2.1.5 use cached plan should be consistent with no cached plan 1: prepare select_for_update as select * from t_lockmods for update; @@ -1476,7 +1476,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1495,7 +1495,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1514,7 +1514,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1533,55 +1533,55 @@ BEGIN relation | RowShareLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute update_tlockmods; -EXECUTE 5 +UPDATE 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute delete_tlockmods; -EXECUTE 5 +DELETE 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute insert_tlockmods; -EXECUTE 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute upsert_tlockmods; -EXECUTE 1 +INSERT 0 1 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------------- relation | RowExclusiveLock | t | t_lockmods_upsert (1 row) 1: abort; -ABORT +ROLLBACK -- 2.2 test for AO table @@ -1614,7 +1614,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 
1: begin; BEGIN @@ -1644,7 +1644,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1674,7 +1674,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1704,7 +1704,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1732,7 +1732,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1760,7 +1760,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1788,7 +1788,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1816,7 +1816,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK -- 2.2.2 update | delete should hold ExclusiveLock on result relations 1: begin; @@ -1829,7 +1829,7 @@ UPDATE 8 relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1841,20 +1841,20 @@ DELETE 8 relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 2.2.3 insert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods_ao select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+--------------- relation | RowExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 2.2.4 use cached plan should be consistent with no cached plan 1: prepare select_for_update_ao as select * from t_lockmods_ao for update; @@ -1892,7 +1892,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1914,7 +1914,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1936,7 +1936,7 @@ BEGIN 
relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1958,43 +1958,43 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute update_tlockmods_ao; -EXECUTE 8 +UPDATE 8 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+--------------- relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute delete_tlockmods_ao; -EXECUTE 8 +DELETE 8 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+--------------- relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute insert_tlockmods_ao; -EXECUTE 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+--------------- relation | RowExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 2.3 With limit clause, such case should -- acquire ExclusiveLock on the whole table and do not generate lockrows node @@ -2026,7 +2026,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK -- 2.4 For replicated table, we should lock the entire table on ExclusiveLock 1: begin; @@ -2049,7 +2049,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_rep (2 rows) 1: abort; -ABORT +ROLLBACK -- 2.5 test order-by's plan 1: begin; @@ -2081,7 +2081,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK -- 2.6 select for update NOWAIT/SKIP LOCKED -- with GDD, select for update could be optimized to not upgrade lock. @@ -2123,7 +2123,7 @@ BEGIN 2: select * from t_lockmods for update nowait; ERROR: could not obtain lock on row in relation "t_lockmods" (seg1 slice1 10.140.0.3:7003 pid=15182) 1: abort; -ABORT +ROLLBACK 1q: ... 2q: ... 
@@ -2199,7 +2199,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i; -INSERT 10 +INSERT 0 10 -- With GDD enabled, QD will only hold lock on root for insert 1: select * from show_locks_lockmodes; locktype | mode | granted | relation @@ -2210,14 +2210,34 @@ INSERT 10 ROLLBACK 1q: ... +1: CREATE TABLE t_lockmods_aopart(i int, t text) USING ao_row PARTITION BY RANGE(i) (START(1) END(5) EVERY(1)); +CREATE TABLE +1: BEGIN; +BEGIN +1: DELETE FROM t_lockmods_aopart WHERE i = 4; +DELETE 0 +-- With GDD enabled, QD will only hold lock on root for delete +1: select * from show_locks_lockmodes; + locktype | mode | granted | relation +----------+-----------------+---------+--------------------------- + relation | ExclusiveLock | t | t_lockmods_aopart_1_prt_4 + relation | AccessShareLock | t | t_lockmods_aopart + relation | ExclusiveLock | t | t_lockmods_aopart +(3 rows) +1: COMMIT; +COMMIT +1: DROP TABLE t_lockmods_aopart; +DROP TABLE +1q: ... + -- 2.8 Verify behaviors of select with locking clause (i.e. select for update) -- when running concurrently with index creation, for Heap tables. -- For AO/CO tables, refer to create_index_allows_readonly.source. 
1: CREATE TABLE create_index_select_for_update_tbl(a int, b int); -CREATE +CREATE TABLE 1: INSERT INTO create_index_select_for_update_tbl SELECT i,i FROM generate_series(1,10)i; -INSERT 10 +INSERT 0 10 1: set optimizer = off; SET @@ -2243,7 +2263,7 @@ SET BEGIN -- expect no blocking 2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a); -CREATE +CREATE INDEX 2: COMMIT; COMMIT @@ -2251,12 +2271,12 @@ COMMIT COMMIT 2: DROP INDEX create_index_select_for_update_idx; -DROP +DROP INDEX 2: BEGIN; BEGIN 2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a); -CREATE +CREATE INDEX 1: BEGIN; BEGIN @@ -2275,12 +2295,12 @@ COMMIT COMMIT 2: DROP INDEX create_index_select_for_update_idx; -DROP +DROP INDEX -- 2.8.2 with GDD disabled, expect blocking -- reset gdd 2: ALTER SYSTEM RESET gp_enable_global_deadlock_detector; -ALTER +ALTER SYSTEM -- close session to avoid renew session failure after restart 2q: ... 1U:SELECT pg_ctl(dir, 'restart') from lockmodes_datadir; @@ -2317,17 +2337,17 @@ BEGIN COMMIT 2<: <... completed> -CREATE +CREATE INDEX 2: COMMIT; COMMIT 2: DROP INDEX create_index_select_for_update_idx; -DROP +DROP INDEX 2: BEGIN; BEGIN 2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a); -CREATE +CREATE INDEX 1: BEGIN; BEGIN @@ -2346,7 +2366,7 @@ COMMIT COMMIT 1: drop table lockmodes_datadir; -DROP +DROP TABLE 1q: ... 2q: ... @@ -2355,13 +2375,13 @@ DROP -- require an AccessExclusiveLock. -- Case 1. The analyze result is expected when there's concurrent drop on child. 
1:create table analyzedrop(a int) partition by range(a); -CREATE +CREATE TABLE 1:create table analyzedrop_1 partition of analyzedrop for values from (0) to (10); -CREATE +CREATE TABLE 1:create table analyzedrop_2 partition of analyzedrop for values from (10) to (20); -CREATE +CREATE TABLE 1:insert into analyzedrop select * from generate_series(0,19); -INSERT 20 +INSERT 0 20 1:select gp_inject_fault_infinite('merge_leaf_stats_after_find_children', 'suspend', dbid) from gp_segment_configuration where content = -1 and role = 'p'; gp_inject_fault_infinite -------------------------- @@ -2377,7 +2397,7 @@ INSERT 20 1<: <... completed> ANALYZE 2<: <... completed> -DROP +DROP TABLE 3:select * from pg_stats where tablename like 'analyzedrop%'; schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram ------------+---------------+---------+-----------+-----------+-----------+------------+------------------+-------------------+--------------------------------------------------+-------------+-------------------+------------------------+---------------------- @@ -2401,9 +2421,9 @@ DROP 1<: <... completed> ANALYZE 2<: <... completed> -DROP +DROP TABLE 3<: <... 
completed> -DROP +DROP TABLE --empty as table is dropped 4:select * from pg_stats where tablename like 'analyzedrop%'; schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram diff --git a/src/test/isolation2/expected/lockmodes_optimizer.out b/src/test/isolation2/expected/lockmodes_optimizer.out index 9077e116cf2..420b205ad64 100644 --- a/src/test/isolation2/expected/lockmodes_optimizer.out +++ b/src/test/isolation2/expected/lockmodes_optimizer.out @@ -1,14 +1,14 @@ -- table to just store the master's data directory path on segment. CREATE TABLE lockmodes_datadir(a int, dir text); -CREATE +CREATE TABLE INSERT INTO lockmodes_datadir select 1,datadir from gp_segment_configuration where role='p' and content=-1; -INSERT 1 +INSERT 0 1 1: set optimizer = off; SET create or replace view show_locks_lockmodes as select locktype, mode, granted, relation::regclass from pg_locks where gp_segment_id = -1 and locktype = 'relation' and relation::regclass::text like 't_lockmods%'; -CREATE +CREATE VIEW show gp_enable_global_deadlock_detector; gp_enable_global_deadlock_detector @@ -22,25 +22,25 @@ show gp_enable_global_deadlock_detector; -- 1.1 test for heap tables create table t_lockmods (c int) distributed randomly; -CREATE +CREATE TABLE insert into t_lockmods select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 analyze t_lockmods; ANALYZE create table t_lockmods1 (c int) distributed randomly; -CREATE +CREATE TABLE create table t_lockmods_rep(c int) distributed replicated; -CREATE +CREATE TABLE -- See github issue: https://github.com/greenplum-db/gpdb/issues/9449 -- upsert may lock tuples on segment, so we should upgrade lock level -- on QD if GDD is disabled. 
create table t_lockmods_upsert(a int, b int) distributed by (a); -CREATE +CREATE TABLE create unique index uidx_t_lockmodes_upsert on t_lockmods_upsert(a, b); -CREATE +CREATE INDEX -- add analyze to avoid auto vacuum when executing first insert analyze t_lockmods_upsert; ANALYZE @@ -71,7 +71,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -98,7 +98,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -125,7 +125,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -152,7 +152,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -180,7 +180,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -208,7 +208,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -236,7 +236,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -264,7 +264,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK -- 1.1.2 update | delete should hold ExclusiveLock on result relations 1: begin; @@ -277,7 +277,7 @@ UPDATE 5 relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -289,33 +289,33 @@ DELETE 5 relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK -- 1.1.3 insert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK -- 1.1.4 upsert should hold ExclusiveLock on result relations 1: begin; BEGIN 1: insert into 
t_lockmods_upsert values (1, 1) on conflict(a, b) do update set b = 99; -INSERT 1 +INSERT 0 1 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+------------------- relation | ExclusiveLock | t | t_lockmods_upsert (1 row) 1: abort; -ABORT +ROLLBACK -- 1.1.5 use cached plan should be consistent with no cached plan 1: prepare select_for_update as select * from t_lockmods for update; @@ -352,7 +352,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -371,7 +371,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -390,7 +390,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -409,65 +409,65 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute update_tlockmods; -EXECUTE 5 +UPDATE 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+------------ relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute delete_tlockmods; -EXECUTE 5 +DELETE 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+------------ relation | ExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute insert_tlockmods; -EXECUTE 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute upsert_tlockmods; -EXECUTE 1 +INSERT 0 1 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+------------------- relation | ExclusiveLock | t | t_lockmods_upsert (1 row) 1: abort; -ABORT 
+ROLLBACK -- 1.2 test for AO table create table t_lockmods_ao (c int) with (appendonly=true) distributed randomly; -CREATE +CREATE TABLE insert into t_lockmods_ao select * from generate_series(1, 8); -INSERT 8 +INSERT 0 8 analyze t_lockmods_ao; ANALYZE create table t_lockmods_ao1 (c int) with (appendonly=true) distributed randomly; -CREATE +CREATE TABLE -- 1.2.1 select for (update|share|key share|no key update) should hold ExclusiveLock on range tables 1: begin; @@ -498,7 +498,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -528,7 +528,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -558,7 +558,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -588,7 +588,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -616,7 +616,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -644,7 +644,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -672,7 +672,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -700,7 +700,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK -- 1.2.2 update | delete should hold ExclusiveLock on result relations 1: begin; @@ -713,7 +713,7 @@ UPDATE 8 relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -725,20 +725,20 @@ DELETE 8 relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 1.2.3 insert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods_ao select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation 
----------+------------------+---------+--------------- relation | RowExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 1.2.4 use cached plan should be consistent with no cached plan 1: prepare select_for_update_ao as select * from t_lockmods_ao for update; @@ -776,7 +776,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -798,7 +798,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -820,7 +820,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -842,43 +842,43 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute update_tlockmods_ao; -EXECUTE 8 +UPDATE 8 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+--------------- relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute delete_tlockmods_ao; -EXECUTE 8 +DELETE 8 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+--------------- relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute insert_tlockmods_ao; -EXECUTE 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+--------------- relation | RowExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 1.3 With limit clause, such case should -- acquire ExclusiveLock on the whole table and do not generate lockrows node @@ -908,7 +908,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK -- 1.4 For replicated table, we should lock the entire table on ExclusiveLock 1: begin; @@ -931,7 +931,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_rep (2 rows) 1: abort; 
-ABORT +ROLLBACK -- 1.5 test order-by's plan 1: begin; @@ -962,7 +962,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK -- 1.6 select for update NOWAIT/SKIP LOCKED -- NOWAIT/SKIP LOCKED should not affect the table-level lock @@ -979,7 +979,7 @@ BEGIN (5 rows) 2&: select * from t_lockmods for update nowait; 1: abort; -ABORT +ROLLBACK 2<: <... completed> c --- @@ -1003,7 +1003,7 @@ BEGIN (5 rows) 2&: select * from t_lockmods for update skip locked; 1: abort; -ABORT +ROLLBACK 2<: <... completed> c --- @@ -1026,12 +1026,12 @@ ABORT -- Details: https://groups.google.com/a/greenplum.org/g/gpdb-dev/c/wAPKpJzhbpM -- Issue: https://github.com/greenplum-db/gpdb/issues/13652 1:DROP TABLE IF EXISTS t_lockmods_part_tbl_dml; -DROP +DROP TABLE 1:CREATE TABLE t_lockmods_part_tbl_dml (a int, b int, c int) PARTITION BY RANGE(b) (START(1) END(3) EVERY(1)); -CREATE +CREATE TABLE 1:INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i; -INSERT 10 +INSERT 0 10 -- 1: BEGIN; @@ -1052,7 +1052,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i; -INSERT 10 +INSERT 0 10 -- without GDD, it will lock all leaf partitions on QD 1: select * from show_locks_lockmodes; locktype | mode | granted | relation @@ -1135,8 +1135,8 @@ ROLLBACK -- enable gdd ALTER SYSTEM SET gp_enable_global_deadlock_detector TO on; -ALTER --- Use utility session on seg 0 to restart master. This way avoids the +ALTER SYSTEM +-- Use utility session on seg 0 to restart coordinator. This way avoids the -- situation where session issuing the restart doesn't disappear -- itself. 
1U:SELECT pg_ctl(dir, 'restart') from lockmodes_datadir; @@ -1192,7 +1192,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1220,7 +1220,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1248,7 +1248,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1276,7 +1276,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1304,7 +1304,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1332,7 +1332,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1360,7 +1360,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1388,7 +1388,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods (4 rows) 1: abort; -ABORT +ROLLBACK -- 2.1.2 update | delete should hold RowExclusiveLock on result relations @@ -1402,7 +1402,7 @@ UPDATE 5 relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1414,33 +1414,33 @@ DELETE 5 relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK -- 2.1.3 insert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK -- 2.1.4 upsert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods_upsert values (1, 1) on conflict(a, b) do update set b = 99; -INSERT 1 +INSERT 0 1 2: select * from show_locks_lockmodes; locktype | mode | granted | relation 
----------+------------------+---------+------------------- relation | RowExclusiveLock | t | t_lockmods_upsert (1 row) 1: abort; -ABORT +ROLLBACK -- 2.1.5 use cached plan should be consistent with no cached plan 1: prepare select_for_update as select * from t_lockmods for update; @@ -1477,7 +1477,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1496,7 +1496,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1515,7 +1515,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1534,55 +1534,55 @@ BEGIN relation | RowShareLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute update_tlockmods; -EXECUTE 5 +UPDATE 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute delete_tlockmods; -EXECUTE 5 +DELETE 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute insert_tlockmods; -EXECUTE 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------ relation | RowExclusiveLock | t | t_lockmods (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute upsert_tlockmods; -EXECUTE 1 +INSERT 0 1 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+------------------- relation | RowExclusiveLock | t | t_lockmods_upsert (1 row) 1: abort; -ABORT +ROLLBACK -- 2.2 test for AO table @@ -1615,7 +1615,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 
1: begin; BEGIN @@ -1645,7 +1645,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1675,7 +1675,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1705,7 +1705,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (2 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1733,7 +1733,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1761,7 +1761,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1789,7 +1789,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1817,7 +1817,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (4 rows) 1: abort; -ABORT +ROLLBACK -- 2.2.2 update | delete should hold ExclusiveLock on result relations 1: begin; @@ -1830,7 +1830,7 @@ UPDATE 8 relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1842,20 +1842,20 @@ DELETE 8 relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 2.2.3 insert should hold RowExclusiveLock on result relations 1: begin; BEGIN 1: insert into t_lockmods_ao select * from generate_series(1, 5); -INSERT 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+--------------- relation | RowExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 2.2.4 use cached plan should be consistent with no cached plan 1: prepare select_for_update_ao as select * from t_lockmods_ao for update; @@ -1893,7 +1893,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1915,7 +1915,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1937,7 +1937,7 @@ BEGIN 
relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN @@ -1959,43 +1959,43 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute update_tlockmods_ao; -EXECUTE 8 +UPDATE 8 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+--------------- relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute delete_tlockmods_ao; -EXECUTE 8 +DELETE 8 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+---------------+---------+--------------- relation | ExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK 1: begin; BEGIN 1: execute insert_tlockmods_ao; -EXECUTE 5 +INSERT 0 5 2: select * from show_locks_lockmodes; locktype | mode | granted | relation ----------+------------------+---------+--------------- relation | RowExclusiveLock | t | t_lockmods_ao (1 row) 1: abort; -ABORT +ROLLBACK -- 2.3 With limit clause, such case should -- acquire ExclusiveLock on the whole table and do not generate lockrows node @@ -2028,7 +2028,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK -- 2.4 For replicated table, we should lock the entire table on ExclusiveLock 1: begin; @@ -2051,7 +2051,7 @@ BEGIN relation | ExclusiveLock | t | t_lockmods_rep (2 rows) 1: abort; -ABORT +ROLLBACK -- 2.5 test order-by's plan 1: begin; @@ -2083,7 +2083,7 @@ BEGIN relation | RowShareLock | t | t_lockmods (2 rows) 1: abort; -ABORT +ROLLBACK -- 2.6 select for update NOWAIT/SKIP LOCKED -- with GDD, select for update could be optimized to not upgrade lock. @@ -2125,7 +2125,7 @@ BEGIN 2: select * from t_lockmods for update nowait; ERROR: could not obtain lock on row in relation "t_lockmods" (seg1 slice1 10.140.0.3:7003 pid=15182) 1: abort; -ABORT +ROLLBACK 1q: ... 2q: ... 
@@ -2202,7 +2202,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i; -INSERT 10 +INSERT 0 10 -- With GDD enabled, QD will only hold lock on root for insert 1: select * from show_locks_lockmodes; locktype | mode | granted | relation @@ -2214,14 +2214,37 @@ INSERT 10 ROLLBACK 1q: ... +1: CREATE TABLE t_lockmods_aopart(i int, t text) USING ao_row PARTITION BY RANGE(i) (START(1) END(5) EVERY(1)); +CREATE TABLE +1: BEGIN; +BEGIN +1: DELETE FROM t_lockmods_aopart WHERE i = 4; +DELETE 0 +-- With GDD enabled, QD will only hold lock on root for delete +1: select * from show_locks_lockmodes; + locktype | mode | granted | relation +----------+-----------------+---------+--------------------------- + relation | AccessShareLock | t | t_lockmods_aopart_1_prt_4 + relation | AccessShareLock | t | t_lockmods_aopart_1_prt_3 + relation | AccessShareLock | t | t_lockmods_aopart_1_prt_2 + relation | AccessShareLock | t | t_lockmods_aopart_1_prt_1 + relation | AccessShareLock | t | t_lockmods_aopart + relation | ExclusiveLock | t | t_lockmods_aopart +(6 rows) +1: COMMIT; +COMMIT +1: DROP TABLE t_lockmods_aopart; +DROP TABLE +1q: ... + -- 2.8 Verify behaviors of select with locking clause (i.e. select for update) -- when running concurrently with index creation, for Heap tables. -- For AO/CO tables, refer to create_index_allows_readonly.source. 
1: CREATE TABLE create_index_select_for_update_tbl(a int, b int); -CREATE +CREATE TABLE 1: INSERT INTO create_index_select_for_update_tbl SELECT i,i FROM generate_series(1,10)i; -INSERT 10 +INSERT 0 10 1: set optimizer = off; SET @@ -2247,7 +2270,7 @@ SET BEGIN -- expect no blocking 2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a); -CREATE +CREATE INDEX 2: COMMIT; COMMIT @@ -2255,12 +2278,12 @@ COMMIT COMMIT 2: DROP INDEX create_index_select_for_update_idx; -DROP +DROP INDEX 2: BEGIN; BEGIN 2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a); -CREATE +CREATE INDEX 1: BEGIN; BEGIN @@ -2279,12 +2302,12 @@ COMMIT COMMIT 2: DROP INDEX create_index_select_for_update_idx; -DROP +DROP INDEX -- 2.8.2 with GDD disabled, expect blocking -- reset gdd 2: ALTER SYSTEM RESET gp_enable_global_deadlock_detector; -ALTER +ALTER SYSTEM -- close session to avoid renew session failure after restart 2q: ... 1U:SELECT pg_ctl(dir, 'restart') from lockmodes_datadir; @@ -2321,17 +2344,17 @@ BEGIN COMMIT 2<: <... completed> -CREATE +CREATE INDEX 2: COMMIT; COMMIT 2: DROP INDEX create_index_select_for_update_idx; -DROP +DROP INDEX 2: BEGIN; BEGIN 2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a); -CREATE +CREATE INDEX 1: BEGIN; BEGIN @@ -2350,7 +2373,7 @@ COMMIT COMMIT 1: drop table lockmodes_datadir; -DROP +DROP TABLE 1q: ... 2q: ... @@ -2359,13 +2382,13 @@ DROP -- require an AccessExclusiveLock. -- Case 1. The analyze result is expected when there's concurrent drop on child. 
1:create table analyzedrop(a int) partition by range(a); -CREATE +CREATE TABLE 1:create table analyzedrop_1 partition of analyzedrop for values from (0) to (10); -CREATE +CREATE TABLE 1:create table analyzedrop_2 partition of analyzedrop for values from (10) to (20); -CREATE +CREATE TABLE 1:insert into analyzedrop select * from generate_series(0,19); -INSERT 20 +INSERT 0 20 1:select gp_inject_fault_infinite('merge_leaf_stats_after_find_children', 'suspend', dbid) from gp_segment_configuration where content = -1 and role = 'p'; gp_inject_fault_infinite -------------------------- @@ -2381,7 +2404,7 @@ INSERT 20 1<: <... completed> ANALYZE 2<: <... completed> -DROP +DROP TABLE 3:select * from pg_stats where tablename like 'analyzedrop%'; schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram ------------+---------------+---------+-----------+-----------+-----------+------------+------------------+-------------------+--------------------------------------------------+-------------+-------------------+------------------------+---------------------- @@ -2405,9 +2428,9 @@ DROP 1<: <... completed> ANALYZE 2<: <... completed> -DROP +DROP TABLE 3<: <... 
completed> -DROP +DROP TABLE --empty as table is dropped 4:select * from pg_stats where tablename like 'analyzedrop%'; schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram diff --git a/src/test/isolation2/expected/mark_all_aoseg_await_drop.out b/src/test/isolation2/expected/mark_all_aoseg_await_drop.out index 2bdc65236f8..77c1b98655c 100644 --- a/src/test/isolation2/expected/mark_all_aoseg_await_drop.out +++ b/src/test/isolation2/expected/mark_all_aoseg_await_drop.out @@ -3,7 +3,7 @@ -- AOSEG_STATE_AWAITING_DROP. CREATE TABLE mark_all_aoseg_await_drop (a int) WITH (appendonly=true); -CREATE +CREATE TABLE -- Create 3 aoseg entries 0: BEGIN; @@ -13,11 +13,11 @@ BEGIN 2: BEGIN; BEGIN 0: INSERT INTO mark_all_aoseg_await_drop SELECT i FROM generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 1: INSERT INTO mark_all_aoseg_await_drop SELECT i FROM generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 2: INSERT INTO mark_all_aoseg_await_drop SELECT i FROM generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 0: COMMIT; COMMIT 1: COMMIT; @@ -46,11 +46,11 @@ BEGIN 0 (1 row) 1: INSERT INTO mark_all_aoseg_await_drop SELECT i FROM generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 2: VACUUM mark_all_aoseg_await_drop; VACUUM 1: END; -END +COMMIT -- We should see segno 2 and 3 in state 2 (AOSEG_STATE_AWAITING_DROP) -- and segno 1 and 4 in state 1 (AOSEG_STATE_DEFAULT). Segno 1 is not diff --git a/src/test/isolation2/expected/misc.out b/src/test/isolation2/expected/misc.out index c342e1ad5cf..bf65cf90c4a 100644 --- a/src/test/isolation2/expected/misc.out +++ b/src/test/isolation2/expected/misc.out @@ -5,11 +5,11 @@ -- preassigned in QD, if we create a table in utility mode in QE, the oid might -- conflict with preassigned oid. 
-1U: create table utilitymode_primary_key_tab (c1 int); -CREATE +CREATE TABLE -1U: create unique index idx_utilitymode_c1 on utilitymode_primary_key_tab (c1); -CREATE +CREATE INDEX -1U: drop table utilitymode_primary_key_tab; -DROP +DROP TABLE -- Try a few queries in utility mode. (Once upon a time, there was a bug that -- caused a crash on EXPLAIN ANALYZE on a Sort node in utility mode.) @@ -62,7 +62,7 @@ ROLLBACK -- -- We have changed the name to pg_temp_0 in utility mode. 0U: CREATE TEMP TABLE utilitymode_tmp_tab (c1 int) DISTRIBUTED BY (c1); -CREATE +CREATE TABLE 0U: SELECT substring(n.nspname FROM 1 FOR 9) FROM pg_namespace n JOIN pg_class c ON n.oid = c.relnamespace WHERE c.relname = 'utilitymode_tmp_tab'; substring ----------- @@ -78,7 +78,7 @@ CREATE -- gp_dist_random('') should not crash in utility mode -- create or replace view misc_v as select 1; -CREATE +CREATE VIEW 0U: select 1 from gp_dist_random('misc_v') union select 1 from misc_v; ?column? ---------- @@ -91,12 +91,12 @@ CREATE (1 row) -- But views created in utility mode should not throw away gp_dist_random 0U: create or replace view misc_v2 as select 1 from gp_dist_random('pg_class'); -CREATE +CREATE VIEW 0U: select definition from pg_views where viewname = 'misc_v2'; definition ------------------------------------------------------------- - FROM gp_dist_random('pg_class'); SELECT 1 AS "?column?" + FROM gp_dist_random('pg_class'); (1 row) 0U: select count(*) > 0 from gp_dist_random('misc_v2'); ?column? @@ -104,7 +104,7 @@ CREATE t (1 row) 0U: drop view misc_v2; -DROP +DROP VIEW drop view misc_v; DROP diff --git a/src/test/isolation2/expected/modify_table_data_corrupt.out b/src/test/isolation2/expected/modify_table_data_corrupt.out index e5fc04f36f0..3af5476081e 100644 --- a/src/test/isolation2/expected/modify_table_data_corrupt.out +++ b/src/test/isolation2/expected/modify_table_data_corrupt.out @@ -24,18 +24,18 @@ DROP -- and see if it is motioned to other segments. 
create table tab1(a int, b int) distributed by (b); -CREATE +CREATE TABLE create table tab2(a int, b int) distributed by (a); -CREATE +CREATE TABLE create table tab3 (a int, b int) distributed by (b); -CREATE +CREATE TABLE insert into tab1 values (1, 1); -INSERT 1 +INSERT 0 1 insert into tab2 values (1, 1); -INSERT 1 +INSERT 0 1 insert into tab3 values (1, 1); -INSERT 1 +INSERT 0 1 analyze tab1; ANALYZE @@ -56,7 +56,7 @@ update pg_class set reltuples = 100000 where relname='tab3'; UPDATE 1 0U: insert into tab1 values (1, 1); -INSERT 1 +INSERT 0 1 select gp_segment_id, * from tab1; gp_segment_id | a | b @@ -90,7 +90,7 @@ BEGIN delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b; ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:735) (seg1 127.0.1.1:7003 pid=89508) (nodeModifyTable.c:735) abort; -ABORT +ROLLBACK -- For planner, this will error out explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b; @@ -117,7 +117,7 @@ BEGIN update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b; ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:1276) (seg1 127.0.1.1:7003 pid=89508) (nodeModifyTable.c:1276) abort; -ABORT +ROLLBACK -- For orca, this will error out explain (costs off) delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a; @@ -144,7 +144,7 @@ BEGIN delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a; DELETE 2 abort; -ABORT +ROLLBACK -- For orca, this will error out explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a; @@ -171,7 +171,7 @@ BEGIN update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a; UPDATE 2 abort; -ABORT +ROLLBACK -- test splitupdate. 
-- For orca, the plan contains a redistribute motion, so that @@ -193,11 +193,11 @@ BEGIN update tab1 set b = b + 1; UPDATE 2 abort; -ABORT +ROLLBACK drop table tab1; -DROP +DROP TABLE drop table tab2; -DROP +DROP TABLE drop table tab3; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/modify_table_data_corrupt_optimizer.out b/src/test/isolation2/expected/modify_table_data_corrupt_optimizer.out index 2a647497cb0..b60bb24315b 100644 --- a/src/test/isolation2/expected/modify_table_data_corrupt_optimizer.out +++ b/src/test/isolation2/expected/modify_table_data_corrupt_optimizer.out @@ -24,18 +24,18 @@ ERROR: table "tab3" does not exist -- and see if it is motioned to other segments. create table tab1(a int, b int) distributed by (b); -CREATE +CREATE TABLE create table tab2(a int, b int) distributed by (a); -CREATE +CREATE TABLE create table tab3 (a int, b int) distributed by (b); -CREATE +CREATE TABLE insert into tab1 values (1, 1); -INSERT 1 +INSERT 0 1 insert into tab2 values (1, 1); -INSERT 1 +INSERT 0 1 insert into tab3 values (1, 1); -INSERT 1 +INSERT 0 1 analyze tab1; ANALYZE @@ -56,7 +56,7 @@ update pg_class set reltuples = 100000 where relname='tab3'; UPDATE 1 0U: insert into tab1 values (1, 1); -INSERT 1 +INSERT 0 1 select gp_segment_id, * from tab1; gp_segment_id | a | b @@ -90,7 +90,7 @@ BEGIN delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b; DELETE 1 abort; -ABORT +ROLLBACK -- For planner, this will error out explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b; @@ -110,14 +110,14 @@ explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2. 
-> Seq Scan on tab3 -> Hash -> Seq Scan on tab1 - Optimizer: Pivotal Optimizer (GPORCA) + Optimizer: Pivotal Optimizer (GPORCA) version 3.86.0 (15 rows) begin; BEGIN update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b; UPDATE 1 abort; -ABORT +ROLLBACK -- For orca, this will error out explain (costs off) delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a; @@ -145,7 +145,7 @@ BEGIN delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a; ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:1156) (seg1 172.17.0.2:7003 pid=30251) (nodeModifyTable.c:1156) abort; -ABORT +ROLLBACK -- For orca, this will error out explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a; @@ -166,14 +166,14 @@ explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2. -> Hash -> Broadcast Motion 3:3 (slice3; segments: 3) -> Seq Scan on tab1 - Optimizer: Pivotal Optimizer (GPORCA) + Optimizer: Pivotal Optimizer (GPORCA) version 3.86.0 (16 rows) begin; BEGIN update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a; ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:1156) (seg1 172.17.0.2:7003 pid=30251) (nodeModifyTable.c:1156) abort; -ABORT +ROLLBACK -- test splitupdate. 
-- For orca, the plan contains a redistribute motion, so that @@ -199,11 +199,11 @@ BEGIN update tab1 set b = b + 1; ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:1156) (seg1 172.17.0.2:7003 pid=30251) (nodeModifyTable.c:1156) abort; -ABORT +ROLLBACK drop table tab1; -DROP +DROP TABLE drop table tab2; -DROP +DROP TABLE drop table tab3; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/orphan_temp_table.out b/src/test/isolation2/expected/orphan_temp_table.out index 85cbe165008..a47a7642748 100644 --- a/src/test/isolation2/expected/orphan_temp_table.out +++ b/src/test/isolation2/expected/orphan_temp_table.out @@ -3,7 +3,7 @@ -- case 1: Before the fix, when backend process panic on the segment, the temp table will be left on the coordinator. -- create a temp table 1: CREATE TEMP TABLE test_temp_table_cleanup(a int); -CREATE +CREATE TABLE -- panic on segment 0 1: SELECT gp_inject_fault('before_exec_scan', 'panic', dbid) FROM gp_segment_configuration WHERE role='p' AND content = 0; diff --git a/src/test/isolation2/expected/pg_rewind_fail_missing_xlog.out b/src/test/isolation2/expected/pg_rewind_fail_missing_xlog.out index 845dc6be0f7..9520545d094 100644 --- a/src/test/isolation2/expected/pg_rewind_fail_missing_xlog.out +++ b/src/test/isolation2/expected/pg_rewind_fail_missing_xlog.out @@ -9,9 +9,9 @@ CREATE OR REPLACE FUNCTION connectSeg(n int, port int, hostname text) RETURNS bo CREATE CREATE TABLE tst_missing_tbl (a int); -CREATE +CREATE TABLE INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Make the test faster by not preserving any extra wal segment files !\retcode gpconfig -c wal_keep_size -v 0; @@ -42,7 +42,7 @@ CHECKPOINT t (1 row) 1: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Should be not needed mostly but let's 100% ensure since pg_switch_wal() -- won't switch if it has been on the boundary (seldom though). 
0U: SELECT pg_switch_wal is not null FROM pg_switch_wal(); @@ -51,7 +51,7 @@ INSERT 3 t (1 row) 1: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 0Uq: ... -- Make sure primary/mirror pair is in sync, otherwise FTS can't promote mirror @@ -87,9 +87,9 @@ CHECKPOINT 0M: BEGIN; BEGIN 0M: DROP TABLE tst_missing_tbl; -DROP +DROP TABLE 0M: ABORT; -ABORT +ROLLBACK 0M: CHECKPOINT; CHECKPOINT 0Mq: ... @@ -99,7 +99,7 @@ CHECKPOINT -- know that a wal divergence is explicitly triggered and 100% completed. Also -- sanity check the tuple distribution (assumption of the test). 2: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 2: SELECT gp_segment_id, count(*) from tst_missing_tbl group by gp_segment_id; gp_segment_id | count ---------------+------- @@ -147,21 +147,21 @@ CHECKPOINT t (1 row) 3: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 1U: SELECT pg_switch_wal is not null FROM pg_switch_wal(); ?column? ---------- t (1 row) 3: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 1U: SELECT pg_switch_wal is not null FROM pg_switch_wal(); ?column? ---------- t (1 row) 3: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Should be not needed mostly but let's 100% ensure since pg_switch_wal() -- won't switch if it is on the boundary already (seldom though). 1U: SELECT pg_switch_wal is not null FROM pg_switch_wal(); @@ -170,7 +170,7 @@ INSERT 3 t (1 row) 3: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Hang at checkpointer before writing checkpoint xlog. 
3: SELECT gp_inject_fault('checkpoint_after_redo_calculated', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content = 1; @@ -226,7 +226,7 @@ INSERT 3 (2 rows) 4: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 4: SELECT gp_segment_id, count(*) from tst_missing_tbl group by gp_segment_id; gp_segment_id | count ---------------+------- @@ -284,7 +284,7 @@ CHECKPOINT t (1 row) 1: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Should be not needed mostly but let's 100% ensure since pg_switch_wal() -- won't switch if it has been on the boundary (seldom though). 0U: SELECT pg_switch_wal is not null FROM pg_switch_wal(); @@ -293,7 +293,7 @@ INSERT 3 t (1 row) 1: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 0Uq: ... -- Make sure primary/mirror pair is in sync, otherwise FTS can't promote mirror @@ -327,17 +327,17 @@ INSERT 3 0M: BEGIN; BEGIN 0M: DROP TABLE tst_missing_tbl; -DROP +DROP TABLE 0M: ABORT; -ABORT +ROLLBACK 0M: CHECKPOINT; CHECKPOINT 0M: BEGIN; BEGIN 0M: DROP TABLE tst_missing_tbl; -DROP +DROP TABLE 0M: ABORT; -ABORT +ROLLBACK 0M: CHECKPOINT; CHECKPOINT @@ -368,7 +368,7 @@ CHECKPOINT -- know that a wal divergence is explicitly triggered and 100% completed. Also -- sanity check the tuple distribution (assumption of the test). 2: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 2: SELECT gp_segment_id, count(*) from tst_missing_tbl group by gp_segment_id; gp_segment_id | count ---------------+------- @@ -416,7 +416,7 @@ CHECKPOINT t (1 row) 3: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Should be not needed mostly but let's 100% ensure since pg_switch_wal() -- won't switch if it is on the boundary already (seldom though). 
1U: SELECT pg_switch_wal is not null FROM pg_switch_wal(); @@ -425,7 +425,7 @@ INSERT 3 t (1 row) 3: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Have primary/mirror pair in sync before suspending the wal sender. 3: SELECT wait_until_all_segments_synchronized(); @@ -497,7 +497,7 @@ INSERT 3 -- Write something on the current primary 4: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 4: SELECT gp_segment_id, count(*) from tst_missing_tbl group by gp_segment_id; gp_segment_id | count ---------------+------- @@ -550,9 +550,9 @@ server closed the connection unexpectedly -- Create an unlogged table on the primary that remembers replication slot's last restart_lsn and number of WAL files. 1U: CREATE UNLOGGED TABLE unlogged_wal_retention_test(restart_lsn_before pg_lsn, wal_count_before int); -CREATE +CREATE TABLE 1U: INSERT INTO unlogged_wal_retention_test SELECT (select restart_lsn FROM pg_replication_slots WHERE slot_name = 'internal_wal_replication_slot') as restart_lsn_before, (select count(*) from pg_ls_waldir()) as wal_count_before; -INSERT 1 +INSERT 0 1 5: CHECKPOINT; CHECKPOINT -- Replication slot's restart_lsn should advance to the checkpoint's redo location. @@ -582,7 +582,7 @@ UPDATE 1 t (1 row) 5: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Replication slot's restart_lsn should NOT change regardless mirror has received more wals. 1U: select pg_wal_lsn_diff(restart_lsn, restart_lsn_before) = 0 FROM pg_replication_slots, unlogged_wal_retention_test WHERE slot_name = 'internal_wal_replication_slot'; ?column? @@ -624,7 +624,7 @@ CHECKPOINT (1 row) -- Perform transaction to make sure wals are in sync. 5: INSERT INTO tst_missing_tbl values(2),(1),(5); -INSERT 3 +INSERT 0 3 -- Replication slot's restart_lsn should now advance to the checkpoint's redo location. 
1U: SELECT pg_wal_lsn_diff(restart_lsn, restart_lsn_before) > 0 from pg_replication_slots, unlogged_wal_retention_test WHERE slot_name = 'internal_wal_replication_slot'; ?column? @@ -648,13 +648,11 @@ CHECKPOINT -- Cleanup 1U: DROP TABLE unlogged_wal_retention_test; -DROP +DROP TABLE 1Uq: ... 5: DROP TABLE tst_missing_tbl; -DROP -5: DROP FUNCTION connectSeg; -DROP +DROP TABLE !\retcode gpconfig -r wal_keep_size; (exited with code 0) !\retcode gpconfig -r wal_recycle; diff --git a/src/test/isolation2/expected/pg_terminate_backend.out b/src/test/isolation2/expected/pg_terminate_backend.out index d3bb0b2ad2b..fc1e6f1d42c 100644 --- a/src/test/isolation2/expected/pg_terminate_backend.out +++ b/src/test/isolation2/expected/pg_terminate_backend.out @@ -1,5 +1,5 @@ 1:create table terminate_backend_t (a int) distributed by (a); -CREATE +CREATE TABLE -- fault on seg1 to block insert command into terminate_backend_t table select gp_inject_fault('heap_insert', 'infinite_loop', '', '', 'terminate_backend_t', 1, 1, 0, dbid) from gp_segment_configuration where content = 1 and role = 'p'; diff --git a/src/test/isolation2/expected/pg_views_concurrent_drop.out b/src/test/isolation2/expected/pg_views_concurrent_drop.out index aa3c86fc664..a2187664711 100644 --- a/src/test/isolation2/expected/pg_views_concurrent_drop.out +++ b/src/test/isolation2/expected/pg_views_concurrent_drop.out @@ -1,7 +1,7 @@ 1:drop view if exists concurrent_drop_view cascade; -DROP +DROP VIEW 1:create view concurrent_drop_view as select * from pg_class; -CREATE +CREATE VIEW 1:select viewname from pg_views where viewname = 'concurrent_drop_view'; viewname ---------------------- @@ -19,7 +19,7 @@ CREATE 1:begin; BEGIN 1:drop view concurrent_drop_view; -DROP +DROP VIEW 2&:select viewname, definition from pg_views where viewname = 'concurrent_drop_view'; -- wait till halts for AccessShareLock on QD 3: SELECT wait_until_waiting_for_required_lock('concurrent_drop_view', 'AccessShareLock', -1); diff --git 
a/src/test/isolation2/expected/prepare_limit.out b/src/test/isolation2/expected/prepare_limit.out index c9479617b27..8e188a57252 100644 --- a/src/test/isolation2/expected/prepare_limit.out +++ b/src/test/isolation2/expected/prepare_limit.out @@ -18,13 +18,13 @@ (exited with code 0) 5: create table prepare_limit1 (a int); -CREATE +CREATE TABLE 5: create table prepare_limit2 (a int); -CREATE +CREATE TABLE 5: create table prepare_limit3 (a int); -CREATE +CREATE TABLE 5: create table prepare_limit4 (a int); -CREATE +CREATE TABLE 5: select gp_inject_fault_infinite('dtm_before_insert_forget_comitted', 'suspend', 1); gp_inject_fault_infinite @@ -64,13 +64,13 @@ CREATE Success: (1 row) 1<: <... completed> -INSERT 1 +INSERT 0 1 2<: <... completed> -INSERT 1 +INSERT 0 1 3<: <... completed> -INSERT 1 +INSERT 0 1 4<: <... completed> -INSERT 1 +INSERT 0 1 -- verify that standby is correctly wal streaming. 5: select state from pg_stat_replication; @@ -104,13 +104,13 @@ INSERT 1 -- cleanup 5: drop table prepare_limit1; -DROP +DROP TABLE 5: drop table prepare_limit2; -DROP +DROP TABLE 5: drop table prepare_limit3; -DROP +DROP TABLE 5: drop table prepare_limit4; -DROP +DROP TABLE -- Not using gpconfig -r, else it makes max_prepared_transactions be default -- (50) and some isolation2 tests will fail due to "too many clients". 
Hardcode diff --git a/src/test/isolation2/expected/prepared_xact_deadlock_pg_rewind.out b/src/test/isolation2/expected/prepared_xact_deadlock_pg_rewind.out index cb59647769e..56838af0657 100644 --- a/src/test/isolation2/expected/prepared_xact_deadlock_pg_rewind.out +++ b/src/test/isolation2/expected/prepared_xact_deadlock_pg_rewind.out @@ -5,9 +5,9 @@ -- start_ignore -- set GUCs to speed-up the test 1: alter system set gp_fts_probe_retries to 2; -ALTER +ALTER SYSTEM 1: alter system set gp_fts_probe_timeout to 5; -ALTER +ALTER SYSTEM 1: select pg_reload_conf(); pg_reload_conf ---------------- @@ -109,9 +109,9 @@ select wait_until_all_segments_synchronized(); -- start_ignore -- reset fts GUCs. 3: alter system reset gp_fts_probe_retries; -ALTER +ALTER SYSTEM 3: alter system reset gp_fts_probe_timeout; -ALTER +ALTER SYSTEM 3: select pg_reload_conf(); pg_reload_conf ---------------- diff --git a/src/test/isolation2/expected/prevent_ao_wal.out b/src/test/isolation2/expected/prevent_ao_wal.out index 314d5b20440..6b76c11c8c8 100644 --- a/src/test/isolation2/expected/prevent_ao_wal.out +++ b/src/test/isolation2/expected/prevent_ao_wal.out @@ -23,9 +23,9 @@ GP_IGNORE: defined new match expression -- Create tables (AO, AOCO) -1U: CREATE TABLE ao_foo (n int) WITH (appendonly=true); -CREATE +CREATE TABLE -1U: CREATE TABLE aoco_foo (n int, m int) WITH (appendonly=true, orientation=column); -CREATE +CREATE TABLE -- Switch WAL file -1U: SELECT true FROM pg_switch_wal(); @@ -35,10 +35,10 @@ CREATE (1 row) -- Insert data (AO) -1U: INSERT INTO ao_foo SELECT generate_series(1,10); -INSERT 10 +INSERT 0 10 -- Insert data (AOCO) -1U: INSERT INTO aoco_foo SELECT generate_series(1,10), generate_series(1,10); -INSERT 10 +INSERT 0 10 -- Delete data and run vacuum (AO) -1U: DELETE FROM ao_foo WHERE n > 5; DELETE 5 @@ -93,10 +93,10 @@ rmgr: Appendonly len (rec/tot): 50/ 50, tx: ##, lsn: #/########, prev #/ (1 row) -- Insert data (AO) -1U: INSERT INTO ao_foo SELECT generate_series(1,10); 
-INSERT 10 +INSERT 0 10 -- Insert data (AOCO) -1U: INSERT INTO aoco_foo SELECT generate_series(1,10), generate_series(1,10); -INSERT 10 +INSERT 0 10 -- Delete data and run vacuum (AO) -1U: DELETE FROM ao_foo WHERE n > 5; DELETE 5 @@ -113,9 +113,9 @@ VACUUM -1U: DROP TABLE ao_foo; -DROP +DROP TABLE -1U: DROP TABLE aoco_foo; -DROP +DROP TABLE -- Reset wal_level !\retcode gpconfig -r wal_level --masteronly; diff --git a/src/test/isolation2/expected/reader_waits_for_lock.out b/src/test/isolation2/expected/reader_waits_for_lock.out index df9cf7de46f..98c8763ef00 100644 --- a/src/test/isolation2/expected/reader_waits_for_lock.out +++ b/src/test/isolation2/expected/reader_waits_for_lock.out @@ -4,17 +4,17 @@ -- setup CREATE or REPLACE FUNCTION check_readers_are_blocked () RETURNS bool AS $$ declare retries int; /* in func */ begin retries := 1200; /* in func */ loop if (SELECT count(*) > 0 as reader_waits from pg_locks l join pg_stat_activity a on a.pid = l.pid and a.query like '%reader_waits_for_lock_table%' and not a.pid = pg_backend_pid() and l.granted = false and l.mppiswriter = false) then return true; /* in func */ end if; /* in func */ if retries <= 0 then return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform pg_stat_clear_snapshot(); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION 1: create table reader_waits_for_lock_table(a int, b int) distributed by (a); -CREATE +CREATE TABLE 1: insert into reader_waits_for_lock_table select 1, 1; -INSERT 1 +INSERT 0 1 -- Aquire a conflicting lock in utility mode on seg0. 0U: BEGIN; BEGIN 0U: LOCK reader_waits_for_lock_table IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE -- A utility mode connection should not have valid gp_session_id, else -- locks aquired by it may not confict with locks requested by a -- normal mode backend. 
diff --git a/src/test/isolation2/expected/reindex.out b/src/test/isolation2/expected/reindex.out index a9eac24f660..b166a6ad591 100644 --- a/src/test/isolation2/expected/reindex.out +++ b/src/test/isolation2/expected/reindex.out @@ -1,6 +1,6 @@ -- Dropping table while reindex database should not fail reindex CREATE DATABASE reindexdb1 TEMPLATE template1; -CREATE +CREATE DATABASE -- halt reindex after scanning the pg_class and getting the relids SELECT gp_inject_fault_infinite('reindex_db', 'suspend', 1); gp_inject_fault_infinite @@ -8,7 +8,7 @@ SELECT gp_inject_fault_infinite('reindex_db', 'suspend', 1); Success: (1 row) 1:@db_name reindexdb1: CREATE TABLE heap1(a INT, b INT); -CREATE +CREATE TABLE 1&:REINDEX DATABASE reindexdb1; SELECT gp_wait_until_triggered_fault('reindex_db', 1, 1); gp_wait_until_triggered_fault @@ -16,7 +16,7 @@ SELECT gp_wait_until_triggered_fault('reindex_db', 1, 1); Success: (1 row) 2:@db_name reindexdb1:DROP TABLE heap1; -DROP +DROP TABLE SELECT gp_inject_fault('reindex_db', 'reset', 1); gp_inject_fault ----------------- @@ -33,11 +33,11 @@ REINDEX BEGIN; BEGIN CREATE TABLE reindex_index1(a int, b int); -CREATE +CREATE TABLE CREATE INDEX reindex_index1_idx1 on reindex_index1 (b); -CREATE +CREATE INDEX insert into reindex_index1 select i,i+1 from generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 COMMIT; COMMIT SELECT gp_inject_fault_infinite('reindex_relation', 'suspend', 1); @@ -53,7 +53,7 @@ SELECT gp_wait_until_triggered_fault('reindex_relation', 1, 1); (1 row) -- create one more index CREATE INDEX reindex_index1_idx2 on reindex_index1 (a); -CREATE +CREATE INDEX SELECT gp_inject_fault('reindex_relation', 'reset', 1); gp_inject_fault ----------------- @@ -63,4 +63,4 @@ SELECT gp_inject_fault('reindex_relation', 'reset', 1); REINDEX DROP DATABASE reindexdb1; -DROP +DROP DATABASE diff --git a/src/test/isolation2/expected/reindex/abort_reindex.out b/src/test/isolation2/expected/reindex/abort_reindex.out index ad5c7d1cef2..3a8e2a73701 100644 
--- a/src/test/isolation2/expected/reindex/abort_reindex.out +++ b/src/test/isolation2/expected/reindex/abort_reindex.out @@ -1,18 +1,18 @@ DROP TABLE IF EXISTS reindex_abort_ao; -DROP +DROP TABLE CREATE TABLE reindex_abort_ao (a INT) WITH (appendonly=true, orientation=column); -CREATE +CREATE TABLE insert into reindex_abort_ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_btree_reindex_abort_ao on reindex_abort_ao(a); -CREATE +CREATE INDEX -- start_ignore drop table if exists reindex_abort_ao_old; -DROP +DROP TABLE create table reindex_abort_ao_old as (select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode from pg_class where relname = 'idx_btree_reindex_abort_ao' union all select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode from gp_dist_random('pg_class') where relname = 'idx_btree_reindex_abort_ao'); -CREATE 4 +SELECT 4 -- end_ignore select 1 as have_same_number_of_rows from reindex_abort_ao_old where c_gp_segment_id > -1 group by c_oid having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1); diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_bitmap.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_bitmap.out index ebda60ec248..707f70681c5 100644 --- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_bitmap.out +++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_bitmap.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_ao_bitmap; -DROP +DROP TABLE CREATE TABLE reindex_crtab_ao_bitmap (a INT) WITH (appendonly=true); -CREATE +CREATE TABLE insert into reindex_crtab_ao_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_ao_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_ao_bitmap on reindex_crtab_ao_bitmap USING bitmap(a); -CREATE +CREATE INDEX 
select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_bitmap' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1); oid_same_on_all_segs ---------------------- @@ -30,6 +30,8 @@ REINDEX CREATE 1: COMMIT; COMMIT +2<: <... completed> +CREATE INDEX 2: COMMIT; COMMIT 3: SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_reindex_crtab_ao_bitmap' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1); diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_btree.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_btree.out index 65b18f1147e..d9edc941eec 100644 --- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_btree.out +++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_btree.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_ao_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtab_ao_btree (a INT) WITH (appendonly=true); -CREATE +CREATE TABLE insert into reindex_crtab_ao_btree select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_ao_btree select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_ao_btree on reindex_crtab_ao_btree(a); -CREATE +CREATE INDEX select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_btree' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1); oid_same_on_all_segs ---------------------- @@ -30,6 +30,8 @@ REINDEX CREATE 1: COMMIT; COMMIT +2<: <... 
completed> +CREATE INDEX 2: COMMIT; COMMIT 3: SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_reindex_crtab_ao_btree' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1); diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_bitmap.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_bitmap.out index 4f7b6f9bd23..5fab7906557 100644 --- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_bitmap.out +++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_bitmap.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_aoco_bitmap; -DROP +DROP TABLE CREATE TABLE reindex_crtab_aoco_bitmap (a INT) WITH (appendonly=true, orientation=column); -CREATE +CREATE TABLE insert into reindex_crtab_aoco_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_aoco_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_aoco_bitmap on reindex_crtab_aoco_bitmap USING bitmap(a); -CREATE +CREATE INDEX select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_bitmap' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1); oid_same_on_all_segs ---------------------- @@ -30,6 +30,8 @@ REINDEX CREATE 1: COMMIT; COMMIT +2<: <... 
completed> +CREATE INDEX 2: COMMIT; COMMIT 3: SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_reindex_crtab_aoco_bitmap' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1); diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_btree.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_btree.out index c7a70b9ccd0..639ed4c0553 100644 --- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_btree.out +++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_btree.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_aoco_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtab_aoco_btree (a INT) WITH (appendonly=true, orientation=column); -CREATE +CREATE TABLE insert into reindex_crtab_aoco_btree select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_aoco_btree select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_aoco_btree on reindex_crtab_aoco_btree(a); -CREATE +CREATE INDEX select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_btree' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1); oid_same_on_all_segs ---------------------- @@ -30,6 +30,8 @@ REINDEX CREATE 1: COMMIT; COMMIT +2<: <... 
completed> +CREATE INDEX 2: COMMIT; COMMIT 3: SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_reindex_crtab_aoco_btree' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1); diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_bitmap.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_bitmap.out index 15bbec1e6a7..2ae538a7b0c 100644 --- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_bitmap.out +++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_bitmap.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_heap_bitmap; -DROP +DROP TABLE CREATE TABLE reindex_crtab_heap_bitmap (a INT); -CREATE +CREATE TABLE insert into reindex_crtab_heap_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_heap_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_heap_bitmap on reindex_crtab_heap_bitmap(a); -CREATE +CREATE INDEX select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_bitmap' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1); oid_same_on_all_segs ---------------------- @@ -27,7 +27,7 @@ BEGIN 1: REINDEX index idx_reindex_crtab_heap_bitmap; REINDEX 2: create index idx_reindex_crtab_heap_bitmap2 on reindex_crtab_heap_bitmap(a); -CREATE +CREATE INDEX 1: COMMIT; COMMIT 2: COMMIT; diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_btree.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_btree.out index 7bae7e1d940..a25e65b12ea 100644 --- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_btree.out +++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_btree.out @@ -1,14 +1,14 @@ DROP 
TABLE IF EXISTS reindex_crtab_heap_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtab_heap_btree (a INT); -CREATE +CREATE TABLE insert into reindex_crtab_heap_btree select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_heap_btree select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_heap_btree on reindex_crtab_heap_btree(a); -CREATE +CREATE INDEX select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_btree' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1); oid_same_on_all_segs ---------------------- @@ -27,7 +27,7 @@ BEGIN 1: REINDEX index idx_reindex_crtab_heap_btree; REINDEX 2: create index idx_reindex_crtab_heap_btree2 on reindex_crtab_heap_btree(a); -CREATE +CREATE INDEX 1: COMMIT; COMMIT 2: COMMIT; diff --git a/src/test/isolation2/expected/reindex/reindextable_while_addpart_ao_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_addpart_ao_part_btree.out index cefd1bf866b..8da2942791f 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_addpart_ao_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_addpart_ao_part_btree.out @@ -1,24 +1,24 @@ DROP TABLE IF EXISTS reindex_crtabforadd_part_ao_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtabforadd_part_ao_btree (id int, date date, amt decimal(10,2)) with (appendonly=true) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE ); -CREATE +CREATE TABLE Insert into reindex_crtabforadd_part_ao_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_ao_btree select i, 
to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_ao_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_ao_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 create index on reindex_crtabforadd_part_ao_btree(id); -CREATE +CREATE INDEX -- start_ignore create table before_reindex_crtabforadd_part_ao_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforadd_part_ao_btree%_idx'; -CREATE 12 +SELECT 12 -- end_ignore select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforadd_part_ao_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1); @@ -38,7 +38,7 @@ DELETE 8 1: BEGIN; BEGIN 1: LOCK reindex_crtabforadd_part_ao_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtabforadd_part_ao_btree; 3&: alter table reindex_crtabforadd_part_ao_btree add partition p1 START (date '2013-05-01') INCLUSIVE with(appendonly=true); 1: COMMIT; @@ -46,9 +46,9 @@ COMMIT 2<: <... completed> REINDEX 3<: <... 
completed> -ALTER +ALTER TABLE 3: Insert into reindex_crtabforadd_part_ao_btree values(9,'2013-05-22',14.22); -INSERT 1 +INSERT 0 1 3: select count(*) from reindex_crtabforadd_part_ao_btree where id = 29; count ------- diff --git a/src/test/isolation2/expected/reindex/reindextable_while_addpart_aoco_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_addpart_aoco_part_btree.out index 89d80f1a9fb..6db2d2d1c52 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_addpart_aoco_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_addpart_aoco_part_btree.out @@ -1,24 +1,24 @@ DROP TABLE IF EXISTS reindex_crtabforadd_part_aoco_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtabforadd_part_aoco_btree (id int, date date, amt decimal(10,2)) with (appendonly=true, orientation=column) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE ); -CREATE +CREATE TABLE Insert into reindex_crtabforadd_part_aoco_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_aoco_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_aoco_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_aoco_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 create index on reindex_crtabforadd_part_aoco_btree(id); -CREATE +CREATE INDEX -- start_ignore create table before_reindex_crtabforadd_part_aoco_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as 
c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforadd_part_aoco_btree%_idx'; -CREATE 12 +SELECT 12 -- end_ignore select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforadd_part_aoco_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1); @@ -38,7 +38,7 @@ DELETE 8 1: BEGIN; BEGIN 1: LOCK reindex_crtabforadd_part_aoco_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtabforadd_part_aoco_btree; 3&: alter table reindex_crtabforadd_part_aoco_btree add default partition part_others with(appendonly=true, orientation=column); 1: COMMIT; @@ -46,9 +46,9 @@ COMMIT 2<: <... completed> REINDEX 3<: <... completed> -ALTER +ALTER TABLE 3: Insert into reindex_crtabforadd_part_aoco_btree values(29,'2013-04-22',12.52); -INSERT 1 +INSERT 0 1 3: select count(*) from reindex_crtabforadd_part_aoco_btree where id = 29; count ------- diff --git a/src/test/isolation2/expected/reindex/reindextable_while_addpart_heap_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_addpart_heap_part_btree.out index eed3c9c024e..49ed4aa95b4 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_addpart_heap_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_addpart_heap_part_btree.out @@ -1,24 +1,24 @@ DROP TABLE IF EXISTS reindex_crtabforadd_part_heap_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtabforadd_part_heap_btree (id int, date date, amt decimal(10,2)) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE ); -CREATE +CREATE TABLE Insert into reindex_crtabforadd_part_heap_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from 
generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_heap_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_heap_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforadd_part_heap_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 create index on reindex_crtabforadd_part_heap_btree(id); -CREATE +CREATE INDEX -- start_ignore create table before_reindex_crtabforadd_part_heap_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforadd_part_heap_btree%_idx'; -CREATE 12 +SELECT 12 -- end_ignore select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforadd_part_heap_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1); @@ -38,7 +38,7 @@ DELETE 8 1: BEGIN; BEGIN 1: LOCK reindex_crtabforadd_part_heap_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtabforadd_part_heap_btree; 3&: alter table reindex_crtabforadd_part_heap_btree add partition new_p START (date '2013-06-01') INCLUSIVE ; 1: COMMIT; @@ -46,9 +46,9 @@ COMMIT 2<: <... completed> REINDEX 3<: <... 
completed> -ALTER +ALTER TABLE 3: Insert into reindex_crtabforadd_part_heap_btree values(29,'2013-06-09',14.20); -INSERT 1 +INSERT 0 1 3: select count(*) from reindex_crtabforadd_part_heap_btree where id = 29; count ------- diff --git a/src/test/isolation2/expected/reindex/reindextable_while_altertable_ao_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_altertable_ao_part_btree.out index 52b56766ec5..d467c88c2b6 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_altertable_ao_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_altertable_ao_part_btree.out @@ -1,24 +1,24 @@ DROP TABLE IF EXISTS reindex_crtabforalter_part_ao_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtabforalter_part_ao_btree (id int, date date, amt decimal(10,2)) with (appendonly=true) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE ); -CREATE +CREATE TABLE Insert into reindex_crtabforalter_part_ao_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_ao_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_ao_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_ao_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 create index on reindex_crtabforalter_part_ao_btree(id); -CREATE +CREATE INDEX -- start_ignore create table before_reindex_crtabforalter_part_ao_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, 
relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforalter_part_ao_btree%_idx'; -CREATE 12 +SELECT 12 -- end_ignore select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforalter_part_ao_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1); @@ -38,7 +38,7 @@ DELETE 8 1: BEGIN; BEGIN 1: LOCK reindex_crtabforalter_part_ao_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtabforalter_part_ao_btree; 3&: alter table reindex_crtabforalter_part_ao_btree drop column amt; 1: COMMIT; @@ -46,7 +46,7 @@ COMMIT 2<: <... completed> REINDEX 3<: <... completed> -ALTER +ALTER TABLE 3: select count(*) from reindex_crtabforalter_part_ao_btree where id = 29; count ------- diff --git a/src/test/isolation2/expected/reindex/reindextable_while_altertable_aoco_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_altertable_aoco_part_btree.out index 6780e137f16..00a6b537348 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_altertable_aoco_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_altertable_aoco_part_btree.out @@ -1,24 +1,24 @@ DROP TABLE IF EXISTS reindex_crtabforalter_part_aoco_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtabforalter_part_aoco_btree (id int, date date, amt decimal(10,2)) with (appendonly=true, orientation=column) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE ); -CREATE +CREATE TABLE Insert into reindex_crtabforalter_part_aoco_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_aoco_btree select i, to_date('2013-08-'||i, 
'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_aoco_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_aoco_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 create index on reindex_crtabforalter_part_aoco_btree(id); -CREATE +CREATE INDEX -- start_ignore create table before_reindex_crtabforalter_part_aoco_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforalter_part_aoco_btree%_idx'; -CREATE 12 +SELECT 12 -- end_ignore select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforalter_part_aoco_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1); @@ -38,7 +38,7 @@ DELETE 8 1: BEGIN; BEGIN 1: LOCK reindex_crtabforalter_part_aoco_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtabforalter_part_aoco_btree; 3&: alter table reindex_crtabforalter_part_aoco_btree drop column amt; 1: COMMIT; @@ -46,7 +46,7 @@ COMMIT 2<: <... completed> REINDEX 3<: <... 
completed> -ALTER +ALTER TABLE 3: select count(*) from reindex_crtabforalter_part_aoco_btree where id = 29; count ------- diff --git a/src/test/isolation2/expected/reindex/reindextable_while_altertable_heap_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_altertable_heap_part_btree.out index 0a63be4559e..975e2c93301 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_altertable_heap_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_altertable_heap_part_btree.out @@ -1,24 +1,24 @@ DROP TABLE IF EXISTS reindex_crtabforalter_part_heap_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtabforalter_part_heap_btree (id int, date date, amt decimal(10,2)) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE ); -CREATE +CREATE TABLE Insert into reindex_crtabforalter_part_heap_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_heap_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_heap_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 Insert into reindex_crtabforalter_part_heap_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i; -INSERT 21 +INSERT 0 21 create index on reindex_crtabforalter_part_heap_btree(id); -CREATE +CREATE INDEX -- start_ignore create table before_reindex_crtabforalter_part_heap_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 
'reindex_crtabforalter_part_heap_btree%_idx'; -CREATE 12 +SELECT 12 -- end_ignore select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforalter_part_heap_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1); @@ -38,7 +38,7 @@ DELETE 8 1: BEGIN; BEGIN 1: LOCK reindex_crtabforalter_part_heap_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtabforalter_part_heap_btree; 3&: alter table reindex_crtabforalter_part_heap_btree drop column amt; 1: COMMIT; @@ -46,7 +46,7 @@ COMMIT 2<: <... completed> REINDEX 3<: <... completed> -ALTER +ALTER TABLE 3: select count(*) from reindex_crtabforalter_part_heap_btree where id = 29; count ------- diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_bitmap.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_bitmap.out index ed48fe86ad7..c78baee8b1e 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_bitmap.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_bitmap.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_ao_bitmap; -DROP +DROP TABLE CREATE TABLE reindex_crtab_ao_bitmap (a INT) WITH (appendonly=true); -CREATE +CREATE TABLE insert into reindex_crtab_ao_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_ao_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_ao_bitmap on reindex_crtab_ao_bitmap USING BITMAP(a); -CREATE +CREATE INDEX -- @Description Ensures that a reindex table during reindex index operations is ok -- @@ -17,7 +17,7 @@ DELETE 254 1: BEGIN; BEGIN 1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, 
relname from pg_class where relname = 'idx_reindex_crtab_ao_bitmap'); -CREATE 4 +SELECT 4 2: BEGIN; BEGIN 1: REINDEX index idx_reindex_crtab_ao_bitmap; @@ -32,7 +32,7 @@ REINDEX -- validates that reindex command in session 1 indeed generates new -- relfilenode for the index. 1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_ao_bitmap'); -INSERT 4 +INSERT 0 4 -- Expect two distinct relfilenodes per segment in old_relfilenodes table. 1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname; count | relname @@ -44,7 +44,7 @@ COMMIT -- After session 2 commits, the relfilenode it assigned to the index -- is visible to session 1. 1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_ao_bitmap'); -INSERT 4 +INSERT 0 4 -- Expect three distinct relfilenodes per segment in old_relfilenodes table. 
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname; count | relname diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_part_btree.out index d17fb59d8fd..f9864b0f48b 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_part_btree.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_part_ao_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtab_part_ao_btree ( id INTEGER, owner VARCHAR, description VARCHAR, property BOX, poli POLYGON, target CIRCLE, v VARCHAR, t TEXT, f FLOAT, p POINT, c CIRCLE, filler VARCHAR DEFAULT 'Big data is difficult to work with using most relational database management systems and desktop statistics and visualization packages, requiring instead massively parallel software running on tens, hundreds, or even thousands of servers.What is considered big data varies depending on the capabilities of the organization managing the set, and on the capabilities of the applications.This is here just to take up space so that we use more pages of data and sequential scans take a lot more time. 
') with (appendonly=true) DISTRIBUTED BY (id) PARTITION BY RANGE (id) ( PARTITION p_one START('1') INCLUSIVE END ('10') EXCLUSIVE, DEFAULT PARTITION de_fault ); -CREATE +CREATE TABLE insert into reindex_crtab_part_ao_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ; -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_part_ao_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ; -INSERT 1000 +INSERT 0 1000 create index on reindex_crtab_part_ao_btree(id); -CREATE +CREATE INDEX -- @product_version gpdb: [4.3.4.0 -],4.3.4.0O2 -- @Description Ensures that a reindex table during reindex index operations is ok -- @@ -16,11 +16,11 @@ CREATE DELETE FROM reindex_crtab_part_ao_btree WHERE id < 128; DELETE 254 3: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_ao_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_ao_btree%_idx'); -CREATE 12 +SELECT 12 1: BEGIN; BEGIN 1: LOCK reindex_crtab_part_ao_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtab_part_ao_btree; 3:BEGIN; BEGIN @@ -34,7 +34,7 @@ REINDEX -- validates that reindex command in session 3 indeed generates new -- relfilenode for the index. 
3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_ao_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_ao_btree%_idx'); -INSERT 12 +INSERT 0 12 -- Expect two distinct relfilenodes for one segment in old_relfilenodes table. -- CBDB#26: This test actually assumes when txn1 commits, its lock is acquired by -- txn3, and txn2 is blocked by it. Normally this is the case, but when the system @@ -60,7 +60,7 @@ COMMIT 2<: <... completed> REINDEX 3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_ao_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_ao_btree%_idx'); -INSERT 12 +INSERT 0 12 -- Expect three distinct relfilenodes per segment for "1_prt_de_fault" index. -- CBDB#26: Same as L45. 
3: select relname, relname = 'reindex_crtab_part_ao_btree_1_prt_de_fault_id_idx' and res.cnt in (2, 3) as special_case_for_de_fault_id_idx, case when relname = 'reindex_crtab_part_ao_btree_1_prt_de_fault_id_idx' then -1 else res.cnt end as count from (select distinct count(distinct relfilenode) as cnt, relname from old_relfilenodes group by dbid, relname) as res; diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_bitmap.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_bitmap.out index 8c766da8fc8..85238f7aa65 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_bitmap.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_bitmap.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_aoco_bitmap; -DROP +DROP TABLE CREATE TABLE reindex_crtab_aoco_bitmap (a INT) WITH (appendonly=true, orientation=column); -CREATE +CREATE TABLE insert into reindex_crtab_aoco_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_aoco_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_aoco_bitmap on reindex_crtab_aoco_bitmap USING BITMAP(a); -CREATE +CREATE INDEX -- @Description Ensures that a reindex table during reindex index operations is ok -- @@ -17,7 +17,7 @@ DELETE 254 1: BEGIN; BEGIN 1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_aoco_bitmap'); -CREATE 4 +SELECT 4 2: BEGIN; BEGIN 1: REINDEX index idx_reindex_crtab_aoco_bitmap; @@ -32,7 +32,7 @@ REINDEX -- validates that reindex command in session 1 indeed generates new -- relfilenode for the index. 
1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_aoco_bitmap'); -INSERT 4 +INSERT 0 4 -- Expect two distinct relfilenodes per segment in old_relfilenodes table. 1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname; count | relname @@ -44,7 +44,7 @@ COMMIT -- After session 2 commits, the relfilenode it assigned to the index -- is visible to session 1. 1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_aoco_bitmap'); -INSERT 4 +INSERT 0 4 -- Expect three distinct relfilenodes per segment in old_relfilenodes table. 
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname; count | relname diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_part_btree.out index 7dd094ee895..627a989e6df 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_part_btree.out @@ -1,25 +1,25 @@ DROP TABLE IF EXISTS reindex_crtab_part_aoco_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtab_part_aoco_btree ( id INTEGER, owner VARCHAR, description VARCHAR, property BOX, poli POLYGON, target CIRCLE, v VARCHAR, t TEXT, f FLOAT, p POINT, c CIRCLE, filler VARCHAR DEFAULT 'Big data is difficult to work with using most relational database management systems and desktop statistics and visualization packages, requiring instead massively parallel software running on tens, hundreds, or even thousands of servers.What is considered big data varies depending on the capabilities of the organization managing the set, and on the capabilities of the applications.This is here just to take up space so that we use more pages of data and sequential scans take a lot more time. 
') with (appendonly=true,orientation=column) DISTRIBUTED BY (id) PARTITION BY RANGE (id) ( PARTITION p_one START('1') INCLUSIVE END ('10') EXCLUSIVE, DEFAULT PARTITION de_fault ); -CREATE +CREATE TABLE insert into reindex_crtab_part_aoco_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ; -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_part_aoco_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ; -INSERT 1000 +INSERT 0 1000 create index on reindex_crtab_part_aoco_btree(id); -CREATE +CREATE INDEX -- @Description Ensures that a reindex table during reindex index operations is ok -- DELETE FROM reindex_crtab_part_aoco_btree WHERE id < 128; DELETE 254 3: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_aoco_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_aoco_btree%_idx'); -CREATE 12 +SELECT 12 1: BEGIN; BEGIN 1: LOCK reindex_crtab_part_aoco_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtab_part_aoco_btree; 3: BEGIN; BEGIN @@ -33,7 +33,7 @@ REINDEX -- validates that reindex command in session 3 indeed generates new -- relfilenode for the index. 
3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_aoco_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_aoco_btree%_idx'); -INSERT 12 +INSERT 0 12 -- Expect two distinct relfilenodes for one segment in old_relfilenodes table. -- CBDB#26: This test actually assumes when txn1 commits, its lock is acquired by -- txn3, and txn2 is blocked by it. Normally this is the case, but when the system @@ -59,7 +59,7 @@ COMMIT 2<: <... completed> REINDEX 3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_aoco_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_aoco_btree%_idx'); -INSERT 12 +INSERT 0 12 -- Expect three distinct relfilenodes per segment for "1_prt_de_fault" index. -- CBDB#26: Same as L45. 
3: select relname, relname = 'reindex_crtab_part_aoco_btree_1_prt_de_fault_id_idx' and res.cnt in (2, 3) as special_case_for_de_fault_id_idx, case when relname = 'reindex_crtab_part_aoco_btree_1_prt_de_fault_id_idx' then -1 else res.cnt end as count from (select distinct count(distinct relfilenode) as cnt, relname from old_relfilenodes group by dbid, relname) as res; diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_bitmap.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_bitmap.out index 0a9689cb030..c19661cea66 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_bitmap.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_bitmap.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_crtab_heap_bitmap; -DROP +DROP TABLE CREATE TABLE reindex_crtab_heap_bitmap (a INT); -CREATE +CREATE TABLE insert into reindex_crtab_heap_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_heap_bitmap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_reindex_crtab_heap_bitmap on reindex_crtab_heap_bitmap USING BITMAP(a); -CREATE +CREATE INDEX -- @Description Ensures that a reindex table during reindex index operations is ok -- @@ -17,7 +17,7 @@ DELETE 254 1: BEGIN; BEGIN 1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_heap_bitmap'); -CREATE 4 +SELECT 4 2: BEGIN; BEGIN 1: REINDEX index idx_reindex_crtab_heap_bitmap; @@ -32,7 +32,7 @@ REINDEX -- validates that reindex command in session 1 indeed generates new -- relfilenode for the index. 
1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_heap_bitmap'); -INSERT 4 +INSERT 0 4 -- Expect two distinct relfilenodes per segment in old_relfilenodes table. 1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname; count | relname @@ -44,7 +44,7 @@ COMMIT -- After session 2 commits, the relfilenode it assigned to the index -- is visible to session 1. 1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_heap_bitmap'); -INSERT 4 +INSERT 0 4 -- Expect three distinct relfilenodes per segment in old_relfilenodes table. 
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname; count | relname diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_part_btree.out index cf671261bce..31d58838e33 100644 --- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_part_btree.out +++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_part_btree.out @@ -1,25 +1,25 @@ DROP TABLE IF EXISTS reindex_crtab_part_heap_btree; -DROP +DROP TABLE CREATE TABLE reindex_crtab_part_heap_btree ( id INTEGER, owner VARCHAR, description VARCHAR, property BOX, poli POLYGON, target CIRCLE, v VARCHAR, t TEXT, f FLOAT, p POINT, c CIRCLE, filler VARCHAR DEFAULT 'Big data is difficult to work with using most relational database management systems and desktop statistics and visualization packages, requiring instead massively parallel software running on tens, hundreds, or even thousands of servers.What is considered big data varies depending on the capabilities of the organization managing the set, and on the capabilities of the applications.This is here just to take up space so that we use more pages of data and sequential scans take a lot more time. 
')DISTRIBUTED BY (id) PARTITION BY RANGE (id) ( PARTITION p_one START('1') INCLUSIVE END ('10') EXCLUSIVE, DEFAULT PARTITION de_fault ); -CREATE +CREATE TABLE insert into reindex_crtab_part_heap_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ; -INSERT 1000 +INSERT 0 1000 insert into reindex_crtab_part_heap_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ; -INSERT 1000 +INSERT 0 1000 create index on reindex_crtab_part_heap_btree(id); -CREATE +CREATE INDEX -- @Description Ensures that a reindex table during reindex index operations is ok -- DELETE FROM reindex_crtab_part_heap_btree WHERE id < 128; DELETE 254 3: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_heap_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_heap_btree%_idx'); -CREATE 12 +SELECT 12 1: BEGIN; BEGIN 1: LOCK reindex_crtab_part_heap_btree IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE 2&: REINDEX TABLE reindex_crtab_part_heap_btree; 3: BEGIN; BEGIN @@ -33,7 +33,7 @@ REINDEX -- validates that reindex command in session 3 indeed generates new -- relfilenode for the index. 3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_heap_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_heap_btree%_idx'); -INSERT 12 +INSERT 0 12 -- Expect two distinct relfilenodes for one segment in old_relfilenodes table. 
-- CBDB#26: This test actually assumes when txn1 commits, its lock is acquired by -- txn3, and txn2 is blocked by it. Normally this is the case, but when the system @@ -59,7 +59,7 @@ COMMIT 2<: <... completed> REINDEX 3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_heap_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_heap_btree%_idx'); -INSERT 12 +INSERT 0 12 -- Expect three distinct relfilenodes per segment for "1_prt_de_fault" index. -- CBDB#26: Same as L45. 3: select relname, relname = 'reindex_crtab_part_heap_btree_1_prt_de_fault_id_idx' and res.cnt in (2, 3) as special_case_for_de_fault_id_idx, case when relname = 'reindex_crtab_part_heap_btree_1_prt_de_fault_id_idx' then -1 else res.cnt end as count from (select distinct count(distinct relfilenode) as cnt, relname from old_relfilenodes group by dbid, relname) as res; diff --git a/src/test/isolation2/expected/reindex/repeatable_read_reindex_with_insert_heap.out b/src/test/isolation2/expected/reindex/repeatable_read_reindex_with_insert_heap.out index 03fd18bee3f..c6a8ffd473a 100644 --- a/src/test/isolation2/expected/reindex/repeatable_read_reindex_with_insert_heap.out +++ b/src/test/isolation2/expected/reindex/repeatable_read_reindex_with_insert_heap.out @@ -1,24 +1,24 @@ DROP TABLE IF EXISTS reindex_serialize_tab_heap; -DROP +DROP TABLE CREATE TABLE reindex_serialize_tab_heap (a INT, b text, c date, d numeric, e bigint, f char(10), g float) distributed by (a); -CREATE +CREATE TABLE insert into reindex_serialize_tab_heap select i, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,10) i; -INSERT 10 +INSERT 0 10 create index idxa_reindex_serialize_tab_heap on reindex_serialize_tab_heap(a); -CREATE +CREATE INDEX create index idxb_reindex_serialize_tab_heap on reindex_serialize_tab_heap(b); -CREATE 
+CREATE INDEX create index idxc_reindex_serialize_tab_heap on reindex_serialize_tab_heap(c); -CREATE +CREATE INDEX create index idxd_reindex_serialize_tab_heap on reindex_serialize_tab_heap(d); -CREATE +CREATE INDEX create index idxe_reindex_serialize_tab_heap on reindex_serialize_tab_heap(e); -CREATE +CREATE INDEX create index idxf_reindex_serialize_tab_heap on reindex_serialize_tab_heap(f); -CREATE +CREATE INDEX create index idxg_reindex_serialize_tab_heap on reindex_serialize_tab_heap(g); -CREATE +CREATE INDEX -- start_ignore SET gp_create_table_random_default_distribution=off; SET @@ -35,7 +35,7 @@ SET 2: BEGIN; BEGIN 2: insert into reindex_serialize_tab_heap values(99,'green',now(),10,15.10); -INSERT 1 +INSERT 0 1 2: COMMIT; COMMIT 1: select a,b,d,e,f,g from reindex_serialize_tab_heap order by 1; diff --git a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_column_heap.out b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_column_heap.out index e4218e9ccd5..160bfca9f4f 100644 --- a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_column_heap.out +++ b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_column_heap.out @@ -1,30 +1,30 @@ DROP TABLE IF EXISTS reindex_serialize_tab_heap; -DROP +DROP TABLE CREATE TABLE reindex_serialize_tab_heap (a INT, b text, c date, d numeric, e bigint, f char(10), g float) distributed by (a); -CREATE +CREATE TABLE insert into reindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into reindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into reindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into reindex_serialize_tab_heap select 1, 'abc'||i, 
now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 create index idxa_reindex_serialize_tab_heap on reindex_serialize_tab_heap(a); -CREATE +CREATE INDEX create index idxb_reindex_serialize_tab_heap on reindex_serialize_tab_heap(b); -CREATE +CREATE INDEX create index idxc_reindex_serialize_tab_heap on reindex_serialize_tab_heap(c); -CREATE +CREATE INDEX create index idxd_reindex_serialize_tab_heap on reindex_serialize_tab_heap(d); -CREATE +CREATE INDEX create index idxe_reindex_serialize_tab_heap on reindex_serialize_tab_heap(e); -CREATE +CREATE INDEX create index idxf_reindex_serialize_tab_heap on reindex_serialize_tab_heap(f); -CREATE +CREATE INDEX create index idxg_reindex_serialize_tab_heap on reindex_serialize_tab_heap(g); -CREATE +CREATE INDEX -- start_ignore SET gp_create_table_random_default_distribution=off; SET @@ -41,13 +41,13 @@ SET dummy select to establish snapshot (1 row) 1: alter table reindex_serialize_tab_heap drop column c; -ALTER +ALTER TABLE 1: COMMIT; COMMIT -- Remember index relfilenodes from master and segments before -- reindex. 
2: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'idx%_reindex_serialize_tab_heap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'idx%_reindex_serialize_tab_heap'); -CREATE 28 +SELECT 28 2: reindex table reindex_serialize_tab_heap; REINDEX 2: COMMIT; diff --git a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_ao.out b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_ao.out index 93c1a82b07e..63a4317af6b 100644 --- a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_ao.out +++ b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_ao.out @@ -1,30 +1,30 @@ DROP TABLE IF EXISTS reindex_serialize_tab_ao; -DROP +DROP TABLE CREATE TABLE reindex_serialize_tab_ao (a INT, b text, c date, d numeric, e bigint, f char(10), g float) with (appendonly=True) distributed by (a); -CREATE +CREATE TABLE insert into reindex_serialize_tab_ao select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into reindex_serialize_tab_ao select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into reindex_serialize_tab_ao select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into reindex_serialize_tab_ao select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 create index idxa_reindex_serialize_tab_ao on reindex_serialize_tab_ao(a); -CREATE +CREATE INDEX create index idxb_reindex_serialize_tab_ao on reindex_serialize_tab_ao(b); -CREATE +CREATE INDEX create index idxc_reindex_serialize_tab_ao on reindex_serialize_tab_ao(c); -CREATE +CREATE INDEX create index 
idxd_reindex_serialize_tab_ao on reindex_serialize_tab_ao(d); -CREATE +CREATE INDEX create index idxe_reindex_serialize_tab_ao on reindex_serialize_tab_ao(e); -CREATE +CREATE INDEX create index idxf_reindex_serialize_tab_ao on reindex_serialize_tab_ao(f); -CREATE +CREATE INDEX create index idxg_reindex_serialize_tab_ao on reindex_serialize_tab_ao(g); -CREATE +CREATE INDEX -- start_ignore SET gp_create_table_random_default_distribution=off; SET @@ -41,13 +41,13 @@ SET dummy select to establish snapshot (1 row) 1: drop index idxg_reindex_serialize_tab_ao; -DROP +DROP INDEX 1: COMMIT; COMMIT -- Remember index relfilenodes from master and segments before -- reindex. 2: create table old_ao_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'idx%_reindex_serialize_tab_ao' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'idx%_reindex_serialize_tab_ao'); -CREATE 28 +SELECT 28 2: reindex table reindex_serialize_tab_ao; REINDEX 2: COMMIT; diff --git a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_heap.out b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_heap.out index 16382071948..fa3476c6cd5 100644 --- a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_heap.out +++ b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_heap.out @@ -1,27 +1,27 @@ CREATE TABLE reindex_dropindex_serialize_tab_heap (a INT, b text, c date, d numeric, e bigint, f char(10), g float) distributed by (a); -CREATE +CREATE TABLE insert into reindex_dropindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into reindex_dropindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into 
reindex_dropindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 insert into reindex_dropindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i; -INSERT 1000 +INSERT 0 1000 create index idxa_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(a); -CREATE +CREATE INDEX create index idxb_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(b); -CREATE +CREATE INDEX create index idxc_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(c); -CREATE +CREATE INDEX create index idxd_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(d); -CREATE +CREATE INDEX create index idxe_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(e); -CREATE +CREATE INDEX create index idxf_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(f); -CREATE +CREATE INDEX create index idxg_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(g); -CREATE +CREATE INDEX -- start_ignore SET gp_create_table_random_default_distribution=off; SET @@ -38,13 +38,13 @@ SET dummy select to establish snapshot (1 row) 1: drop index idxg_reindex_dropindex_serialize_tab_heap; -DROP +DROP INDEX 1: COMMIT; COMMIT -- Remember index relfilenodes from master and segments before -- reindex. 
2: create table old_heap_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'idx%_reindex_dropindex_serialize_tab_heap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'idx%_reindex_dropindex_serialize_tab_heap'); -CREATE 28 +SELECT 28 2: reindex table reindex_dropindex_serialize_tab_heap; REINDEX 2: COMMIT; diff --git a/src/test/isolation2/expected/reindex/vacuum_analyze_while_reindex_ao_btree.out b/src/test/isolation2/expected/reindex/vacuum_analyze_while_reindex_ao_btree.out index a7ab01a3611..fc69bb61f5b 100644 --- a/src/test/isolation2/expected/reindex/vacuum_analyze_while_reindex_ao_btree.out +++ b/src/test/isolation2/expected/reindex/vacuum_analyze_while_reindex_ao_btree.out @@ -1,10 +1,10 @@ DROP TABLE IF EXISTS reindex_ao; -DROP +DROP TABLE CREATE TABLE reindex_ao (a INT) WITH (appendonly=true); -CREATE +CREATE TABLE insert into reindex_ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 analyze reindex_ao; ANALYZE select 1 as reltuples_same_as_count from pg_class where relname = 'reindex_ao' and reltuples = (select count(*) from reindex_ao); @@ -13,13 +13,13 @@ select 1 as reltuples_same_as_count from pg_class where relname = 'reindex_ao' 1 (1 row) insert into reindex_ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 select 1 as reltuples_same_as_count from pg_class where relname = 'reindex_ao' and reltuples = (select count(*) from reindex_ao); reltuples_same_as_count ------------------------- (0 rows) create index idx_btree_reindex_vacuum_analyze_ao on reindex_ao(a); -CREATE +CREATE INDEX -- @Description Ensures that a vacuum during reindex operations is ok -- @@ -30,7 +30,7 @@ BEGIN -- Remember index relfilenodes from master and segments before -- reindex. 
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_btree_reindex_vacuum_analyze_ao' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_btree_reindex_vacuum_analyze_ao'); -CREATE 4 +SELECT 4 1: REINDEX index idx_btree_reindex_vacuum_analyze_ao; REINDEX 2&: VACUUM ANALYZE reindex_ao; @@ -58,4 +58,4 @@ COMMIT 1 (1 row) 3: INSERT INTO reindex_ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/expected/reindex/vacuum_while_reindex_ao_bitmap.out b/src/test/isolation2/expected/reindex/vacuum_while_reindex_ao_bitmap.out index 6c3c5b14514..f8e2e01ae57 100644 --- a/src/test/isolation2/expected/reindex/vacuum_while_reindex_ao_bitmap.out +++ b/src/test/isolation2/expected/reindex/vacuum_while_reindex_ao_bitmap.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_ao; -DROP +DROP TABLE CREATE TABLE reindex_ao (a INT) WITH (appendonly=true); -CREATE +CREATE TABLE insert into reindex_ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_bitmap_reindex_ao on reindex_ao USING bitmap(a); -CREATE +CREATE INDEX -- @Description Ensures that a vacuum during reindex operations is ok -- @@ -19,7 +19,7 @@ BEGIN -- Remember index relfilenodes from master and segments before -- reindex. 
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_bitmap_reindex_ao' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_bitmap_reindex_ao'); -CREATE 4 +SELECT 4 1: REINDEX index idx_bitmap_reindex_ao; REINDEX 2&: VACUUM reindex_ao; @@ -41,4 +41,4 @@ COMMIT 0 (1 row) 3: INSERT INTO reindex_ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree.out b/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree.out index c049f48081f..cf8e6e3dbd9 100644 --- a/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree.out +++ b/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_heap; -DROP +DROP TABLE CREATE TABLE reindex_heap (a INT); -CREATE +CREATE TABLE insert into reindex_heap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into reindex_heap select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 create index idx_btree_reindex_heap on reindex_heap(a); -CREATE +CREATE INDEX -- @Description Ensures that a vacuum during reindex operations is ok -- @@ -19,7 +19,7 @@ BEGIN -- Remember index relfilenodes from master and segments before -- reindex. 
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_btree_reindex_heap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_btree_reindex_heap'); -CREATE 4 +SELECT 4 1: REINDEX index idx_btree_reindex_heap; REINDEX 2&: VACUUM reindex_heap; @@ -41,4 +41,4 @@ COMMIT 0 (1 row) 3: INSERT INTO reindex_heap VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree_toast.out b/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree_toast.out index 6b6254e7230..e17213f0b22 100644 --- a/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree_toast.out +++ b/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree_toast.out @@ -1,14 +1,14 @@ DROP TABLE IF EXISTS reindex_toast_heap; -DROP +DROP TABLE CREATE TABLE reindex_toast_heap (a text, b int); -CREATE +CREATE TABLE alter table reindex_toast_heap alter column a set storage external; -ALTER +ALTER TABLE insert into reindex_toast_heap select repeat('123456789',10000), i from generate_series(1,100) i; -INSERT 100 +INSERT 0 100 create index idx_btree_reindex_toast_heap on reindex_toast_heap(b); -CREATE +CREATE INDEX -- @Description Ensures that a vacuum during reindex operations is ok -- @@ -19,7 +19,7 @@ BEGIN -- Remember index relfilenodes from master and segments before -- reindex. 
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_btree_reindex_toast_heap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_btree_reindex_toast_heap'); -CREATE 4 +SELECT 4 1: REINDEX index idx_btree_reindex_toast_heap; REINDEX 2&: VACUUM reindex_toast_heap; @@ -41,4 +41,4 @@ COMMIT 0 (1 row) 3: INSERT INTO reindex_toast_heap VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/expected/reindex_gpfastsequence.out b/src/test/isolation2/expected/reindex_gpfastsequence.out index bb25ca9f4f0..e49fb145130 100644 --- a/src/test/isolation2/expected/reindex_gpfastsequence.out +++ b/src/test/isolation2/expected/reindex_gpfastsequence.out @@ -1,11 +1,11 @@ -- Test concurrent reindex gp_fastsequence and insert on an AO table create table test_fastseqence ( a int, b char(20)) with (appendonly = true, orientation=column); -CREATE +CREATE TABLE create index test_fastseqence_idx on test_fastseqence(b); -CREATE +CREATE INDEX insert into test_fastseqence select i , 'aa'||i from generate_series(1,100) i; -INSERT 100 +INSERT 0 100 select gp_inject_fault_infinite('reindex_relation', 'suspend', 2); gp_inject_fault_infinite @@ -32,7 +32,7 @@ select gp_inject_fault('reindex_relation', 'reset', 2); 1<: <... completed> REINDEX 2<: <... 
completed> -INSERT 100 +INSERT 0 100 -- Validate that gp_fastsequence works as expected after reindex SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'gp_fastsequence_objid_objmod_index' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1); @@ -53,7 +53,7 @@ select last_sequence from gp_dist_random('gp_fastsequence') where objid = (selec (6 rows) insert into test_fastseqence select i , 'aa'||i from generate_series(1,100) i; -INSERT 100 +INSERT 0 100 select last_sequence from gp_dist_random('gp_fastsequence') where objid = (select segrelid from pg_appendonly where relid = (select oid from pg_class where relname = 'test_fastseqence')); last_sequence diff --git a/src/test/isolation2/expected/reorganize_after_ao_vacuum_skip_drop.out b/src/test/isolation2/expected/reorganize_after_ao_vacuum_skip_drop.out index 604c84856a0..18cc24c26d1 100644 --- a/src/test/isolation2/expected/reorganize_after_ao_vacuum_skip_drop.out +++ b/src/test/isolation2/expected/reorganize_after_ao_vacuum_skip_drop.out @@ -3,9 +3,9 @@ -- rewrites the relation differently than other ALTER operations. 
CREATE TABLE reorganize_after_ao_vacuum_skip_drop (a INT, b INT) WITH (appendonly=true); -CREATE +CREATE TABLE INSERT INTO reorganize_after_ao_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 DELETE FROM reorganize_after_ao_vacuum_skip_drop; DELETE 10 @@ -28,7 +28,7 @@ BEGIN 2: VACUUM reorganize_after_ao_vacuum_skip_drop; VACUUM 1: END; -END +COMMIT -- We should see an aoseg in state 2 (AOSEG_STATE_AWAITING_DROP) 0U: SELECT segno, state FROM gp_toolkit.__gp_aoseg('reorganize_after_ao_vacuum_skip_drop'); @@ -40,7 +40,7 @@ END -- The AO relation should be rewritten and AppendOnlyHash entry invalidated 1: ALTER TABLE reorganize_after_ao_vacuum_skip_drop SET WITH (reorganize=true); -ALTER +ALTER TABLE 0U: SELECT segno, state FROM gp_toolkit.__gp_aoseg('reorganize_after_ao_vacuum_skip_drop'); segno | state -------+------- @@ -49,7 +49,7 @@ ALTER -- Check if insert goes into segno 1 instead of segno 2. If it did not -- go into segno 1, there was a leak in the AppendOnlyHash entry. 1: INSERT INTO reorganize_after_ao_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 0U: SELECT segno, tupcount > 0, state FROM gp_toolkit.__gp_aoseg('reorganize_after_ao_vacuum_skip_drop'); segno | ?column? 
| state -------+----------+------- diff --git a/src/test/isolation2/expected/resgroup/resgroup_alter_concurrency.out b/src/test/isolation2/expected/resgroup/resgroup_alter_concurrency.out index 7eecfacc404..817dc592962 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_alter_concurrency.out +++ b/src/test/isolation2/expected/resgroup/resgroup_alter_concurrency.out @@ -1,28 +1,28 @@ -- create a resource group when gp_resource_manager is queue DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH(concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE -- After a 'q' command the client connection is disconnected but the -- QD may still be alive, if we then query pg_stat_activity quick enough -- we might still see this session with query ''. -- A filter is put to filter out this kind of quitted sessions. CREATE OR REPLACE VIEW rg_activity_status AS SELECT rsgname, wait_event_type, state, query FROM pg_stat_activity WHERE rsgname='rg_concurrency_test' AND query <> ''; -CREATE +CREATE VIEW -- -- 1. increase concurrency after pending queries -- ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1; -ALTER +ALTER RESOURCE GROUP 11:SET ROLE role_concurrency_test; SET @@ -37,10 +37,10 @@ SET 22&:BEGIN; ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 2; -ALTER +ALTER RESOURCE GROUP 11:END; -END +COMMIT 11q: ... 21<: <... completed> BEGIN @@ -55,9 +55,9 @@ SELECT * FROM rg_activity_status; (2 rows) 21:END; -END +COMMIT 22:END; -END +COMMIT 21q: ... 22q: ... 
@@ -71,7 +71,7 @@ SELECT * FROM rg_activity_status; -- ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1; -ALTER +ALTER RESOURCE GROUP 11:SET ROLE role_concurrency_test; SET @@ -79,7 +79,7 @@ SET BEGIN ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 2; -ALTER +ALTER RESOURCE GROUP 21:SET ROLE role_concurrency_test; SET @@ -98,7 +98,7 @@ SELECT * FROM rg_activity_status; (3 rows) 11:END; -END +COMMIT 11q: ... 22<: <... completed> BEGIN @@ -111,9 +111,9 @@ SELECT * FROM rg_activity_status; (2 rows) 21:END; -END +COMMIT 22:END; -END +COMMIT 21q: ... 22q: ... @@ -126,7 +126,7 @@ SELECT * FROM rg_activity_status; -- 3. decrease concurrency -- ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 10; -ALTER +ALTER RESOURCE GROUP 11:SET ROLE role_concurrency_test; SET 11:BEGIN; @@ -153,7 +153,7 @@ SET BEGIN ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1; -ALTER +ALTER RESOURCE GROUP 11q: ... 12q: ... @@ -174,13 +174,13 @@ SELECT pg_sleep(1); -- 4. increase concurrency from 0 -- DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_concurrency_test WITH(concurrency=0, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 11:SET ROLE role_concurrency_test; SET @@ -192,7 +192,7 @@ SELECT * FROM rg_activity_status; (1 row) ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1; -ALTER +ALTER RESOURCE GROUP 11<: <... completed> BEGIN @@ -203,7 +203,7 @@ SELECT * FROM rg_activity_status; (1 row) 11:END; -END +COMMIT 11q: ... 
-- @@ -213,14 +213,14 @@ END -- ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_activity_status; rsgname | wait_event_type | state | query ---------+-----------------+-------+------- (0 rows) ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 0; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_activity_status; rsgname | wait_event_type | state | query ---------+-----------------+-------+------- @@ -233,7 +233,7 @@ SELECT * FROM rg_activity_status; -- ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_activity_status; rsgname | wait_event_type | state | query ---------+-----------------+-------+------- @@ -250,7 +250,7 @@ SELECT * FROM rg_activity_status; (1 row) ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 0; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_activity_status; rsgname | wait_event_type | state | query ---------------------+-----------------+---------------------+-------- @@ -258,7 +258,7 @@ SELECT * FROM rg_activity_status; (1 row) 11:END; -END +COMMIT 11q: ... -- @@ -268,7 +268,7 @@ END -- ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_activity_status; rsgname | wait_event_type | state | query ---------+-----------------+-------+------- @@ -289,7 +289,7 @@ SELECT * FROM rg_activity_status; (2 rows) ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 0; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_activity_status; rsgname | wait_event_type | state | query ---------------------+-----------------+---------------------+-------- @@ -298,7 +298,7 @@ SELECT * FROM rg_activity_status; (2 rows) 11:END; -END +COMMIT 11q: ... 
SELECT * FROM rg_activity_status; rsgname | wait_event_type | state | query @@ -321,51 +321,51 @@ SELECT * FROM rg_activity_status; -- 6: drop a resgroup with concurrency=0 and pending queries DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=0, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 61:SET ROLE role_concurrency_test; SET 61&:BEGIN; ALTER ROLE role_concurrency_test RESOURCE GROUP none; -ALTER +ALTER ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP 61<: <... completed> BEGIN 61:END; -END +COMMIT 61q: ... -- 7: drop a role with concurrency=0 and pending queries DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=0, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 61:SET ROLE role_concurrency_test; SET 61&:BEGIN; DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP 61<: <... completed> ERROR: role with Oid 213301 was dropped @@ -375,7 +375,7 @@ DETAIL: Cannot execute commands anymore, please terminate this session. 
-- cleanup -- start_ignore DROP VIEW rg_activity_status; -DROP +DROP VIEW DROP ROLE role_concurrency_test; DROP DROP RESOURCE GROUP rg_concurrency_test; diff --git a/src/test/isolation2/expected/resgroup/resgroup_assign_slot_fail.out b/src/test/isolation2/expected/resgroup/resgroup_assign_slot_fail.out index a0f8f82984d..0d137952aea 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_assign_slot_fail.out +++ b/src/test/isolation2/expected/resgroup/resgroup_assign_slot_fail.out @@ -2,15 +2,15 @@ -- test the slot will be unassigned correctly. DROP ROLE IF EXISTS role_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_test; ERROR: resource group "rg_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_test WITH (concurrency=2, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_test RESOURCE GROUP rg_test; -CREATE +CREATE ROLE 1: SET ROLE role_test; SET @@ -35,14 +35,14 @@ ERROR: fault triggered, fault name:'resgroup_assigned_on_master' fault type:'er 2: BEGIN; BEGIN 1: END; -END +COMMIT 2: END; -END +COMMIT 1q: ... 2q: ... 
--clean up DROP ROLE role_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_test; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v1.out b/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v1.out index 1120930c158..01e019377d6 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v1.out +++ b/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v1.out @@ -75,7 +75,7 @@ def check_group_shares(name): cpu_weight = int(plpy.execute(''' SELECT value FRO # check default groups check_group_shares('default_group') check_group_shares('admin_group') check_group_shares('system_group') # check user groups check_group_shares('rg1_cpu_test') check_group_shares('rg2_cpu_test') return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- check whether the queries running on the specific core set @@ -92,7 +92,7 @@ expect_cpu = [] for token in tokens: if token.find('-') != -1: interval = token.split("-") num1 = interval[0] num2 = interval[1] for num in range(int(num1), int(num2) + 1): expect_cpu.append(str(num)) else: expect_cpu.append(token) sess_ids = get_all_sess_ids_in_group(grp) for i in range(1000): time.sleep(0.01) if not check(expect_cpu, sess_ids): return False return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- create a resource group that contains all the cpu cores 0: CREATE OR REPLACE FUNCTION create_allcores_group(grp TEXT) RETURNS BOOL AS $$ import subprocess @@ -100,7 +100,7 @@ file = "/sys/fs/cgroup/cpuset/gpdb/cpuset.cpus" fd = open(file) line = fd.readli # plpy SPI will always start a transaction, but res group cannot be created in a transaction. 
ret = subprocess.run(['psql', 'postgres', '-c' , '{}'.format(sql)], stdout=subprocess.PIPE) if ret.returncode != 0: plpy.error('failed to create resource group.\n {} \n {}'.format(ret.stdout, ret.stderr)) file = "/sys/fs/cgroup/cpuset/gpdb/1/cpuset.cpus" fd = open(file) line = fd.readline() fd.close() line = line.strip('\n') if line != "0": return False return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- check whether the cpuset value in cgroup is valid according to the rule 0: CREATE OR REPLACE FUNCTION check_cpuset_rules() RETURNS BOOL AS $$ def get_all_group_which_cpuset_is_set(): sql = "select groupid,cpuset from gp_toolkit.gp_resgroup_config where cpuset != '-1'" result = plpy.execute(sql) return result @@ -111,7 +111,7 @@ config_groups = get_all_group_which_cpuset_is_set() groups_cpuset = set([]) if not (config_cpuset.issubset(cgroup_cpuset) and cgroup_cpuset.issubset(config_cpuset)): return False # check whether cpuset in resource group union default group is universal set default_cpuset = get_cgroup_cpuset(1) all_cpuset = get_cgroup_cpuset(0) if not (default_cpuset | groups_cpuset).issubset(all_cpuset): return False if not all_cpuset.issubset(default_cpuset | groups_cpuset): return False # if all the cores are allocated to resource group, default group must has a core left if len(default_cpuset & groups_cpuset) > 0 and (len(default_cpuset) != 1 or (not default_cpuset.issubset(all_cpuset))): return False return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION 0: CREATE OR REPLACE FUNCTION is_session_in_group(pid integer, groupname text) RETURNS BOOL AS $$ import subprocess @@ -123,4 +123,4 @@ path = "/sys/fs/cgroup/cpu/gpdb/{}/cgroup.procs".format(groupid) stdout = subpro return set(session_pids).issubset(set(cgroups_pids)) for host in hosts: if not get_result(host): return False return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION diff --git a/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v2.out 
b/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v2.out index 779e4dd1bcf..1ca18c1414c 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v2.out +++ b/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v2.out @@ -74,7 +74,7 @@ def check_group_shares(name): cpu_weight = int(plpy.execute(''' SELECT value FRO # check default groups check_group_shares('default_group') check_group_shares('admin_group') check_group_shares('system_group') # check user groups check_group_shares('rg1_cpu_test') check_group_shares('rg2_cpu_test') return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- check whether the queries running on the specific core set @@ -91,7 +91,7 @@ expect_cpu = [] for token in tokens: if token.find('-') != -1: interval = token.split("-") num1 = interval[0] num2 = interval[1] for num in range(int(num1), int(num2) + 1): expect_cpu.append(str(num)) else: expect_cpu.append(token) sess_ids = get_all_sess_ids_in_group(grp) for i in range(1000): time.sleep(0.01) if not check(expect_cpu, sess_ids): return False return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- create a resource group that contains all the cpu cores 0: CREATE OR REPLACE FUNCTION create_allcores_group(grp TEXT) RETURNS BOOL AS $$ import subprocess @@ -99,7 +99,7 @@ file = "/sys/fs/cgroup/gpdb/cpuset.cpus" fd = open(file) line = fd.readline() fd # plpy SPI will always start a transaction, but res group cannot be created in a transaction. 
ret = subprocess.run(['psql', 'postgres', '-c' , '{}'.format(sql)], stdout=subprocess.PIPE) if ret.returncode != 0: plpy.error('failed to create resource group.\n {} \n {}'.format(ret.stdout, ret.stderr)) file = "/sys/fs/cgroup/gpdb/1/cpuset.cpus" fd = open(file) line = fd.readline() fd.close() line = line.strip('\n') if line != "0": return False return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- check whether the cpuset value in cgroup is valid according to the rule 0: CREATE OR REPLACE FUNCTION check_cpuset_rules() RETURNS BOOL AS $$ def get_all_group_which_cpuset_is_set(): sql = "select groupid,cpuset from gp_toolkit.gp_resgroup_config where cpuset != '-1'" result = plpy.execute(sql) return result @@ -110,7 +110,7 @@ config_groups = get_all_group_which_cpuset_is_set() groups_cpuset = set([]) if not (config_cpuset.issubset(cgroup_cpuset) and cgroup_cpuset.issubset(config_cpuset)): return False # check whether cpuset in resource group union default group is universal set default_cpuset = get_cgroup_cpuset(1) all_cpuset = get_cgroup_cpuset(0) if not (default_cpuset | groups_cpuset).issubset(all_cpuset): return False if not all_cpuset.issubset(default_cpuset | groups_cpuset): return False # if all the cores are allocated to resource group, default group must has a core left if len(default_cpuset & groups_cpuset) > 0 and (len(default_cpuset) != 1 or (not default_cpuset.issubset(all_cpuset))): return False return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION 0: CREATE OR REPLACE FUNCTION is_session_in_group(pid integer, groupname text) RETURNS BOOL AS $$ import subprocess @@ -122,26 +122,4 @@ path = "/sys/fs/cgroup/gpdb/{}/cgroup.procs".format(groupid) stdout = subprocess return set(session_pids).issubset(set(cgroups_pids)) for host in hosts: if not get_result(host): return False return True $$ LANGUAGE plpython3u; -CREATE - -0: CREATE OR REPLACE FUNCTION check_cgroup_io_max(groupname text, tablespace_name text, parameters text) RETURNS BOOL AS $$ 
import ctypes import os -postgres = ctypes.CDLL(None) get_bdi_of_path = postgres['get_bdi_of_path'] get_tablespace_path = postgres['get_tablespace_path'] get_tablespace_oid = postgres['get_tablespace_oid'] -# get group oid sql = "select groupid from gp_toolkit.gp_resgroup_config where groupname = '%s'" % groupname result = plpy.execute(sql) groupid = result[0]['groupid'] -cgroup_path = "/sys/fs/cgroup/gpdb/%d" % groupid -# get path of tablespace spcoid = get_tablespace_oid(tablespace_name.encode('utf-8'), False) location = ctypes.cast(get_tablespace_path(spcoid), ctypes.c_char_p).value -if location == "": return False -bdi = get_bdi_of_path(location) major = os.major(bdi) minor = os.minor(bdi) -match_string = "{}:{} {}".format(major, minor, parameters) match = False with open(os.path.join(cgroup_path, "io.max")) as f: for line in f.readlines(): line = line.strip() if match_string == line: match = True break -return match -$$ LANGUAGE plpython3u; -CREATE - -0: CREATE OR REPLACE FUNCTION mkdir(dirname text) RETURNS BOOL AS $$ import os -if os.path.exists(dirname): return True -try: os.makedirs(dirname) except Exception as e: plpy.error("cannot create dir {}".format(e)) else: return True $$ LANGUAGE plpython3u; -CREATE - -0: CREATE OR REPLACE FUNCTION rmdir(dirname text) RETURNS BOOL AS $$ import shutil import os -if not os.path.exists(dirname): return True -try: shutil.rmtree(dirname) except Exception as e: plpy.error("cannot remove dir {}".format(e)) else: return True $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION diff --git a/src/test/isolation2/expected/resgroup/resgroup_bypass.out b/src/test/isolation2/expected/resgroup/resgroup_bypass.out index 5cff41d745f..a9c294edb2c 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_bypass.out +++ b/src/test/isolation2/expected/resgroup/resgroup_bypass.out @@ -11,14 +11,20 @@ ERROR: resource group "rg_bypass" does not exist -- create a resource group with concurrency = 1. 
CREATE RESOURCE GROUP rg_bypass WITH(cpu_max_percent=20, concurrency=1); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_bypass RESOURCE GROUP rg_bypass; -CREATE +CREATE ROLE SET ROLE role_bypass; SET CREATE TABLE t_bypass(a int) distributed by (a); -CREATE +CREATE TABLE + +-- gp_resource_group_bypass can only be set by super user +-- below set statement will error out +set gp_resource_group_bypass = 1; +ERROR: permission denied to set parameter "gp_resource_group_bypass" + RESET ROLE; RESET @@ -164,7 +170,7 @@ SELECT gp_inject_fault('func_init_plan_end', 'reset', 1); (1 row) 1<: <... completed> -INSERT 1 +INSERT 0 1 1q: ... 2q: ... @@ -221,7 +227,7 @@ SELECT gp_inject_fault('func_init_plan_end', 'reset', 1); -- min_cost will not work for above. -- alter resource group's min_cost ALTER RESOURCE GROUP rg_bypass SET min_cost 500; -ALTER +ALTER RESOURCE GROUP ANALYZE t_bypass; ANALYZE -- Session1: for quries with cost under the min_cost limit, they will be unassigned and bypassed. @@ -279,9 +285,9 @@ SELECT gp_inject_fault('func_init_plan_end', 'reset', 1); -- cleanup -- start_ignore DROP TABLE t_bypass; -DROP +DROP TABLE DROP ROLE role_bypass; -DROP +DROP ROLE DROP RESOURCE GROUP rg_bypass; -DROP +DROP RESOURCE GROUP -- end_ignore diff --git a/src/test/isolation2/expected/resgroup/resgroup_bypass_catalog.out b/src/test/isolation2/expected/resgroup/resgroup_bypass_catalog.out index f32527713f7..2299c072a5c 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_bypass_catalog.out +++ b/src/test/isolation2/expected/resgroup/resgroup_bypass_catalog.out @@ -1,10 +1,10 @@ CREATE RESOURCE GROUP rg_test_catalog WITH (CONCURRENCY=2, CPU_MAX_PERCENT=10); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_test_catalog RESOURCE GROUP rg_test_catalog; -CREATE +CREATE ROLE CREATE FUNCTION rg_test_udf() RETURNS integer AS $$ return 1 $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- take 1 slot 1: SET ROLE role_test_catalog; diff --git 
a/src/test/isolation2/expected/resgroup/resgroup_cancel_terminate_concurrency.out b/src/test/isolation2/expected/resgroup/resgroup_cancel_terminate_concurrency.out index e1c3c77403c..25e5363080b 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_cancel_terminate_concurrency.out +++ b/src/test/isolation2/expected/resgroup/resgroup_cancel_terminate_concurrency.out @@ -1,18 +1,18 @@ -- test1: cancel a query that is waiting for a slot DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE OR REPLACE VIEW rg_concurrency_view AS SELECT wait_event_type IS NOT NULL as waiting, wait_event_type, state, query, rsgname FROM pg_stat_activity WHERE rsgname='rg_concurrency_test'; -CREATE +CREATE VIEW CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 1:SET ROLE role_concurrency_test; SET 1:BEGIN; @@ -37,7 +37,7 @@ SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE wait_event_type='Resou t (2 rows) 1:END; -END +COMMIT 2<: <... completed> ERROR: canceling statement due to user request 3<: <... completed> @@ -52,22 +52,22 @@ SELECT * FROM rg_concurrency_view; 2q: ... 3q: ... 
DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test2: terminate a query that is waiting for a slot DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 1:SET ROLE role_concurrency_test; SET 1:BEGIN; @@ -92,7 +92,7 @@ SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE wait_event_type='Re t (2 rows) 1:END; -END +COMMIT 2<: <... completed> FATAL: terminating connection due to administrator command server closed the connection unexpectedly @@ -111,22 +111,22 @@ SELECT * FROM rg_concurrency_view; 2q: ... 3q: ... DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test3: cancel a query that is running DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 1:SET ROLE role_concurrency_test; SET 1&:SELECT pg_sleep(10000); @@ -172,22 +172,22 @@ SELECT * FROM rg_concurrency_view; 6q: ... 7q: ... 
DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test4: terminate a query that is running DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 1:SET ROLE role_concurrency_test; SET 1&:SELECT pg_sleep(10000); @@ -239,26 +239,26 @@ SELECT * FROM rg_concurrency_view; 6q: ... 7q: ... DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test5: terminate a query waiting for a slot, that opens a transaction on exit callback DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 1:SET ROLE role_concurrency_test; SET 1:CREATE TEMP TABLE tmp(a INT); -CREATE +CREATE TABLE 2:SET ROLE role_concurrency_test; SET 2:BEGIN; @@ -292,9 +292,9 @@ SELECT * FROM rg_concurrency_view; 1q: ... 2q: ... 
DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP DROP VIEW rg_concurrency_view; -DROP +DROP VIEW diff --git a/src/test/isolation2/expected/resgroup/resgroup_concurrency.out b/src/test/isolation2/expected/resgroup/resgroup_concurrency.out index dda3764ba3d..556d8a757b4 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_concurrency.out +++ b/src/test/isolation2/expected/resgroup/resgroup_concurrency.out @@ -1,15 +1,15 @@ -- test1: test gp_toolkit.gp_resgroup_status and pg_stat_activity -- create a resource group when gp_resource_manager is queue DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE -- no query has been assigned to the this group @@ -44,13 +44,13 @@ SELECT wait_event from pg_stat_activity where query = 'BEGIN;' and state = 'acti rg_concurrency_test (1 row) 2:END; -END +COMMIT 3:END; -END +COMMIT 4<: <... completed> BEGIN 4:END; -END +COMMIT 2q: ... 3q: ... 4q: ... @@ -60,24 +60,24 @@ SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_to rg_concurrency_test | 0 | 0 | 1 | 3 (1 row) DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test2: test alter concurrency -- Create a resource group with concurrency=2. Prepare 2 running transactions and 1 queueing transactions. -- Alter concurrency 2->3, the queueing transaction will be woken up, the 'value' of pg_resgroupcapability -- will be set to 3. 
DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 12:SET ROLE role_concurrency_test; SET 12:BEGIN; @@ -101,7 +101,7 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur 2 (1 row) ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 3; -ALTER +ALTER RESOURCE GROUP SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_toolkit.gp_resgroup_status s, pg_resgroup r WHERE s.groupid=r.oid AND r.rsgname='rg_concurrency_test'; rsgname | num_running | num_queueing | num_queued | num_executed ---------------------+-------------+--------------+------------+-------------- @@ -113,33 +113,33 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur 3 (1 row) 12:END; -END +COMMIT 13:END; -END +COMMIT 14<: <... completed> BEGIN 14:END; -END +COMMIT 12q: ... 13q: ... 14q: ... DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test3: test alter concurrency -- Create a resource group with concurrency=3. Prepare 3 running transactions, and 1 queueing transaction. 
DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=3, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 22:SET ROLE role_concurrency_test; SET 22:BEGIN; @@ -167,7 +167,7 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur (1 row) -- Alter concurrency 3->2, the 'value' of pg_resgroupcapability will be set to 2. ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 2; -ALTER +ALTER RESOURCE GROUP SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concurrency_test'; concurrency ------------- @@ -175,7 +175,7 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur (1 row) -- When one transaction is finished, queueing transaction won't be woken up. There're 2 running transactions and 1 queueing transaction. 24:END; -END +COMMIT SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_toolkit.gp_resgroup_status s, pg_resgroup r WHERE s.groupid=r.oid AND r.rsgname='rg_concurrency_test'; rsgname | num_running | num_queueing | num_queued | num_executed ---------------------+-------------+--------------+------------+-------------- @@ -190,7 +190,7 @@ SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_to (1 row) -- Finish another transaction, one queueing transaction will be woken up, there're 2 running transactions and 1 queueing transaction. 
22:END; -END +COMMIT SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_toolkit.gp_resgroup_status s, pg_resgroup r WHERE s.groupid=r.oid AND r.rsgname='rg_concurrency_test'; rsgname | num_running | num_queueing | num_queued | num_executed ---------------------+-------------+--------------+------------+-------------- @@ -198,7 +198,7 @@ SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_to (1 row) -- Alter concurrency 2->2, the 'value' of pg_resgroupcapability will be set to 2. ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 2; -ALTER +ALTER RESOURCE GROUP SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concurrency_test'; concurrency ------------- @@ -206,7 +206,7 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur (1 row) -- Finish another transaction, one queueing transaction will be woken up, there're 2 running transactions and 0 queueing transaction. 23:END; -END +COMMIT SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_toolkit.gp_resgroup_status s, pg_resgroup r WHERE s.groupid=r.oid AND r.rsgname='rg_concurrency_test'; rsgname | num_running | num_queueing | num_queued | num_executed ---------------------+-------------+--------------+------------+-------------- @@ -217,30 +217,30 @@ BEGIN 25<: <... completed> BEGIN 25:END; -END +COMMIT 24:END; -END +COMMIT 22q: ... 23q: ... 24q: ... 25q: ... 
DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test4: concurrently drop resource group DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE -- DROP should fail if there're running transactions 32:SET ROLE role_concurrency_test; @@ -248,32 +248,32 @@ SET 32:BEGIN; BEGIN DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; ERROR: cannot drop resource group "rg_concurrency_test" HINT: The resource group is currently managing 1 query(ies) and cannot be dropped. Terminate the queries first or try dropping the group later. The view pg_stat_activity tracks the queries managed by resource groups. 32:END; -END +COMMIT DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test5: QD exit before QE DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 51:SET ROLE role_concurrency_test; SET 51:BEGIN; @@ -299,22 +299,22 @@ ERROR: canceling statement due to user request 51q: ... 52q: ... 
DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test6: cancel a query that is waiting for a slot DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 51:SET ROLE role_concurrency_test; SET 51:BEGIN; @@ -327,23 +327,23 @@ SET BEGIN 52q: ... DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test7: terminate a query that is waiting for a slot DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 61:SET ROLE role_concurrency_test; SET 61:BEGIN; @@ -364,22 +364,22 @@ server closed the connection unexpectedly 61q: ... 62q: ... 
DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- test8: create a resgroup with concurrency=0 DROP ROLE IF EXISTS role_concurrency_test; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_concurrency_test; ERROR: resource group "rg_concurrency_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=0, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE 61:SET ROLE role_concurrency_test; SET 61&:BEGIN; @@ -392,34 +392,34 @@ SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE wait_event_type='Resou ERROR: canceling statement due to user request 61q: ... DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP -- -- Test cursors, pl/* functions only take one slot. -- -- set concurrency to 1 CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test; -CREATE +CREATE ROLE -- declare cursors and verify that it only takes one resource group slot 71:SET ROLE TO role_concurrency_test; SET 71:CREATE TABLE foo_concurrency_test as select i as c1 , i as c2 from generate_series(1, 1000) i; -CREATE 1000 +SELECT 1000 71:CREATE TABLE bar_concurrency_test as select i as c1 , i as c2 from generate_series(1, 1000) i; -CREATE 1000 +SELECT 1000 71:BEGIN; BEGIN 71:DECLARE c1 CURSOR for select c1, c2 from foo_concurrency_test order by c1 limit 10; -DECLARE +DECLARE CURSOR 71:DECLARE c2 CURSOR for select c1, c2 from bar_concurrency_test order by c1 limit 10; -DECLARE +DECLARE CURSOR 71:DECLARE c3 CURSOR for select count(*) from foo_concurrency_test t1, bar_concurrency_test t2 where t1.c2 = t2.c2; -DECLARE +DECLARE CURSOR 71:Fetch ALL FROM c1; c1 | c2 ----+---- @@ -454,11 +454,11 @@ DECLARE 1000 (1 
row) 71:END; -END +COMMIT -- create a pl function and verify that it only takes one resource group slot. CREATE OR REPLACE FUNCTION func_concurrency_test () RETURNS integer as /*in func*/ $$ /*in func*/ DECLARE /*in func*/ tmprecord RECORD; /*in func*/ ret integer; /*in func*/ BEGIN /*in func*/ SELECT count(*) INTO ret FROM foo_concurrency_test; /*in func*/ FOR tmprecord IN SELECT * FROM bar_concurrency_test LOOP /*in func*/ SELECT count(*) INTO ret FROM foo_concurrency_test; /*in func*/ END LOOP; /*in func*/ /*in func*/ select 1/0; /*in func*/ EXCEPTION /*in func*/ WHEN division_by_zero THEN /*in func*/ SELECT count(*) INTO ret FROM foo_concurrency_test; /*in func*/ raise NOTICE 'divided by zero'; /*in func*/ RETURN ret; /*in func*/ END; /*in func*/ $$ /*in func*/ LANGUAGE plpgsql; -CREATE +CREATE FUNCTION 71: select func_concurrency_test(); func_concurrency_test @@ -484,7 +484,7 @@ PREPARE 2 | 2 (1 row) 71:END; -END +COMMIT 71:PREPARE p3 (integer) as select * from foo_concurrency_test where c2=$1; PREPARE 71:PREPARE p4 (integer) as select * from bar_concurrency_test where c2=$1; @@ -501,10 +501,10 @@ PREPARE (1 row) DROP TABLE foo_concurrency_test; -DROP +DROP TABLE DROP TABLE bar_concurrency_test; -DROP +DROP TABLE DROP ROLE role_concurrency_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_concurrency_test; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_cpu_max_percent.out b/src/test/isolation2/expected/resgroup/resgroup_cpu_max_percent.out index ae21059fcb6..9c5c2d47741 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_cpu_max_percent.out +++ b/src/test/isolation2/expected/resgroup/resgroup_cpu_max_percent.out @@ -1,17 +1,17 @@ -- start_ignore DROP VIEW IF EXISTS cancel_all; -DROP +DROP VIEW DROP ROLE IF EXISTS role1_cpu_test; -DROP +DROP ROLE DROP ROLE IF EXISTS role2_cpu_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg1_cpu_test; ERROR: resource group "rg1_cpu_test" does not exist DROP RESOURCE GROUP rg2_cpu_test; 
ERROR: resource group "rg2_cpu_test" does not exist CREATE LANGUAGE plpython3u; -CREATE +ERROR: language "plpython3u" already exists -- end_ignore -- @@ -19,16 +19,16 @@ CREATE -- DROP TABLE IF EXISTS cpu_usage_samples; -DROP +DROP TABLE CREATE TABLE cpu_usage_samples (sample text); -CREATE +CREATE TABLE -- fetch_sample: select cpu_usage from gp_toolkit.gp_resgroup_status -- and dump them into text in json format then save them in db for -- further analysis. CREATE OR REPLACE FUNCTION fetch_sample() RETURNS text AS $$ import json group_cpus = plpy.execute(''' SELECT groupname, cpu_usage FROM gp_toolkit.gp_resgroup_status_per_host ''') plpy.notice(group_cpus) json_text = json.dumps(dict([(row['groupname'], float(row['cpu_usage'])) for row in group_cpus])) plpy.execute(''' INSERT INTO cpu_usage_samples VALUES ('{value}') '''.format(value=json_text)) return json_text $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -- verify_cpu_usage: calculate each QE's average cpu usage using all the data in -- the table cpu_usage_sample. And compare the average value to the expected value. 
@@ -36,22 +36,22 @@ CREATE CREATE OR REPLACE FUNCTION verify_cpu_usage(groupname TEXT, expect_cpu_usage INT, err_rate INT) RETURNS BOOL AS $$ import json import functools all_info = plpy.execute(''' SELECT sample::json->'{name}' AS cpu FROM cpu_usage_samples '''.format(name=groupname)) usage = float(all_info[0]['cpu']) return abs(usage - expect_cpu_usage) <= err_rate $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION busy() RETURNS void AS $$ import os import signal n = 15 for i in range(n): if os.fork() == 0: # children must quit without invoking the atexit hooks signal.signal(signal.SIGINT, lambda a, b: os._exit(0)) signal.signal(signal.SIGQUIT, lambda a, b: os._exit(0)) signal.signal(signal.SIGTERM, lambda a, b: os._exit(0)) # generate pure cpu load while True: pass os.wait() $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION CREATE VIEW cancel_all AS SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query LIKE 'SELECT * FROM % WHERE busy%'; -CREATE +CREATE VIEW -- create two resource groups CREATE RESOURCE GROUP rg1_cpu_test WITH (concurrency=5, cpu_max_percent=-1, cpu_weight=100); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP rg2_cpu_test WITH (concurrency=5, cpu_max_percent=-1, cpu_weight=200); -CREATE +CREATE RESOURCE GROUP -- -- check gpdb cgroup configuration @@ -65,13 +65,13 @@ select check_cgroup_configuration(); -- lower admin_group's cpu_max_percent to minimize its side effect ALTER RESOURCE GROUP admin_group SET cpu_max_percent 1; -ALTER +ALTER RESOURCE GROUP -- create two roles and assign them to above groups CREATE ROLE role1_cpu_test RESOURCE GROUP rg1_cpu_test; -CREATE +CREATE ROLE CREATE ROLE role2_cpu_test RESOURCE GROUP rg2_cpu_test; -CREATE +CREATE ROLE GRANT ALL ON FUNCTION busy() TO role1_cpu_test; GRANT GRANT ALL ON FUNCTION busy() TO role2_cpu_test; @@ -115,7 +115,7 @@ SET -- start_ignore -- Gather CPU usage statistics into cpu_usage_samples TRUNCATE TABLE cpu_usage_samples; -TRUNCATE +TRUNCATE TABLE 
SELECT fetch_sample(); fetch_sample --------------------------------------------------------------------------------------------------------------- @@ -167,7 +167,7 @@ SELECT pg_sleep(1.7); (1 row) TRUNCATE TABLE cpu_usage_samples; -TRUNCATE +TRUNCATE TABLE SELECT fetch_sample(); fetch_sample --------------------------------------------------------------------------------------------------------------- @@ -290,7 +290,7 @@ SET -- start_ignore TRUNCATE TABLE cpu_usage_samples; -TRUNCATE +TRUNCATE TABLE SELECT fetch_sample(); fetch_sample ---------------------------------------------------------------------------------------------------------------- @@ -342,7 +342,7 @@ SELECT pg_sleep(1.7); (1 row) TRUNCATE TABLE cpu_usage_samples; -TRUNCATE +TRUNCATE TABLE SELECT fetch_sample(); fetch_sample ---------------------------------------------------------------------------------------------------------------- @@ -462,9 +462,9 @@ ERROR: canceling statement due to user request -- Test cpu max percent ALTER RESOURCE GROUP rg1_cpu_test set cpu_max_percent 10; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg2_cpu_test set cpu_max_percent 20; -ALTER +ALTER RESOURCE GROUP -- prepare parallel queries in the two groups 10: SET ROLE TO role1_cpu_test; @@ -509,7 +509,7 @@ SET -- start_ignore 1:TRUNCATE TABLE cpu_usage_samples; -TRUNCATE +TRUNCATE TABLE 1:SELECT fetch_sample(); fetch_sample --------------------------------------------------------------------------------------------------------------- @@ -685,7 +685,7 @@ SET -- start_ignore 1:TRUNCATE TABLE cpu_usage_samples; -TRUNCATE +TRUNCATE TABLE 1:SELECT fetch_sample(); fetch_sample ---------------------------------------------------------------------------------------------------------------- @@ -737,7 +737,7 @@ TRUNCATE (1 row) 1:TRUNCATE TABLE cpu_usage_samples; -TRUNCATE +TRUNCATE TABLE 1:SELECT fetch_sample(); fetch_sample 
--------------------------------------------------------------------------------------------------------------- @@ -857,7 +857,7 @@ ERROR: canceling statement due to user request -- restore admin_group's cpu_max_percent 2:ALTER RESOURCE GROUP admin_group SET cpu_max_percent 10; -ALTER +ALTER RESOURCE GROUP -- cleanup 2:REVOKE ALL ON FUNCTION busy() FROM role1_cpu_test; @@ -865,10 +865,10 @@ REVOKE 2:REVOKE ALL ON FUNCTION busy() FROM role2_cpu_test; REVOKE 2:DROP ROLE role1_cpu_test; -DROP +DROP ROLE 2:DROP ROLE role2_cpu_test; -DROP +DROP ROLE 2:DROP RESOURCE GROUP rg1_cpu_test; -DROP +DROP RESOURCE GROUP 2:DROP RESOURCE GROUP rg2_cpu_test; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_cpuset.out b/src/test/isolation2/expected/resgroup/resgroup_cpuset.out index 0ff087de3a6..6969d621422 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_cpuset.out +++ b/src/test/isolation2/expected/resgroup/resgroup_cpuset.out @@ -1,30 +1,30 @@ -- start_ignore DROP VIEW IF EXISTS busy; -DROP +DROP VIEW DROP VIEW IF EXISTS cancel_all; -DROP +DROP VIEW DROP TABLE IF EXISTS bigtable; -DROP +DROP TABLE CREATE LANGUAGE plpython3u; CREATE -- end_ignore CREATE TABLE bigtable AS SELECT i AS c1, 'abc' AS c2 FROM generate_series(1,50000) i; -CREATE 50000 +SELECT 50000 CREATE OR REPLACE FUNCTION get_cpu_cores() RETURNS INTEGER AS $$ import os return os.cpu_count() $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION -CREATE VIEW busy AS SELECT count(*) FROM bigtable t1, bigtable t2, bigtable t3, bigtable t4, bigtable t5 WHERE 0 != (t1.c1 % 2 + 10000) AND 0 != (t2.c1 % 2 + 10000) AND 0 != (t3.c1 % 2 + 10000) AND 0 != (t4.c1 % 2 + 10000) AND 0 != (t5.c1 % 2 + 10000) ; -CREATE +CREATE VIEW busy AS SELECT count(*) FROM bigtable t1, bigtable t2, bigtable t3, bigtable t4, bigtable t5 WHERE 0 = (t1.c1 % 2 + 10000)! AND 0 = (t2.c1 % 2 + 10000)! AND 0 = (t3.c1 % 2 + 10000)! AND 0 = (t4.c1 % 2 + 10000)! AND 0 = (t5.c1 % 2 + 10000)! 
; +CREATE VIEW CREATE VIEW cancel_all AS SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query LIKE 'SELECT * FROM busy%'; -CREATE +CREATE VIEW CREATE RESOURCE GROUP rg1_cpuset_test WITH (cpuset='0'); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role1_cpuset_test RESOURCE GROUP rg1_cpuset_test; -CREATE +CREATE ROLE GRANT ALL ON busy TO role1_cpuset_test; GRANT @@ -52,7 +52,7 @@ BEGIN (1 row) ALTER RESOURCE GROUP rg1_cpuset_test SET cpuset '1'; -ALTER +ALTER RESOURCE GROUP select pg_sleep(2); pg_sleep ---------- @@ -66,7 +66,7 @@ select pg_sleep(2); (1 row) ALTER RESOURCE GROUP rg1_cpuset_test SET cpuset '0,1'; -ALTER +ALTER RESOURCE GROUP select pg_sleep(2); pg_sleep ---------- @@ -79,11 +79,11 @@ select pg_sleep(2); t (1 row) 11: END; -END +COMMIT -- change to cpu_max_percent while the transaction is running ALTER RESOURCE GROUP rg1_cpuset_test SET cpu_max_percent 70; -ALTER +ALTER RESOURCE GROUP -- cancel the transaction -- start_ignore @@ -180,49 +180,49 @@ SELECT check_cpuset_rules(); t (1 row) CREATE RESOURCE GROUP rg1_test_group WITH (cpuset='0'); -CREATE +CREATE RESOURCE GROUP SELECT check_cpuset_rules(); check_cpuset_rules -------------------- t (1 row) CREATE RESOURCE GROUP rg2_test_group WITH (cpuset='1'); -CREATE +CREATE RESOURCE GROUP SELECT check_cpuset_rules(); check_cpuset_rules -------------------- t (1 row) ALTER RESOURCE GROUP rg1_test_group SET cpu_max_percent 1; -ALTER +ALTER RESOURCE GROUP SELECT check_cpuset_rules(); check_cpuset_rules -------------------- t (1 row) ALTER RESOURCE GROUP rg1_test_group SET cpuset '0'; -ALTER +ALTER RESOURCE GROUP SELECT check_cpuset_rules(); check_cpuset_rules -------------------- t (1 row) ALTER RESOURCE GROUP rg1_test_group SET cpu_max_percent 1; -ALTER +ALTER RESOURCE GROUP SELECT check_cpuset_rules(); check_cpuset_rules -------------------- t (1 row) DROP RESOURCE GROUP rg1_test_group; -DROP +DROP RESOURCE GROUP SELECT check_cpuset_rules(); check_cpuset_rules -------------------- t (1 row) DROP 
RESOURCE GROUP rg2_test_group; -DROP +DROP RESOURCE GROUP SELECT check_cpuset_rules(); check_cpuset_rules -------------------- @@ -241,7 +241,7 @@ SELECT check_cpuset_rules(); t (1 row) DROP RESOURCE GROUP rg1_test_group; -DROP +DROP RESOURCE GROUP SELECT check_cpuset_rules(); check_cpuset_rules -------------------- @@ -277,9 +277,9 @@ ERROR: resource group "rg1_test_group" does not exist -- test segment/master cpuset CREATE RESOURCE GROUP rg_multi_cpuset1 WITH (concurrency=2, cpuset='0;0'); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP rg_multi_cpuset1 set CPUSET '1;1'; -ALTER +ALTER RESOURCE GROUP select groupname,cpuset from gp_toolkit.gp_resgroup_config where groupname='rg_multi_cpuset1'; groupname | cpuset ------------------+-------- @@ -287,23 +287,23 @@ select groupname,cpuset from gp_toolkit.gp_resgroup_config where groupname='rg_m (1 row) DROP RESOURCE GROUP rg_multi_cpuset1; -DROP +DROP RESOURCE GROUP REVOKE ALL ON busy FROM role1_cpuset_test; REVOKE DROP ROLE role1_cpuset_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg1_cpuset_test; -DROP +DROP RESOURCE GROUP DROP FUNCTION check_cpuset_rules(); -DROP +DROP FUNCTION DROP FUNCTION check_cpuset(TEXT, TEXT); -DROP +DROP FUNCTION DROP FUNCTION create_allcores_group(TEXT); -DROP +DROP FUNCTION DROP VIEW cancel_all; -DROP +DROP VIEW DROP VIEW busy; -DROP +DROP VIEW DROP TABLE bigtable; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/resgroup/resgroup_cpuset_empty_default.out b/src/test/isolation2/expected/resgroup/resgroup_cpuset_empty_default.out index c84e98b5779..1ef006a10f6 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_cpuset_empty_default.out +++ b/src/test/isolation2/expected/resgroup/resgroup_cpuset_empty_default.out @@ -24,7 +24,7 @@ CREATE RESOURCE GROUP -- Alter a resource group from / to all the cpu cores should also work. ALTER RESOURCE GROUP rg1_cpuset_test SET cpuset '0'; -ALTER +ALTER RESOURCE GROUP ! 
psql -d isolation2resgrouptest -Ac "ALTER RESOURCE GROUP rg1_cpuset_test SET cpuset '0-$(($(nproc)-1))'"; ALTER RESOURCE GROUP @@ -35,4 +35,4 @@ ALTER RESOURCE GROUP -- Cleanup in a new connection as the default one is disconnected by gpstop 10: DROP RESOURCE GROUP rg1_cpuset_test; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_disable_resgroup.out b/src/test/isolation2/expected/resgroup/resgroup_disable_resgroup.out index ae3324c605d..1ccaf65d732 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_disable_resgroup.out +++ b/src/test/isolation2/expected/resgroup/resgroup_disable_resgroup.out @@ -13,6 +13,6 @@ SHOW gp_resource_manager; -- reset settings ALTER RESOURCE GROUP admin_group SET concurrency 10; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP default_group SET concurrency 20; -ALTER +ALTER RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_dumpinfo.out b/src/test/isolation2/expected/resgroup/resgroup_dumpinfo.out index dc57a23fa17..c04ece07ef6 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_dumpinfo.out +++ b/src/test/isolation2/expected/resgroup/resgroup_dumpinfo.out @@ -1,7 +1,7 @@ DROP ROLE IF EXISTS role_dumpinfo_test; -DROP +DROP ROLE DROP ROLE IF EXISTS role_permission; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_dumpinfo_test; ERROR: resource group "rg_dumpinfo_test" does not exist @@ -20,12 +20,12 @@ r = plpy.execute("select value from pg_resgroup_get_status_kv('dump');") json_te plpy.execute("""CREATE TEMPORARY TABLE t_pg_resgroup_get_status_kv AS SELECT * FROM pg_resgroup_get_status_kv('dump');""") r = plpy.execute("SELECT value FROM t_pg_resgroup_get_status_kv;") json_text = r[0]['value'] json_obj = json.loads(json_text) return validate(json_obj, n) $$ LANGUAGE plpython3u; -CREATE +CREATE FUNCTION CREATE RESOURCE GROUP rg_dumpinfo_test WITH (concurrency=2, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_dumpinfo_test RESOURCE 
GROUP rg_dumpinfo_test; -CREATE +CREATE ROLE 2:SET ROLE role_dumpinfo_test; SET @@ -46,19 +46,19 @@ SELECT dump_test_check(); (1 row) 2:END; -END +COMMIT 3:END; -END +COMMIT 4<: <... completed> BEGIN 4:END; -END +COMMIT 2q: ... 3q: ... 4q: ... CREATE ROLE role_permission; -CREATE +CREATE ROLE SET ROLE role_permission; SET select value from pg_resgroup_get_status_kv('dump'); @@ -81,12 +81,10 @@ SELECT count(*) FROM pg_resgroup_get_status_kv(NULL); (1 row) DROP ROLE role_dumpinfo_test; -DROP +DROP ROLE DROP ROLE role_permission; -DROP +DROP ROLE DROP RESOURCE GROUP rg_dumpinfo_test; -DROP --- start_ignore +DROP RESOURCE GROUP DROP LANGUAGE plpython3u CASCADE; -DROP --- end_ignore +DROP LANGUAGE diff --git a/src/test/isolation2/expected/resgroup/resgroup_functions.out b/src/test/isolation2/expected/resgroup/resgroup_functions.out index 7835bb2f3a8..67a95310d2b 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_functions.out +++ b/src/test/isolation2/expected/resgroup/resgroup_functions.out @@ -5,10 +5,10 @@ SELECT s.groupid, s.num_running, s.num_queueing, s.num_queued, s.num_executed FR (0 rows) -- end_ignore CREATE TEMP TABLE resgroup_function_test(LIKE gp_toolkit.gp_resgroup_status); -CREATE +CREATE TABLE INSERT INTO resgroup_function_test(groupid, num_running, num_queueing, num_queued, num_executed) SELECT s.groupid, s.num_running, s.num_queueing, s.num_queued, s.num_executed FROM pg_resgroup_get_status(NULL::oid) s(groupid, num_running, num_queueing, num_queued, num_executed, total_queue_duration) LIMIT 1; -INSERT 1 +INSERT 0 1 SELECT count(num_executed)>0 FROM resgroup_function_test WHERE num_executed IS NOT NULL; ?column? 
diff --git a/src/test/isolation2/expected/resgroup/resgroup_large_group_id.out b/src/test/isolation2/expected/resgroup/resgroup_large_group_id.out index fc132ebd271..3f5b3aee71e 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_large_group_id.out +++ b/src/test/isolation2/expected/resgroup/resgroup_large_group_id.out @@ -6,7 +6,7 @@ select gp_inject_fault('bump_oid', 'skip', dbid) from gp_segment_configuration w (1 row) create resource group rg_large_oid with (cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP select gp_inject_fault('bump_oid', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = -1; gp_inject_fault @@ -28,4 +28,4 @@ select count(*) > 0 from pg_resgroup_get_status(NULL); (1 row) drop resource group rg_large_oid; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_memory_limit.out b/src/test/isolation2/expected/resgroup/resgroup_memory_limit.out index 41a568503e0..fb270001fd6 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_memory_limit.out +++ b/src/test/isolation2/expected/resgroup/resgroup_memory_limit.out @@ -7,28 +7,20 @@ DROP RESOURCE GROUP rg_memory_test; -- create a pl function to show the memory used by a process CREATE OR REPLACE FUNCTION func_memory_test (text) RETURNS text as /*in func*/ $$ /*in func*/ DECLARE /*in func*/ ln text; /*in func*/ tmp text[]; /*in func*/ match bool := false; /*in func*/ BEGIN /*in func*/ FOR ln IN execute format('explain analyze %s', $1) LOOP /*in func*/ IF NOT match THEN /*in func*/ tmp := regexp_match(ln, 'Memory used: (.*)'); /*in func*/ IF tmp IS NOT null THEN /*in func*/ match := true; /*in func*/ END IF; /*in func*/ END IF; /*in func*/ END LOOP; /*in func*/ RETURN tmp[1]; /*in func*/ END; /*in func*/ $$ /*in func*/ LANGUAGE plpgsql; -CREATE - --- memory_quota range is [0, INT_MAX] or equals to -1 -CREATE RESOURCE GROUP rg_memory_range WITH(memory_quota=-100, cpu_max_percent=20, concurrency=2); -ERROR: memory_quota range is 
[0, INT_MAX] or equals to -1 -CREATE RESOURCE GROUP rg_memory_range WITH(memory_quota=-1, cpu_max_percent=20, concurrency=2); -CREATE RESOURCE GROUP -DROP RESOURCE GROUP rg_memory_range; -DROP RESOURCE GROUP +CREATE FUNCTION -- create a resource group with memory limit 100 Mb -CREATE RESOURCE GROUP rg_memory_test WITH(memory_quota=100, cpu_max_percent=20, concurrency=2); -CREATE +CREATE RESOURCE GROUP rg_memory_test WITH(memory_limit=100, cpu_max_percent=20, concurrency=2); +CREATE RESOURCE GROUP CREATE ROLE role_memory_test RESOURCE GROUP rg_memory_test; -CREATE +CREATE ROLE -- session1: explain memory used by query -- user requests less than statement_mem, set query's memory limit to statement_mem 1: SET ROLE TO role_memory_test; SET 1: CREATE TABLE t_memory_limit(a int); -CREATE +CREATE TABLE 1: BEGIN; BEGIN 1: SHOW statement_mem; @@ -42,9 +34,9 @@ BEGIN 256000kB (1 row) --- session2: test alter resource group's memory quota -2: ALTER RESOURCE GROUP rg_memory_test SET memory_quota 1000; -ALTER +-- session2: test alter resource group's memory limit +2: ALTER RESOURCE GROUP rg_memory_test SET memory_limit 1000; +ALTER RESOURCE GROUP -- memory used will grow up to 500 Mb 1: SELECT func_memory_test('SELECT * FROM t_memory_limit'); @@ -53,7 +45,7 @@ ALTER 512000kB (1 row) 1: END; -END +COMMIT -- set gp_resgroup_memory_query_fixed_mem to 200MB 1: SET gp_resgroup_memory_query_fixed_mem to 204800; SET @@ -79,10 +71,10 @@ ERROR: Invalid input for gp_resgroup_memory_query_fixed_mem, must be less than -- clean DROP FUNCTION func_memory_test(text); -DROP +DROP FUNCTION DROP TABLE t_memory_limit; -DROP +DROP TABLE DROP ROLE IF EXISTS role_memory_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_memory_test; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_move_query.out b/src/test/isolation2/expected/resgroup/resgroup_move_query.out index 720f8491d6e..8fafc13449b 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_move_query.out +++ 
b/src/test/isolation2/expected/resgroup/resgroup_move_query.out @@ -21,9 +21,9 @@ CREATE -- end_ignore DROP ROLE IF EXISTS role_move_query; -DROP +DROP ROLE DROP ROLE IF EXISTS role_move_query_small; -DROP +DROP ROLE -- start_ignore DROP RESOURCE GROUP rg_move_query; ERROR: resource group "rg_move_query" does not exist @@ -32,16 +32,16 @@ ERROR: resource group "rg_move_query_small" does not exist -- end_ignore CREATE RESOURCE GROUP rg_move_query WITH (concurrency=1, cpu_max_percent=20); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP rg_move_query_small WITH (concurrency=1, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_move_query RESOURCE GROUP rg_move_query; -CREATE +CREATE ROLE CREATE ROLE role_move_query_small RESOURCE GROUP rg_move_query_small; -CREATE +CREATE ROLE CREATE EXTENSION IF NOT EXISTS gp_inject_fault; -CREATE +CREATE EXTENSION @@ -74,11 +74,11 @@ SELECT is_session_in_group(pid, 'default_group') FROM pg_stat_activity WHERE wai f (1 row) 1: END; -END +COMMIT 2<: <... completed> BEGIN 2: END; -END +COMMIT -- test3: the destination group will wake up 'pg_resgroup_move_query' when a new slot become available 1: SET ROLE role_move_query; @@ -140,7 +140,7 @@ BEGIN (1 row) 2&: SELECT pg_resgroup_move_query(pid, 'rg_move_query') FROM pg_stat_activity WHERE query LIKE '%pg_class%' AND rsgname='rg_move_query_small'; 1: END; -END +COMMIT 1: SELECT gp_wait_until_triggered_fault('resource_group_give_away_begin', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = -1; gp_wait_until_triggered_fault ------------------------------- @@ -185,7 +185,7 @@ BEGIN (1 row) 2&: SELECT pg_resgroup_move_query(pid, 'rg_move_query') FROM pg_stat_activity WHERE query LIKE '%pg_class%' AND rsgname='rg_move_query_small'; 1: END; -END +COMMIT 1q: ... 
3: SELECT gp_wait_until_triggered_fault('resource_group_give_away_begin', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = -1; gp_wait_until_triggered_fault @@ -289,7 +289,7 @@ ERROR: canceling statement due to user request 1 (1 row) 1: END; -END +COMMIT 2: SELECT num_running FROM gp_toolkit.gp_resgroup_status WHERE groupname='rg_move_query'; num_running ------------- @@ -386,7 +386,7 @@ RESET f (1 row) 1: END; -END +COMMIT 3: SELECT num_running FROM gp_toolkit.gp_resgroup_status WHERE groupname='rg_move_query'; num_running ------------- @@ -445,7 +445,7 @@ ERROR: cannot send signal to process t (1 row) 1: END; -END +COMMIT 3: SELECT num_running FROM gp_toolkit.gp_resgroup_status WHERE groupname='rg_move_query'; num_running ------------- @@ -496,7 +496,7 @@ RESET 0 (1 row) 1: END; -END +COMMIT -- Test10: check entrydb queries working -- Previously, we sent a signal to only one process - dispatcher or entrydb. @@ -547,13 +547,13 @@ BEGIN t (1 row) 1: END; -END +COMMIT DROP ROLE role_move_query; -DROP +DROP ROLE DROP RESOURCE GROUP rg_move_query; -DROP +DROP RESOURCE GROUP DROP ROLE role_move_query_small; -DROP +DROP ROLE DROP RESOURCE GROUP rg_move_query_small; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_name_convention.out b/src/test/isolation2/expected/resgroup/resgroup_name_convention.out index 9f200bb2beb..a54b7df8ff9 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_name_convention.out +++ b/src/test/isolation2/expected/resgroup/resgroup_name_convention.out @@ -19,7 +19,7 @@ -- CREATE OR REPLACE VIEW rg_name_view AS SELECT S.groupname, C.concurrency FROM gp_toolkit.gp_resgroup_config C, gp_toolkit.gp_resgroup_status S WHERE C.groupid = S.groupid AND C.groupname != 'default_group' AND C.groupname != 'admin_group' AND C.groupname != 'system_group' ORDER BY C.groupid; -CREATE +CREATE VIEW -- TODO: need to cleanup all existing resgroups @@ -29,65 +29,65 @@ CREATE -- by default resgroup names 
have the form of [_a-zA-Z][_a-zA-Z0-9]* CREATE RESOURCE GROUP rgNameTest01 WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP rgNameTest01 SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency --------------+------------- rgnametest01 | 2 (1 row) DROP RESOURCE GROUP rgNameTest01; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP __rg_name_test_01__ WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP __rg_name_test_01__ SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency ---------------------+------------- __rg_name_test_01__ | 2 (1 row) DROP RESOURCE GROUP __rg_name_test_01__; -DROP +DROP RESOURCE GROUP -- min length is 1 character CREATE RESOURCE GROUP Z WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP Z; -DROP +DROP RESOURCE GROUP -- max length is 63 characters CREATE RESOURCE GROUP max012345678901234567890123456789012345678901234567890123456789 WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP max012345678901234567890123456789012345678901234567890123456789 SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------------------------------------------------------------+------------- max012345678901234567890123456789012345678901234567890123456789 | 2 (1 row) DROP RESOURCE GROUP max012345678901234567890123456789012345678901234567890123456789; -DROP +DROP RESOURCE GROUP -- characters exceed the max length are ignored CREATE RESOURCE GROUP max012345678901234567890123456789012345678901234567890123456789further WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP max012345678901234567890123456789012345678901234567890123456789are SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency 
-----------------------------------------------------------------+------------- max012345678901234567890123456789012345678901234567890123456789 | 2 (1 row) DROP RESOURCE GROUP max012345678901234567890123456789012345678901234567890123456789ignored; -DROP +DROP RESOURCE GROUP -- special characters are allowed with double quotation marks -- white spaces CREATE RESOURCE GROUP "newlines s p a c e s t a b s" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "newlines s p a c e s t a b s" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency ------------------------------+------------- @@ -96,92 +96,92 @@ s p a c e s t a b s | 2 (1 row) DROP RESOURCE GROUP "newlines s p a c e s t a b s"; -DROP +DROP RESOURCE GROUP -- punctuations CREATE RESOURCE GROUP "!#$%&`()*+,-./:;<=>?@[]^_{|}~" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "!#$%&`()*+,-./:;<=>?@[]^_{|}~" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -------------------------------+------------- !#$%&`()*+,-./:;<=>?@[]^_{|}~ | 2 (1 row) DROP RESOURCE GROUP "!#$%&`()*+,-./:;<=>?@[]^_{|}~"; -DROP +DROP RESOURCE GROUP -- quotation marks CREATE RESOURCE GROUP "'' are 2 single quotation marks" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "'' are 2 single quotation marks" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency ---------------------------------+------------- '' are 2 single quotation marks | 2 (1 row) DROP RESOURCE GROUP "'' are 2 single quotation marks"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP """ is 1 double quotation mark" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP """ is 1 double quotation mark" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency 
------------------------------+------------- " is 1 double quotation mark | 2 (1 row) DROP RESOURCE GROUP """ is 1 double quotation mark"; -DROP +DROP RESOURCE GROUP -- nothing special with leading character CREATE RESOURCE GROUP "0 as prefix" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "0 as prefix" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -------------+------------- 0 as prefix | 2 (1 row) DROP RESOURCE GROUP "0 as prefix"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP " leading space" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP " leading space" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency ----------------+------------- leading space | 2 (1 row) DROP RESOURCE GROUP " leading space"; -DROP +DROP RESOURCE GROUP -- backslash is not used as the escape character CREATE RESOURCE GROUP "\\ are two backslashes" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "\\ are two backslashes" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency ------------------------+------------- \\ are two backslashes | 2 (1 row) DROP RESOURCE GROUP "\\ are two backslashes"; -DROP +DROP RESOURCE GROUP -- below are octal, hex and unicode representations of "rg1" CREATE RESOURCE GROUP "\o162\o147\o61" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP "\x72\x67\x31" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP "\u0072\u0067\u0031" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "\o162\o147\o61" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP "\x72\x67\x31" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP "\u0072\u0067\u0031" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; 
groupname | concurrency --------------------+------------- @@ -195,84 +195,84 @@ SELECT * FROM rg_name_view; DROP RESOURCE GROUP "rg1"; ERROR: resource group "rg1" does not exist DROP RESOURCE GROUP "\o162\o147\o61"; -DROP +DROP RESOURCE GROUP DROP RESOURCE GROUP "\x72\x67\x31"; -DROP +DROP RESOURCE GROUP DROP RESOURCE GROUP "\u0072\u0067\u0031"; -DROP +DROP RESOURCE GROUP -- unicode escapes are supported CREATE RESOURCE GROUP U&"\0441\043B\043E\043D" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP U&"\0441\043B\043E\043D" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- слон | 2 (1 row) DROP RESOURCE GROUP U&"\0441\043B\043E\043D"; -DROP +DROP RESOURCE GROUP -- unicode representation of "rg1" CREATE RESOURCE GROUP U&"\0072\0067\0031" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "rg1" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- rg1 | 2 (1 row) DROP RESOURCE GROUP "rg1"; -DROP +DROP RESOURCE GROUP -- CJK characters are allowed with or without double quotation marks CREATE RESOURCE GROUP 资源组 WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "资源组" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- 资源组 | 2 (1 row) DROP RESOURCE GROUP 资源组; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP リソース・グループ WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "リソース・グループ" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- リソース・グループ | 2 (1 row) DROP RESOURCE GROUP リソース・グループ; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP 자원그룹 WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "자원그룹" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP 
SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- 자원그룹 | 2 (1 row) DROP RESOURCE GROUP 자원그룹; -DROP +DROP RESOURCE GROUP -- names are case sensitive, -- but are always converted to lower case unless around with quotation marks CREATE RESOURCE GROUP "RG_NAME_TEST" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP rg_Name_Test WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP "rg_name_test" WITH (cpu_max_percent=10); ERROR: resource group "rg_name_test" already exists ALTER RESOURCE GROUP Rg_NaMe_TeSt SET concurrency 2; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP "RG_NAME_TEST" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency --------------+------------- @@ -280,101 +280,101 @@ SELECT * FROM rg_name_view; rg_name_test | 2 (2 rows) DROP RESOURCE GROUP "RG_NAME_TEST"; -DROP +DROP RESOURCE GROUP DROP RESOURCE GROUP RG_nAME_tEST; -DROP +DROP RESOURCE GROUP -- reserved names are all lower case: "default_group", "admin_group", "none", -- they can be used by users with at least one upper case character. 
CREATE RESOURCE GROUP "None" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "None" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- None | 2 (1 row) DROP RESOURCE GROUP "None"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP "NONE" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "NONE" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- NONE | 2 (1 row) DROP RESOURCE GROUP "NONE"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP "DEFAULT_GROup" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "DEFAULT_GROup" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency ---------------+------------- DEFAULT_GROup | 2 (1 row) DROP RESOURCE GROUP "DEFAULT_GROup"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP "ADMIN_GROUP" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "ADMIN_GROUP" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -------------+------------- ADMIN_GROUP | 2 (1 row) DROP RESOURCE GROUP "ADMIN_GROUP"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP "with" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "with" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- with | 2 (1 row) DROP RESOURCE GROUP "with"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP "WITH" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "WITH" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- WITH | 2 (1 row) DROP RESOURCE GROUP "WITH"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP "group" WITH 
(cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "group" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- group | 2 (1 row) DROP RESOURCE GROUP "group"; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP "create" WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP "create" SET concurrency 2; -ALTER +ALTER RESOURCE GROUP SELECT * FROM rg_name_view; groupname | concurrency -----------+------------- create | 2 (1 row) DROP RESOURCE GROUP "create"; -DROP +DROP RESOURCE GROUP -- -- negative diff --git a/src/test/isolation2/expected/resgroup/resgroup_recreate.out b/src/test/isolation2/expected/resgroup/resgroup_recreate.out index 3c7ea9cb814..edd0dfbd024 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_recreate.out +++ b/src/test/isolation2/expected/resgroup/resgroup_recreate.out @@ -1,37 +1,37 @@ -- start_ignore DROP ROLE IF EXISTS r1; -DROP +DROP ROLE DROP RESOURCE GROUP rg1; ERROR: resource group "rg1" does not exist -- end_ignore CREATE RESOURCE GROUP rg1 WITH (concurrency=2, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE ROLE r1 RESOURCE GROUP rg1; -CREATE +CREATE ROLE 1: SET ROLE r1; SET 1: BEGIN; BEGIN 1: END; -END +COMMIT ALTER ROLE r1 RESOURCE GROUP none; -ALTER +ALTER ROLE DROP RESOURCE GROUP rg1; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg1 WITH (concurrency=2, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER ROLE r1 RESOURCE GROUP rg1; -ALTER +ALTER ROLE 1: BEGIN; BEGIN 1: END; -END +COMMIT DROP ROLE r1; -DROP +DROP ROLE DROP RESOURCE GROUP rg1; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resgroup/resgroup_seg_down_2pc.out b/src/test/isolation2/expected/resgroup/resgroup_seg_down_2pc.out index 560444f0671..d657d9f37bd 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_seg_down_2pc.out +++ b/src/test/isolation2/expected/resgroup/resgroup_seg_down_2pc.out @@ -4,12 
+4,15 @@ -- The expectation is "alter resource group" can run successfully since the mirror segment is UP. -- After recover the segment, there is no error or blocking. +create extension if not exists gp_inject_fault; +CREATE EXTENSION + -- set these values purely to cut down test time, as default fts trigger is -- every min and 5 retries alter system set gp_fts_probe_interval to 10; -ALTER +ALTER SYSTEM alter system set gp_fts_probe_retries to 0; -ALTER +ALTER SYSTEM select pg_reload_conf(); pg_reload_conf ---------------- @@ -17,7 +20,7 @@ select pg_reload_conf(); (1 row) 1:create resource group rgroup_seg_down with (cpu_max_percent=35, CONCURRENCY=10); -CREATE +CREATE RESOURCE GROUP -- inject an error in function dtm_broadcast_commit_prepared, that is before QD broadcasts commit prepared command to QEs 2:select gp_inject_fault_infinite('dtm_broadcast_commit_prepared', 'suspend', dbid) from gp_segment_configuration where role='p' and content=-1; @@ -58,7 +61,7 @@ CREATE Success: (2 rows) 1<: <... completed> -ALTER +ALTER RESOURCE GROUP -- make sure "alter resource group" has taken effect. 
1:select concurrency from gp_toolkit.gp_resgroup_config where groupname = 'rgroup_seg_down'; concurrency @@ -105,12 +108,12 @@ ALTER 20 (1 row) 1:drop resource group rgroup_seg_down; -DROP +DROP RESOURCE GROUP 1:alter system reset gp_fts_probe_interval; -ALTER +ALTER SYSTEM 1:alter system reset gp_fts_probe_retries; -ALTER +ALTER SYSTEM 1:select pg_reload_conf(); pg_reload_conf ---------------- diff --git a/src/test/isolation2/expected/resgroup/resgroup_syntax.out b/src/test/isolation2/expected/resgroup/resgroup_syntax.out index 1b645564f2d..bd6bd8234df 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_syntax.out +++ b/src/test/isolation2/expected/resgroup/resgroup_syntax.out @@ -3,32 +3,32 @@ -- ---------------------------------------------------------------------- DROP ROLE IF EXISTS rg_test_role; -DROP +DROP ROLE -- positive CREATE ROLE rg_test_role; -CREATE +CREATE ROLE SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role'; rolresgroup ------------- 6437 (1 row) CREATE ROLE rg_test_role_super SUPERUSER; -CREATE +CREATE ROLE SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role_super'; rolresgroup ------------- 6438 (1 row) ALTER ROLE rg_test_role_super NOSUPERUSER; -ALTER +ALTER ROLE SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role_super'; rolresgroup ------------- 6437 (1 row) ALTER ROLE rg_test_role_super SUPERUSER; -ALTER +ALTER ROLE SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role_super'; rolresgroup ------------- @@ -36,14 +36,14 @@ SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role_super'; (1 row) ALTER ROLE rg_test_role RESOURCE GROUP none; -ALTER +ALTER ROLE SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role'; rolresgroup ------------- 6437 (1 row) ALTER ROLE rg_test_role_super RESOURCE GROUP none; -ALTER +ALTER ROLE SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role_super'; rolresgroup ------------- @@ -51,14 +51,14 @@ SELECT rolresgroup FROM pg_authid 
WHERE rolname = 'rg_test_role_super'; (1 row) ALTER ROLE rg_test_role RESOURCE GROUP default_group; -ALTER +ALTER ROLE SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role'; rolresgroup ------------- 6437 (1 row) ALTER ROLE rg_test_role_super RESOURCE GROUP admin_group; -ALTER +ALTER ROLE SELECT rolresgroup FROM pg_authid WHERE rolname = 'rg_test_role_super'; rolresgroup ------------- @@ -76,7 +76,7 @@ ERROR: only superuser can be assigned to admin resgroup CREATE ROLE r_test_system_group RESOURCE GROUP system_group; ERROR: assigning to system resgroup is not allowed CREATE ROLE r_test_system_group; -CREATE +CREATE ROLE ALTER ROLE r_test_system_group RESOURCE GROUP system_group; ERROR: assigning to system resgroup is not allowed @@ -88,11 +88,11 @@ ERROR: only superuser can be assigned to admin resgroup -- cleanup DROP ROLE rg_test_role; -DROP +DROP ROLE DROP ROLE rg_test_role_super; -DROP +DROP ROLE DROP ROLE r_test_system_group; -DROP +DROP ROLE -- ---------------------------------------------------------------------- -- Test: create/drop a resource group @@ -123,11 +123,11 @@ ERROR: resource group name "none" is reserved -- multiple resource groups can't share the same name CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10); ERROR: resource group "rg_test_group" already exists DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- can't specify the resource limit type multiple times CREATE RESOURCE GROUP rg_test_group WITH (concurrency=1, cpu_max_percent=5, concurrency=1); @@ -180,7 +180,7 @@ CREATE RESOURCE GROUP rg_test_group WITH (cpuset='0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ERROR: the length of cpuset reached the upper limit 1024 -- can't alter to invalid cpuset CREATE RESOURCE GROUP rg_test_group WITH (cpuset='0'); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group set CPUSET ''; ERROR: cpuset invalid ALTER RESOURCE GROUP 
rg_test_group set CPUSET ','; @@ -219,7 +219,7 @@ ERROR: cpu cores 1024 are unavailable on the system ALTER RESOURCE GROUP rg_test_group set CPUSET '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,'; ERROR: the length of cpuset reached the upper limit 1024 DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- can't drop non-exist resource group DROP RESOURCE GROUP non_exist_group; ERROR: resource group "non_exist_group" does not exist @@ -235,68 +235,68 @@ ERROR: resource group "none" does not exist -- positive CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP SELECT groupname,concurrency,cpu_max_percent, cpu_weight FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | concurrency | cpu_max_percent | cpu_weight ---------------+-------------+-----------------+------------ rg_test_group | 20 | 10 | 100 (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (concurrency=1, 
cpuset='0'); -CREATE +CREATE RESOURCE GROUP SELECT groupname,concurrency,cpu_max_percent, cpu_weight FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | concurrency | cpu_max_percent | cpu_weight ---------------+-------------+-----------------+------------ rg_test_group | 1 | -1 | 100 (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10, cpu_weight=500); -CREATE +CREATE RESOURCE GROUP SELECT groupname,concurrency,cpu_max_percent, cpu_weight FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | concurrency | cpu_max_percent | cpu_weight ---------------+-------------+-----------------+------------ rg_test_group | 20 | 10 | 500 (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=-1, cpu_weight=500); -CREATE +CREATE RESOURCE GROUP SELECT groupname,concurrency,cpu_max_percent, cpu_weight FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | concurrency | cpu_max_percent | cpu_weight ---------------+-------------+-----------------+------------ rg_test_group | 20 | -1 | 500 (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpuset='0', cpu_weight=500); -CREATE +CREATE RESOURCE GROUP SELECT groupname,concurrency,cpu_max_percent, cpu_weight FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | concurrency | cpu_max_percent | cpu_weight ---------------+-------------+-----------------+------------ rg_test_group | 20 | -1 | 500 (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpuset='0'); -CREATE +CREATE RESOURCE GROUP SELECT groupname,concurrency,cpu_max_percent,cpu_weight FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | concurrency | cpu_max_percent | cpu_weight 
---------------+-------------+-----------------+------------ rg_test_group | 20 | -1 | 100 (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpuset='0;0-1'); -CREATE +CREATE RESOURCE GROUP SELECT groupname,concurrency,cpu_max_percent,cpu_weight,cpuset FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | concurrency | cpu_max_percent | cpu_weight | cpuset ---------------+-------------+-----------------+------------+-------- rg_test_group | 20 | -1 | 100 | 0;0-1 (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- ---------------------------------------------------------------------- -- Test: boundary check in create resource group syntax -- ---------------------------------------------------------------------- @@ -315,11 +315,11 @@ ERROR: concurrency range is [0, 'max_connections'] -- negative: the cores of cpuset in different groups mustn't overlap CREATE RESOURCE GROUP rg_test_group1 WITH (cpuset='0'); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group2 WITH (cpuset='0'); ERROR: cpu cores 0 are used by resource group rg_test_group1 DROP RESOURCE GROUP rg_test_group1; -DROP +DROP RESOURCE GROUP -- negative: cpu_weight should be in [1, 500] CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10, cpu_weight=0); @@ -333,63 +333,63 @@ ERROR: cpu_weight range is [1, 500] -- positive: cpu_max_percent should be in [1, 100] CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=60); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=1); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- positive: cpu_weight should be in [1, 500] CREATE RESOURCE 
GROUP rg_test_group WITH (cpu_max_percent=10, cpu_weight=100); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10, cpu_weight=500); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- positive: concurrency should be in [0, max_connections] CREATE RESOURCE GROUP rg_test_group WITH (concurrency=0, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (concurrency=1, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (concurrency=25, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg1_test_group WITH (concurrency=1, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP rg2_test_group WITH (concurrency=1, cpu_max_percent=500); ERROR: cpu_max_percent range is [1, 100] or equals to -1 DROP RESOURCE GROUP rg1_test_group; -DROP +DROP RESOURCE GROUP DROP RESOURCE GROUP rg2_test_group; ERROR: resource group "rg2_test_group" does not exist -- positive: min_cost should be in [0,INT32_MAX] CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10, min_cost=0); -CREATE +CREATE RESOURCE GROUP CREATE RESOURCE GROUP rg1_test_group WITH (cpu_max_percent=10, min_cost=2147483647); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET min_cost 2147483647; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg1_test_group SET min_cost 0; -ALTER +ALTER RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP DROP RESOURCE GROUP rg1_test_group; -DROP +DROP RESOURCE GROUP -- negative: min_cost should be in [0,INT32_MAX] CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10, min_cost=-1); @@ 
-397,7 +397,7 @@ ERROR: The min_cost value can't be less than 0. CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10, min_cost=2147483648); ERROR: capability min_cost is out of range CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10, min_cost=0); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET min_cost -1; ERROR: The min_cost value can't be less than 0. ALTER RESOURCE GROUP rg_test_group SET min_cost 2147483648; @@ -405,14 +405,14 @@ ERROR: syntax error at or near "2147483648" LINE 1: ALTER RESOURCE GROUP rg_test_group SET min_cost 2147483648; ^ DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- -- ---------------------------------------------------------------------- -- Test: alter a resource group -- ---------------------------------------------------------------------- CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=5); -CREATE +CREATE RESOURCE GROUP -- ALTER RESOURCE GROUP SET CONCURRENCY N -- negative: concurrency should be in [1, max_connections] @@ -444,13 +444,13 @@ LINE 1: ALTER RESOURCE GROUP rg_test_group SET CONCURRENCY '1'; ^ -- positive: concurrency should be in [1, max_connections] ALTER RESOURCE GROUP rg_test_group SET CONCURRENCY 0; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET CONCURRENCY 1; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET CONCURRENCY 2; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET CONCURRENCY 25; -ALTER +ALTER RESOURCE GROUP -- ALTER RESOURCE GROUP SET cpu_max_percent VALUE -- negative: cpu_max_percent should be in [1, 100] @@ -459,7 +459,7 @@ ERROR: syntax error at or near "0.1" LINE 1: ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent -0.1; ^ ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent -1; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 0; ERROR: cpu_max_percent range is [1, 100] or equals to -1 ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 
0.7; @@ -471,7 +471,7 @@ ERROR: syntax error at or near "1.7" LINE 1: ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 1.7; ^ ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 61; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent a; ERROR: syntax error at or near "a" LINE 1: ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent a; @@ -490,35 +490,35 @@ LINE 1: ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 0.2%; ^ -- positive: cpu_max_percent should be in [1, 100] ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 1; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 2; -ALTER +ALTER RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 60; -ALTER +ALTER RESOURCE GROUP DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- positive: cpuset and cpu_max_percent are exclusive, -- if cpu_max_percent is set, cpuset is empty -- if cpuset is set, cpuset is -1 CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP ALTER RESOURCE GROUP rg_test_group SET CPUSET '0'; -ALTER +ALTER RESOURCE GROUP SELECT groupname,cpu_max_percent,cpuset FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | cpu_max_percent | cpuset ---------------+-----------------+-------- rg_test_group | -1 | 0 (1 row) ALTER RESOURCE GROUP rg_test_group SET cpu_max_percent 10; -ALTER +ALTER RESOURCE GROUP SELECT groupname,cpu_max_percent,cpuset FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; groupname | cpu_max_percent | cpuset ---------------+-----------------+-------- rg_test_group | 10 | -1 (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10, concurrency=5); CREATE @@ -569,11 +569,11 @@ DROP (exited with code 0) -- end_ignore 0: CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP 0: ALTER 
RESOURCE GROUP rg_test_group SET cpu_max_percent 100; -ALTER +ALTER RESOURCE GROUP 0: DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- start_ignore !\retcode gpconfig -c gp_resource_group_cpu_limit -v 1; -- start_ignore diff --git a/src/test/isolation2/expected/resgroup/resgroup_transaction.out b/src/test/isolation2/expected/resgroup/resgroup_transaction.out index baad66ef535..72555b37214 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_transaction.out +++ b/src/test/isolation2/expected/resgroup/resgroup_transaction.out @@ -9,7 +9,7 @@ ERROR: resource group "rg_test_group" does not exist -- helper view to check the resgroup status CREATE OR REPLACE VIEW rg_test_monitor AS SELECT groupname, concurrency, cpu_max_percent FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_test_group'; -CREATE +CREATE VIEW -- ---------------------------------------------------------------------- -- Test: create/alter/drop a resource group in transaction block @@ -21,7 +21,7 @@ BEGIN CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=5); ERROR: CREATE RESOURCE GROUP cannot run inside a transaction block END; -END +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent -----------+-------------+----------------- @@ -29,13 +29,13 @@ SELECT * FROM rg_test_monitor; -- ALTER RESOURCE GROUP cannot run inside a transaction block CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=5); -CREATE +CREATE RESOURCE GROUP BEGIN; BEGIN ALTER RESOURCE GROUP rg_test_group SET CONCURRENCY 10; ERROR: ALTER RESOURCE GROUP cannot run inside a transaction block END; -END +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent ---------------+-------------+----------------- @@ -48,7 +48,7 @@ BEGIN DROP RESOURCE GROUP rg_test_group; ERROR: DROP RESOURCE GROUP cannot run inside a transaction block END; -END +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent 
---------------+-------------+----------------- @@ -56,7 +56,7 @@ SELECT * FROM rg_test_monitor; (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- ---------------------------------------------------------------------- @@ -74,7 +74,7 @@ SELECT 1; CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=5); ERROR: CREATE RESOURCE GROUP cannot run inside a transaction block END; -END +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent -----------+-------------+----------------- @@ -82,7 +82,7 @@ SELECT * FROM rg_test_monitor; -- ALTER RESOURCE GROUP cannot run inside a transaction block CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=5); -CREATE +CREATE RESOURCE GROUP BEGIN; BEGIN SELECT 1; @@ -93,7 +93,7 @@ SELECT 1; ALTER RESOURCE GROUP rg_test_group SET CONCURRENCY 10; ERROR: ALTER RESOURCE GROUP cannot run inside a transaction block END; -END +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent ---------------+-------------+----------------- @@ -111,7 +111,7 @@ SELECT 1; DROP RESOURCE GROUP rg_test_group; ERROR: DROP RESOURCE GROUP cannot run inside a transaction block END; -END +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent ---------------+-------------+----------------- @@ -119,7 +119,7 @@ SELECT * FROM rg_test_monitor; (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- ---------------------------------------------------------------------- @@ -136,7 +136,7 @@ ERROR: CREATE RESOURCE GROUP cannot run inside a transaction block ROLLBACK TO SAVEPOINT rg_savepoint; ROLLBACK ABORT; -ABORT +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent -----------+-------------+----------------- @@ -144,7 +144,7 @@ SELECT * FROM rg_test_monitor; -- ALTER RESOURCE GROUP cannot run inside a subtransaction CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=5); -CREATE +CREATE RESOURCE GROUP 
BEGIN; BEGIN SAVEPOINT rg_savepoint; @@ -154,7 +154,7 @@ ERROR: ALTER RESOURCE GROUP cannot run inside a transaction block ROLLBACK TO SAVEPOINT rg_savepoint; ROLLBACK ABORT; -ABORT +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent ---------------+-------------+----------------- @@ -171,7 +171,7 @@ ERROR: DROP RESOURCE GROUP cannot run inside a transaction block ROLLBACK TO SAVEPOINT rg_savepoint; ROLLBACK ABORT; -ABORT +ROLLBACK SELECT * FROM rg_test_monitor; groupname | concurrency | cpu_max_percent ---------------+-------------+----------------- @@ -179,20 +179,20 @@ SELECT * FROM rg_test_monitor; (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP -- ---------------------------------------------------------------------- -- Test: create/alter/drop a resource group in function call -- ---------------------------------------------------------------------- CREATE OR REPLACE FUNCTION rg_create_func() RETURNS VOID AS $$ CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=5) $$ LANGUAGE SQL; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION rg_alter_func() RETURNS VOID AS $$ ALTER RESOURCE GROUP rg_test_group SET CONCURRENCY 10 $$ LANGUAGE SQL; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION rg_drop_func() RETURNS VOID AS $$ DROP RESOURCE GROUP rg_test_group $$ LANGUAGE SQL; -CREATE +CREATE FUNCTION -- CREATE RESOURCE GROUP cannot run inside a function call SELECT * FROM rg_create_func(); @@ -205,7 +205,7 @@ SELECT * FROM rg_test_monitor; -- ALTER RESOURCE GROUP cannot run inside a function call CREATE RESOURCE GROUP rg_test_group WITH (cpu_max_percent=5); -CREATE +CREATE RESOURCE GROUP SELECT * FROM rg_alter_func(); ERROR: ALTER RESOURCE GROUP cannot be executed from a function CONTEXT: SQL function "rg_alter_func" statement 1 @@ -226,14 +226,14 @@ SELECT * FROM rg_test_monitor; (1 row) DROP RESOURCE GROUP rg_test_group; -DROP +DROP RESOURCE GROUP DROP FUNCTION rg_create_func(); -DROP +DROP FUNCTION DROP 
FUNCTION rg_alter_func(); -DROP +DROP FUNCTION DROP FUNCTION rg_drop_func(); -DROP +DROP FUNCTION -- cleanup DROP VIEW rg_test_monitor; -DROP +DROP VIEW diff --git a/src/test/isolation2/expected/resgroup/resgroup_unassign_entrydb.out b/src/test/isolation2/expected/resgroup/resgroup_unassign_entrydb.out index 86eed429db8..b29443853a9 100644 --- a/src/test/isolation2/expected/resgroup/resgroup_unassign_entrydb.out +++ b/src/test/isolation2/expected/resgroup/resgroup_unassign_entrydb.out @@ -5,15 +5,15 @@ -- start_ignore DROP ROLE IF EXISTS role_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_test; ERROR: resource group "rg_test" does not exist -- end_ignore CREATE RESOURCE GROUP rg_test WITH (concurrency=2, cpu_max_percent=10); -CREATE +CREATE RESOURCE GROUP CREATE ROLE role_test RESOURCE GROUP rg_test; -CREATE +CREATE ROLE -- By pass this session, else this affects the testing session, i.e. 1: SET gp_resource_group_bypass = true; @@ -95,6 +95,6 @@ SET -- Clean up DROP ROLE role_test; -DROP +DROP ROLE DROP RESOURCE GROUP rg_test; -DROP +DROP RESOURCE GROUP diff --git a/src/test/isolation2/expected/resource_queue.out b/src/test/isolation2/expected/resource_queue.out index f52c6890dd0..bf90017eea2 100644 --- a/src/test/isolation2/expected/resource_queue.out +++ b/src/test/isolation2/expected/resource_queue.out @@ -1,14 +1,14 @@ 0:CREATE RESOURCE QUEUE rq_concurrency_test WITH (active_statements = 1); -CREATE +CREATE QUEUE 0:CREATE role role_concurrency_test RESOURCE QUEUE rq_concurrency_test; -CREATE +CREATE ROLE 1:SET role role_concurrency_test; SET 1:BEGIN; BEGIN 1:DECLARE c1 CURSOR FOR SELECT 1; -DECLARE +DECLARE CURSOR 2:SET role role_concurrency_test; SET @@ -55,7 +55,7 @@ PREPARE (3 rows) 1:END; -END +COMMIT 2<: <... completed> ?column? @@ -63,7 +63,7 @@ END 1 (1 row) 2:END; -END +COMMIT 3<: <... completed> ?column? @@ -71,7 +71,7 @@ END 1 (1 row) 3:END; -END +COMMIT -- Sanity check: Ensure that all locks were released. 
0:SELECT granted, locktype, mode FROM pg_locks where locktype = 'resource queue' and pid != pg_backend_pid(); @@ -116,6 +116,6 @@ CLOSE (1 row) 0:DROP role role_concurrency_test; -DROP +DROP ROLE 0:DROP RESOURCE QUEUE rq_concurrency_test; -DROP +DROP QUEUE diff --git a/src/test/isolation2/expected/resource_queue_cancel.out b/src/test/isolation2/expected/resource_queue_cancel.out index 40343b36cc2..da67e99937a 100644 --- a/src/test/isolation2/expected/resource_queue_cancel.out +++ b/src/test/isolation2/expected/resource_queue_cancel.out @@ -2,9 +2,9 @@ -- active statements limit is reached. 0:CREATE RESOURCE QUEUE rq_cancel WITH (active_statements = 1); -CREATE +CREATE QUEUE 0:CREATE ROLE role_cancel RESOURCE QUEUE rq_cancel; -CREATE +CREATE ROLE -- Consume an active statement in session 1. 1:SET ROLE role_cancel; @@ -12,7 +12,7 @@ SET 1:BEGIN; BEGIN 1:DECLARE c CURSOR FOR SELECT 0; -DECLARE +DECLARE CURSOR -- Make session 2 wait on the resource queue lock. 2:SET ROLE role_cancel; @@ -29,19 +29,19 @@ SET -- Now once we end session 1's transaction, we should be able to consume the -- vacated active statement slot in session 2. 1:END; -END +COMMIT 2<: <... completed> ERROR: canceling statement due to user request 2:END; -END +COMMIT 2:BEGIN; BEGIN 2:DECLARE c CURSOR FOR SELECT 0; -DECLARE +DECLARE CURSOR 2:END; -END +COMMIT -- Sanity check: Ensure that the resource queue is now empty. 0:SELECT rsqcountlimit, rsqcountvalue FROM pg_resqueue_status WHERE rsqname = 'rq_cancel'; @@ -52,6 +52,6 @@ END -- Cleanup 0:DROP ROLE role_cancel; -DROP +DROP ROLE 0:DROP RESOURCE QUEUE rq_cancel; -DROP +DROP QUEUE diff --git a/src/test/isolation2/expected/resource_queue_deadlock.out b/src/test/isolation2/expected/resource_queue_deadlock.out index 06309f34b6d..b78976d91a3 100644 --- a/src/test/isolation2/expected/resource_queue_deadlock.out +++ b/src/test/isolation2/expected/resource_queue_deadlock.out @@ -2,11 +2,9 @@ -- trigger a local deadlock detection. 
0: CREATE RESOURCE QUEUE rq_deadlock_test WITH (active_statements = 1); -CREATE +CREATE QUEUE 0: CREATE role role_deadlock_test RESOURCE QUEUE rq_deadlock_test; -CREATE -0: SET gp_autostats_lock_wait TO ON; -SET +CREATE ROLE 0: SELECT gp_inject_fault_infinite('before_auto_stats', 'suspend', dbid) FROM gp_segment_configuration WHERE content = -1 AND role = 'p'; gp_inject_fault_infinite @@ -28,7 +26,7 @@ SET on_no_stats (1 row) 1: CREATE TABLE t_deadlock_test(c1 int); -CREATE +CREATE TABLE 1&: INSERT INTO t_deadlock_test VALUES (1); 2: SET role role_deadlock_test; SET @@ -56,7 +54,7 @@ HINT: See server log for query details. 2: ROLLBACK; ROLLBACK 1<: <... completed> -INSERT 1 +INSERT 0 1 -- Sanity check: Ensure that the resource queue is now empty. 0: SELECT rsqcountlimit, rsqcountvalue from pg_resqueue_status WHERE rsqname = 'rq_deadlock_test'; @@ -67,8 +65,8 @@ INSERT 1 -- Clean up the test 0: DROP TABLE t_deadlock_test; -DROP +DROP TABLE 0: DROP ROLE role_deadlock_test; -DROP +DROP ROLE 0: DROP RESOURCE QUEUE rq_deadlock_test; -DROP +DROP QUEUE diff --git a/src/test/isolation2/expected/resource_queue_multi_portal.out b/src/test/isolation2/expected/resource_queue_multi_portal.out index 5fe141ccc9c..3bb74f1b87d 100644 --- a/src/test/isolation2/expected/resource_queue_multi_portal.out +++ b/src/test/isolation2/expected/resource_queue_multi_portal.out @@ -2,9 +2,9 @@ -- in the face of deadlocks and statement cancellations, when there is more than -- one active portal in the session. 0:CREATE RESOURCE QUEUE rq_multi_portal WITH (active_statements = 2); -CREATE +CREATE QUEUE 0:CREATE ROLE role_multi_portal RESOURCE QUEUE rq_multi_portal; -CREATE +CREATE ROLE 1:SET ROLE role_multi_portal; SET @@ -49,12 +49,12 @@ END 1:BEGIN; BEGIN 1:DECLARE c1 CURSOR FOR SELECT 1; -DECLARE +DECLARE CURSOR 2:BEGIN; BEGIN 2:DECLARE c2 CURSOR FOR SELECT 1; -DECLARE +DECLARE CURSOR -- There should be 2 active statements. 
0:SELECT rsqcountlimit, rsqcountvalue FROM pg_resqueue_status WHERE rsqname = 'rq_multi_portal'; @@ -75,7 +75,7 @@ DECLARE -- This should cause a deadlock. 2:DECLARE c4 CURSOR FOR SELECT 1; -DECLARE +DECLARE CURSOR -- After the deadlock report, one session should have ERRORed out with the -- deadlock report and aborted, while the other session should remain active @@ -104,9 +104,9 @@ DETAIL: Process 738539 waits for ExclusiveLock on resource queue 90366; blocked Process 738548 waits for ExclusiveLock on resource queue 90366; blocked by process 738539. HINT: See server log for query details. 1:END; -END +ROLLBACK 2:END; -END +COMMIT 0:SELECT rsqcountlimit, rsqcountvalue FROM pg_resqueue_status WHERE rsqname = 'rq_multi_portal'; rsqcountlimit | rsqcountvalue ---------------+--------------- @@ -120,9 +120,9 @@ END 1:BEGIN; BEGIN 1:DECLARE c1 CURSOR FOR SELECT 1; -DECLARE +DECLARE CURSOR 1:DECLARE c2 CURSOR FOR SELECT 1; -DECLARE +DECLARE CURSOR -- There should be 2 active statements. 0:SELECT rsqcountlimit, rsqcountvalue FROM pg_resqueue_status WHERE rsqname = 'rq_multi_portal'; @@ -145,7 +145,7 @@ DETAIL: resource queue id: 585193, portal id: 3 (1 row) 1:END; -END +ROLLBACK -- -- Scenario 3: @@ -154,12 +154,12 @@ END 1:BEGIN; BEGIN 1:DECLARE c1 CURSOR FOR SELECT 1; -DECLARE +DECLARE CURSOR 2:BEGIN; BEGIN 2:DECLARE c2 CURSOR FOR SELECT 1; -DECLARE +DECLARE CURSOR -- This should block as it will exceed the active statements limit. 1&:DECLARE c3 CURSOR FOR SELECT 1; @@ -195,9 +195,9 @@ ERROR: canceling statement due to user request -- After ending the transactions, there should be 0 active statements. 
1:END; -END +ROLLBACK 2:END; -END +COMMIT 0:SELECT rsqcountlimit, rsqcountvalue FROM pg_resqueue_status WHERE rsqname = 'rq_multi_portal'; rsqcountlimit | rsqcountvalue ---------------+--------------- @@ -206,6 +206,6 @@ END -- Cleanup 0:DROP ROLE role_multi_portal; -DROP +DROP ROLE 0:DROP RESOURCE QUEUE rq_multi_portal; -DROP +DROP QUEUE diff --git a/src/test/isolation2/expected/runaway_query.out b/src/test/isolation2/expected/runaway_query.out index ab550bc3315..c02cacb5b9e 100644 --- a/src/test/isolation2/expected/runaway_query.out +++ b/src/test/isolation2/expected/runaway_query.out @@ -1,10 +1,10 @@ CREATE EXTENSION IF NOT EXISTS gp_inject_fault; -CREATE +CREATE EXTENSION CREATE OR REPLACE LANGUAGE plpgsql; -CREATE +CREATE LANGUAGE CREATE TABLE runaway_query_test_table(a bigint NOT NULL) DISTRIBUTED BY (a); -CREATE +CREATE TABLE -- Use error fault to simulate vmem protect error and force cancel query. SELECT gp_inject_fault_infinite('gpdbwrappers_get_comparison_operator', 'error', 1); diff --git a/src/test/isolation2/expected/runaway_query_optimizer.out b/src/test/isolation2/expected/runaway_query_optimizer.out index 35b6b069cd1..3651ef4fc89 100644 --- a/src/test/isolation2/expected/runaway_query_optimizer.out +++ b/src/test/isolation2/expected/runaway_query_optimizer.out @@ -1,10 +1,10 @@ CREATE EXTENSION IF NOT EXISTS gp_inject_fault; -CREATE +CREATE EXTENSION CREATE OR REPLACE LANGUAGE plpgsql; -CREATE +CREATE LANGUAGE CREATE TABLE runaway_query_test_table(a bigint NOT NULL) DISTRIBUTED BY (a); -CREATE +CREATE TABLE -- Use error fault to simulate vmem protect error and force cancel query. 
SELECT gp_inject_fault_infinite('gpdbwrappers_get_comparison_operator', 'error', 1); diff --git a/src/test/isolation2/expected/segwalrep/cancel_commit_pending_replication.out b/src/test/isolation2/expected/segwalrep/cancel_commit_pending_replication.out index ed5e3d201b8..377d5054afd 100644 --- a/src/test/isolation2/expected/segwalrep/cancel_commit_pending_replication.out +++ b/src/test/isolation2/expected/segwalrep/cancel_commit_pending_replication.out @@ -10,7 +10,7 @@ select gp_inject_fault('wal_sender_loop', 'reset', 2); (1 row) create or replace function wait_for_replication(iterations int) returns bool as $$ begin /* in func */ for i in 1 .. iterations loop /* in func */ if exists (select wait_event from pg_stat_activity where sess_id in (select sess_id from store_session_id) and wait_event = 'SyncRep') then /* in func */ return true; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform pg_stat_clear_snapshot(); /* in func */ end loop; /* in func */ return false; /* in func */ end; /* in func */ $$ language plpgsql VOLATILE; -CREATE +CREATE FUNCTION SELECT role, preferred_role, content, status FROM gp_segment_configuration; role | preferred_role | content | status @@ -25,10 +25,10 @@ SELECT role, preferred_role, content, status FROM gp_segment_configuration; m | m | 0 | u (8 rows) create table store_session_id(a int, sess_id int); -CREATE +CREATE TABLE -- adding `2` as first column as the distribution column and add this tuple to segment 0 1: insert into store_session_id select 2, sess_id from pg_stat_activity where pid = pg_backend_pid(); -INSERT 1 +INSERT 0 1 -- suspend to hit commit-prepared point on segment (as we are -- interested in testing Commit here and not really Prepare) select gp_inject_fault_infinite('finish_prepared_start_of_function', 'suspend', 2); @@ -93,7 +93,7 @@ select gp_inject_fault('wal_sender_loop', 'reset', 2); Success: (1 row) 1<: <... 
completed> -CREATE +CREATE TABLE -- cleanup select gp_inject_fault('sync_rep_query_cancel', 'reset', 2); gp_inject_fault diff --git a/src/test/isolation2/expected/segwalrep/commit_blocking.out b/src/test/isolation2/expected/segwalrep/commit_blocking.out index 5edac6d59f5..8ea8a0cd1da 100644 --- a/src/test/isolation2/expected/segwalrep/commit_blocking.out +++ b/src/test/isolation2/expected/segwalrep/commit_blocking.out @@ -23,9 +23,9 @@ select content, role, preferred_role, status from gp_segment_configuration; -- create table and show commits are not blocked create table segwalrep_commit_blocking (a int) distributed by (a); -CREATE +CREATE TABLE insert into segwalrep_commit_blocking values (5); -INSERT 1 +INSERT 0 1 -- skip FTS probes always select gp_inject_fault('fts_probe', 'reset', 1); @@ -86,20 +86,20 @@ server closed the connection unexpectedly 3: begin; BEGIN 3: insert into segwalrep_commit_blocking values (4); -INSERT 1 +INSERT 0 1 3&: commit; -- this should not block due to direct dispatch to primary with active synced mirror 4: insert into segwalrep_commit_blocking values (6); -INSERT 1 +INSERT 0 1 -- bring the mirror back up -1U: select pg_ctl_start(datadir, port) from gp_segment_configuration where role = 'm' and content = 0; pg_ctl_start -------------------------------------------------- - waiting for server to start done server started + (1 row) -- should unblock and commit now that mirror is back up and in-sync @@ -115,7 +115,7 @@ select gp_inject_fault('fts_probe', 'reset', 1); -- everything should be back to normal 4: insert into segwalrep_commit_blocking select i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 4: select * from segwalrep_commit_blocking order by a; a ---- diff --git a/src/test/isolation2/expected/segwalrep/commit_blocking_on_standby.out b/src/test/isolation2/expected/segwalrep/commit_blocking_on_standby.out index c5eb5f674b8..2052f9fd42f 100644 --- a/src/test/isolation2/expected/segwalrep/commit_blocking_on_standby.out +++ 
b/src/test/isolation2/expected/segwalrep/commit_blocking_on_standby.out @@ -25,7 +25,7 @@ select gp_inject_fault_infinite('walrecv_skip_flush', 'skip', dbid) from gp_segm begin; BEGIN create or replace function wait_for_pg_stat_activity(timeout_secs int) returns void as $$ declare c int; /* in func */ i int; /* in func */ begin c := 0; /* in func */ i := 0; /* in func */ while c < 1 and i < timeout_secs*2 loop select count(*) into c from pg_stat_activity where wait_event = 'SyncRep'; /* in func */ perform pg_sleep(0.5); /* in func */ perform pg_stat_clear_snapshot(); /* in func */ i := i + 1; /* in func */ end loop; /* in func */ if c < 1 then raise exception 'timeout waiting for command to get blocked'; /* in func */ end if; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION -- Flush WAL to trigger the fault on standby. checkpoint; @@ -65,12 +65,12 @@ commit; COMMIT 1<: <... completed> -CREATE +CREATE TABLE -- The blocked commit must have finished and the table should be ready -- for insert. insert into commit_blocking_on_standby_t1 values (1); -INSERT 1 +INSERT 0 1 -- Scenario2: In CATCHUP phase, commits should not block until standby @@ -193,12 +193,12 @@ select gp_inject_fault('all', 'reset', dbid) from gp_segment_configuration where (2 rows) 1<: <... completed> -CREATE +CREATE TABLE -- Create table transaction must have committed and the table should -- be ready for insert. insert into commit_blocking_on_standby_t2 values (1); -INSERT 1 +INSERT 0 1 select wait_until_standby_in_state('streaming'); wait_until_standby_in_state @@ -296,10 +296,10 @@ select gp_inject_fault('all', 'reset', dbid) from gp_segment_configuration where (2 rows) 1<: <... 
completed> -CREATE +CREATE TABLE insert into commit_blocking_on_mirror_tbl values (2, 1); -INSERT 1 +INSERT 0 1 0U: select wait_until_standby_in_state('streaming'); wait_until_standby_in_state ----------------------------- diff --git a/src/test/isolation2/expected/segwalrep/die_commit_pending_replication.out b/src/test/isolation2/expected/segwalrep/die_commit_pending_replication.out index bc706f16321..650ea3dac9a 100644 --- a/src/test/isolation2/expected/segwalrep/die_commit_pending_replication.out +++ b/src/test/isolation2/expected/segwalrep/die_commit_pending_replication.out @@ -1,9 +1,9 @@ -- Adding `2` as first column as the distribution column. -- `2` should be on the first segment. let's double check here. create table store_session_id(a int, sess_id int); -CREATE +CREATE TABLE 1: insert into store_session_id select 2, sess_id from pg_stat_activity where pid = pg_backend_pid(); -INSERT 1 +INSERT 0 1 1: select gp_segment_id, a from store_session_id; gp_segment_id | a ---------------+--- @@ -11,7 +11,7 @@ INSERT 1 (1 row) 1: create table die_commit_pending_replication(a int, b int); -CREATE +CREATE TABLE -- Suspend to hit commit-prepared point on segment (as we are -- interested in testing Commit here and not really Prepare) @@ -90,7 +90,7 @@ select gp_inject_fault('sync_rep_query_die', 'reset', dbid) from gp_segment_conf (1 row) 1<: <... completed> -INSERT 2 +INSERT 0 2 -- check if the insert fails or not. select gp_segment_id, * from die_commit_pending_replication; @@ -110,9 +110,9 @@ select gp_segment_id, * from die_commit_pending_replication; -- 2: create temp table die_commit_pending_replication2(a int); -CREATE +CREATE TABLE 2: insert into die_commit_pending_replication2 values(2),(1); -INSERT 2 +INSERT 0 2 -- Insert fault before the "notification" code on QE and susped there. 
select gp_inject_fault_infinite('start_performDtxProtocolCommitOnePhase', 'error', dbid) from gp_segment_configuration where role='p' and content = 0; @@ -134,6 +134,6 @@ select gp_inject_fault_infinite('start_performDtxProtocolCommitOnePhase', 'reset -- cleanup drop table die_commit_pending_replication; -DROP +DROP TABLE drop table store_session_id; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/segwalrep/dtm_recovery_on_standby.out b/src/test/isolation2/expected/segwalrep/dtm_recovery_on_standby.out index 76f12f54a10..dc9b4dfc32d 100644 --- a/src/test/isolation2/expected/segwalrep/dtm_recovery_on_standby.out +++ b/src/test/isolation2/expected/segwalrep/dtm_recovery_on_standby.out @@ -90,7 +90,7 @@ select gp_inject_fault('all', 'reset', dbid) from gp_segment_configuration where Success: (1 row) 1<: <... completed> -CREATE +CREATE TABLE 2<: <... completed> ERROR: fault triggered, fault name:'transaction_abort_after_distributed_prepared' fault type:'error' @@ -123,12 +123,12 @@ select count(*) from committed_by_standby; ERROR: terminating connection due to administrator command (seg0 slice1 192.168.235.128:7002 pid=24473) -- end_ignore create table standby_config as (select hostname, datadir, port, role from gp_segment_configuration where content = -1) distributed by (hostname); -CREATE 2 +SELECT 2 create or replace function reinitialize_standby() returns text as $$ import subprocess rv = plpy.execute("select hostname, datadir, port from standby_config order by role", 2) standby = rv[0] # role = 'm' master = rv[1] # role = 'p' try: cmd = 'rm -rf %s.dtm_recovery && cp -R %s %s.dtm_recovery' % (standby['datadir'], standby['datadir'], standby['datadir']) remove_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode('ascii') cmd = 'gpinitstandby -ar -P %d' % master['port'] remove_output += subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode('ascii') cmd = 'gpfts -A -D;' remove_output += 
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode('ascii') cmd = 'export PGPORT=%d; gpinitstandby -a -s %s -S %s -P %d' % (master['port'], standby['hostname'], standby['datadir'], standby['port']) init_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode('ascii') except subprocess.CalledProcessError as e: plpy.info(e.output) raise if "ERROR" not in remove_output and "FATAL" not in remove_output and \ "ERROR" not in init_output and "FATAL" not in init_output: return "standby initialized" return remove_output + "\n" + init_output $$ language plpython3u; -CREATE +CREATE FUNCTION select reinitialize_standby(); reinitialize_standby diff --git a/src/test/isolation2/expected/segwalrep/dtx_recovery_wait_lsn.out b/src/test/isolation2/expected/segwalrep/dtx_recovery_wait_lsn.out index ce415fc5f96..50045da32c2 100644 --- a/src/test/isolation2/expected/segwalrep/dtx_recovery_wait_lsn.out +++ b/src/test/isolation2/expected/segwalrep/dtx_recovery_wait_lsn.out @@ -5,10 +5,19 @@ -- The FTS process should be able to continue probe and 'sync off' the mirror -- while the 'dtx recovery' process is hanging recovering distributed transactions. +-- modify fts gucs to speed up the test. 
+1: alter system set gp_fts_probe_interval to 10; +ALTER SYSTEM +1: alter system set gp_fts_probe_retries to 1; +ALTER SYSTEM +1: select pg_reload_conf(); + pg_reload_conf +---------------- + t +(1 row) + 1: create table t_wait_lsn(a int); -CREATE -5: create table t_wait_lsn2(a int); -CREATE +CREATE TABLE -- suspend segment 0 before performing 'COMMIT PREPARED' 2: select gp_inject_fault_infinite('finish_prepared_start_of_function', 'suspend', dbid) from gp_segment_configuration where content=0 and role='p'; @@ -149,13 +158,14 @@ server closed the connection unexpectedly 2 (1 row) 4: drop table t_wait_lsn; -DROP -4: drop table t_wait_lsn2; -DROP +DROP TABLE -4: select gp_inject_fault('walrecv_skip_flush', 'reset', dbid) from gp_segment_configuration where content=0; - gp_inject_fault ------------------ - Success: - Success: -(2 rows) +4: alter system reset gp_fts_probe_interval; +ALTER SYSTEM +4: alter system reset gp_fts_probe_retries; +ALTER SYSTEM +4: select pg_reload_conf(); + pg_reload_conf +---------------- + t +(1 row) diff --git a/src/test/isolation2/expected/segwalrep/failover_with_many_records.out b/src/test/isolation2/expected/segwalrep/failover_with_many_records.out index 47a4f4f6140..e41922fcd99 100644 --- a/src/test/isolation2/expected/segwalrep/failover_with_many_records.out +++ b/src/test/isolation2/expected/segwalrep/failover_with_many_records.out @@ -16,7 +16,7 @@ (exited with code 0) 1:CREATE TABLE t(a int, b int); -CREATE +CREATE TABLE 1:SELECT gp_inject_fault_infinite('checkpoint_after_redo_calculated', 'suspend', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content = 1; gp_inject_fault_infinite @@ -25,14 +25,14 @@ CREATE (1 row) 2&:CHECKPOINT; 3:INSERT INTO t VALUES (1, 0); -INSERT 1 +INSERT 0 1 -- Force WAL to switch wal files explicitly -- start_ignore 1U:SELECT pg_switch_wal(); -- end_ignore 3:INSERT INTO t SELECT 0, i FROM generate_series(1, 25)i; -INSERT 25 +INSERT 0 25 1:SELECT 
gp_inject_fault_infinite('checkpoint_after_redo_calculated', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content = 1; gp_inject_fault_infinite diff --git a/src/test/isolation2/expected/segwalrep/fts_unblock_primary.out b/src/test/isolation2/expected/segwalrep/fts_unblock_primary.out index 5f1afd5134a..a632c85069f 100644 --- a/src/test/isolation2/expected/segwalrep/fts_unblock_primary.out +++ b/src/test/isolation2/expected/segwalrep/fts_unblock_primary.out @@ -38,12 +38,12 @@ select content, role, preferred_role, status from gp_segment_configuration; -- create table and show commits are not blocked create table fts_unblock_primary (a int) distributed by (a); -CREATE +CREATE TABLE -- This case aims to insert a tuple to seg0. -- Under jump consistent hash, int value 4 should -- be on seg0. insert into fts_unblock_primary values (4); -INSERT 1 +INSERT 0 1 -- skip FTS probes always select gp_inject_fault('fts_probe', 'reset', 1); @@ -74,12 +74,12 @@ select gp_request_fts_probe_scan(); 2: begin; BEGIN 2: insert into fts_unblock_primary values (5); -INSERT 1 +INSERT 0 1 2&: commit; -- this should not block due to direct dispatch to primary with active synced mirror insert into fts_unblock_primary values (2); -INSERT 1 +INSERT 0 1 -- resume FTS probes select gp_inject_fault('fts_probe', 'reset', 1); @@ -159,9 +159,9 @@ select gp_inject_fault_infinite('initialize_wal_sender', 'suspend', dbid) from g -1U: select pg_ctl_start((select datadir from gp_segment_configuration c where c.role='m' and c.content=2), (select port from gp_segment_configuration where content = 2 and preferred_role = 'm')); pg_ctl_start -------------------------------------------------- - waiting for server to start done server started + (1 row) select gp_wait_until_triggered_fault('initialize_wal_sender', 1, dbid) from gp_segment_configuration where role='p' and content=2; gp_wait_until_triggered_fault @@ -212,7 +212,7 @@ select content, role, preferred_role, mode, status from 
gp_segment_configuration -- everything is back to normal insert into fts_unblock_primary select i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 -- synchronous_standby_names should be back to its original value on the primary 2U: show synchronous_standby_names; diff --git a/src/test/isolation2/expected/segwalrep/master_wal_switch.out b/src/test/isolation2/expected/segwalrep/master_wal_switch.out index 1247ccfe028..dbbcaec01cd 100644 --- a/src/test/isolation2/expected/segwalrep/master_wal_switch.out +++ b/src/test/isolation2/expected/segwalrep/master_wal_switch.out @@ -30,11 +30,11 @@ SELECT application_name, state FROM pg_stat_replication; -- with a command that generates WAL in between the invocations are -- suffice to generate new WAL file. CREATE TEMP TABLE walfile(fname text) DISTRIBUTED BY (fname); -CREATE +CREATE TABLE INSERT INTO walfile SELECT pg_walfile_name(pg_switch_wal()); -INSERT 1 -CREATE TABLE master_wal_dummy(); -CREATE +INSERT 0 1 +CREATE TABLE coordinator_wal_dummy(); +CREATE TABLE -- This should return false, indicating current WAL segment is -- different than what was previously recorded walfile table. 
SELECT fname = pg_walfile_name(pg_switch_wal()) FROM walfile; diff --git a/src/test/isolation2/expected/segwalrep/mirror_promotion.out b/src/test/isolation2/expected/segwalrep/mirror_promotion.out index 7cf2f7df8d3..d1b04138291 100644 --- a/src/test/isolation2/expected/segwalrep/mirror_promotion.out +++ b/src/test/isolation2/expected/segwalrep/mirror_promotion.out @@ -88,6 +88,17 @@ select content, preferred_role, role, status, mode from gp_segment_configuration 0 | p | m | u | s (2 rows) +-- set GUCs to speed-up the test +alter system set gp_fts_probe_retries to 2; +ALTER SYSTEM +alter system set gp_fts_probe_timeout to 5; +ALTER SYSTEM +select pg_reload_conf(); + pg_reload_conf +---------------- + t +(1 row) + -- start_ignore select dbid from gp_segment_configuration where content = 0 and role = 'p'; dbid @@ -139,9 +150,9 @@ select pg_sleep(2); -- start_ignore -- reset GUCs alter system set gp_fts_probe_retries to default; -ALTER +ALTER SYSTEM alter system set gp_fts_probe_timeout to default; -ALTER +ALTER SYSTEM select pg_reload_conf(); pg_reload_conf ---------------- @@ -169,9 +180,9 @@ select pg_sleep(2); -- end_ignore (exited with code 0) create tablespace mirror_promotion_tablespace location '/tmp/mirror_promotion_tablespace_loc'; -CREATE +CREATE TABLESPACE create table mirror_promotion_tblspc_heap_table (a int) tablespace mirror_promotion_tablespace; -CREATE +CREATE TABLE -- -- now, let's fully recover the mirror !\retcode gprecoverseg -aF --no-progress; @@ -180,9 +191,9 @@ CREATE (exited with code 0) drop table mirror_promotion_tblspc_heap_table; -DROP +DROP TABLE drop tablespace mirror_promotion_tablespace; -DROP +DROP TABLESPACE -- loop while segments come in sync select wait_until_all_segments_synchronized(); diff --git a/src/test/isolation2/expected/segwalrep/recoverseg_from_file.out b/src/test/isolation2/expected/segwalrep/recoverseg_from_file.out index 00921bb1b3e..265e8f0d46a 100644 --- 
a/src/test/isolation2/expected/segwalrep/recoverseg_from_file.out +++ b/src/test/isolation2/expected/segwalrep/recoverseg_from_file.out @@ -15,7 +15,7 @@ -- create or replace function generate_recover_config_file(datadir text, port text) returns void as $$ import io import os myhost = os.uname()[1] srcConfig = myhost + '|' + port + '|' + datadir dstConfig = myhost + '|' + port + '|' + datadir + 'temp' configStr = srcConfig + ' ' + dstConfig f = open("/tmp/recover_config_file1", "w") f.write(configStr) f.close() configStr = dstConfig + ' ' + srcConfig f = open("/tmp/recover_config_file2", "w") f.write(configStr) f.close() $$ language plpython3u; -CREATE +CREATE FUNCTION SELECT dbid, role, preferred_role, content, status FROM gp_segment_configuration order by dbid; dbid | role | preferred_role | content | status diff --git a/src/test/isolation2/expected/segwalrep/replication_keeps_crash.out b/src/test/isolation2/expected/segwalrep/replication_keeps_crash.out index ae4e4c9f290..b5bc15f9bef 100644 --- a/src/test/isolation2/expected/segwalrep/replication_keeps_crash.out +++ b/src/test/isolation2/expected/segwalrep/replication_keeps_crash.out @@ -10,6 +10,10 @@ -- More details please refer to FTSGetReplicationDisconnectTime. -- modify fts gucs to speed up the test. +1: alter system set gp_fts_probe_interval to 10; +ALTER SYSTEM +1: alter system set gp_fts_replication_attempt_count to 3; +ALTER SYSTEM 1: select pg_reload_conf(); pg_reload_conf ---------------- @@ -63,7 +67,7 @@ select gp_request_fts_probe_scan(); -- Without gp_fts_replication_attempt_count mirror will continuously connect and re-connect and -- be in grace period to not be marked down. 1<: <... 
completed> -CREATE +CREATE TABLE -- expect: to see the content 0, mirror is mark down select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; @@ -106,8 +110,12 @@ SELECT role, preferred_role, content, status FROM gp_segment_configuration; (8 rows) drop table mirror_block_t1; -DROP +DROP TABLE +1: alter system reset gp_fts_probe_interval; +ALTER SYSTEM +1: alter system reset gp_fts_replication_attempt_count; +ALTER SYSTEM 1: select pg_reload_conf(); pg_reload_conf ---------------- diff --git a/src/test/isolation2/expected/segwalrep/select_throttle.out b/src/test/isolation2/expected/segwalrep/select_throttle.out index 1d722085938..6c3bddd4050 100644 --- a/src/test/isolation2/expected/segwalrep/select_throttle.out +++ b/src/test/isolation2/expected/segwalrep/select_throttle.out @@ -13,7 +13,7 @@ -- set wait_for_replication_threshold to 1kB for quicker test ALTER SYSTEM SET wait_for_replication_threshold = 1; -ALTER +ALTER SYSTEM SELECT pg_reload_conf(); pg_reload_conf ---------------- @@ -21,13 +21,13 @@ SELECT pg_reload_conf(); (1 row) CREATE TABLE select_no_throttle(a int) DISTRIBUTED BY (a); -CREATE +CREATE TABLE INSERT INTO select_no_throttle SELECT generate_series (1, 10); -INSERT 10 +INSERT 0 10 CREATE TABLE select_throttle(a int) DISTRIBUTED BY (a); -CREATE +CREATE TABLE INSERT INTO select_throttle SELECT generate_series (1, 900000); -INSERT 900000 +INSERT 0 900000 -- Enable tuple hints so that buffer will be marked dirty upon a hint bit change -- (so that we don't have to wait for the tuple to age. 
See logic in markDirty) @@ -134,9 +134,9 @@ SELECT wait_until_all_segments_synchronized(); 1U: SET gp_disable_tuple_hints=off; SET Truncate select_throttle; -TRUNCATE -INSERT INTO select_throttle SELECT generate_series (1, 100000); -INSERT 100000 +TRUNCATE TABLE +INSERT INTO select_throttle SELECT generate_series (1, 900000); +INSERT 0 900000 -- flush the data to disk checkpoint; CHECKPOINT @@ -234,7 +234,7 @@ RESET (1 row) ALTER SYSTEM RESET wait_for_replication_threshold; -ALTER +ALTER SYSTEM SELECT pg_reload_conf(); pg_reload_conf ---------------- diff --git a/src/test/isolation2/expected/segwalrep/twophase_tolerance_with_mirror_promotion.out b/src/test/isolation2/expected/segwalrep/twophase_tolerance_with_mirror_promotion.out index d27c4bbae4e..b48865ca370 100644 --- a/src/test/isolation2/expected/segwalrep/twophase_tolerance_with_mirror_promotion.out +++ b/src/test/isolation2/expected/segwalrep/twophase_tolerance_with_mirror_promotion.out @@ -159,7 +159,7 @@ LINE 1: INSERT INTO tolerance_test_table VALUES(42); Success: (1 row) 1<: <... completed> -CREATE +CREATE TABLE 1:SELECT gp_inject_fault('transaction_abort_after_distributed_prepared', 'reset', dbid) FROM gp_segment_configuration WHERE content = -1 AND role = 'p'; gp_inject_fault @@ -178,7 +178,7 @@ CREATE p | m (2 rows) 1:INSERT INTO tolerance_test_table VALUES(42); -INSERT 1 +INSERT 0 1 -- Scenario 3: Commit-Prepare received on primary but not acknowledged to master -- NOTICE: Don't use session 2 again because it's cached gang is invalid @@ -220,7 +220,7 @@ INSERT 1 -- end_ignore (exited with code 0) 1<: <... completed> -DROP +DROP TABLE -- Use new connection session. 
This helps is to make sure master is up and -- running, even if in worst case the above Drop command commit-prepared retries diff --git a/src/test/isolation2/expected/select_dropped_table.out b/src/test/isolation2/expected/select_dropped_table.out index 1de5825b246..402f8ea4ac8 100644 --- a/src/test/isolation2/expected/select_dropped_table.out +++ b/src/test/isolation2/expected/select_dropped_table.out @@ -18,16 +18,16 @@ -- This test is used to confirm the fix. 1: create table tab_select_dropped_table (c int); -CREATE +CREATE TABLE 1: begin; BEGIN 1: drop table tab_select_dropped_table; -DROP +DROP TABLE 2&: select * from tab_select_dropped_table; 1: end; -END +COMMIT 2<: <... completed> ERROR: relation "tab_select_dropped_table" does not exist LINE 1: select * from tab_select_dropped_table; diff --git a/src/test/isolation2/expected/setup.out b/src/test/isolation2/expected/setup.out index 0c9c28cea9e..3be43dcb0eb 100644 --- a/src/test/isolation2/expected/setup.out +++ b/src/test/isolation2/expected/setup.out @@ -1,5 +1,5 @@ create or replace language plpython3u; -CREATE +CREATE LANGUAGE -- Helper function, to call either __gp_aoseg, or gp_aocsseg, depending -- on whether the table is row- or column-oriented. This allows us to @@ -9,7 +9,7 @@ CREATE -- and thinks that a ';' at end of line ends the command. The /* in func */ -- comments at the end of each line thwarts that. 
CREATE OR REPLACE FUNCTION gp_ao_or_aocs_seg(rel regclass, segment_id OUT integer, segno OUT integer, tupcount OUT bigint, modcount OUT bigint, formatversion OUT smallint, state OUT smallint) RETURNS SETOF record as $$ declare amname_var text; /* in func */ begin /* in func */ select amname into amname_var from pg_class c, pg_am am where c.relam = am.oid and c.oid = rel; /* in func */ if amname_var = 'ao_column' then /* in func */ for segment_id, segno, tupcount, modcount, formatversion, state in SELECT DISTINCT x.segment_id, x.segno, x.tupcount, x.modcount, x.formatversion, x.state FROM gp_toolkit.__gp_aocsseg(rel) x loop /* in func */ return next; /* in func */ end loop; /* in func */ elsif amname_var = 'ao_row' then /* in func */ for segment_id, segno, tupcount, modcount, formatversion, state in SELECT x.segment_id, x.segno, x.tupcount, x.modcount, x.formatversion, x.state FROM gp_toolkit.__gp_aoseg(rel) x loop /* in func */ return next; /* in func */ end loop; /* in func */ else /* in func */ raise '% is not an AO_ROW or AO_COLUMN table', rel::text; /* in func */ end if; /* in func */ end; /* in func */ $$ LANGUAGE plpgsql; -CREATE +CREATE FUNCTION -- Show locks in master and in segments. Because the number of segments -- in the cluster depends on configuration, we print only summary information @@ -17,17 +17,17 @@ CREATE -- we print that as a special case, but otherwise we just print "n segments", -- meaning the relation is locked on more than one segment. 
create or replace view locktest_master as select coalesce( case when relname like 'pg_toast%index' then 'toast index' when relname like 'pg_toast%' then 'toast table' when relname like 'pg_aoseg%' then 'aoseg table' when relname like 'pg_aovisimap%index' then 'aovisimap index' when relname like 'pg_aovisimap%' then 'aovisimap table' else relname end, 'dropped table'), mode, locktype, 'master'::text as node from pg_locks l left outer join pg_class c on ((l.locktype = 'append-only segment file' and l.relation = c.relfilenode) or (l.locktype != 'append-only segment file' and l.relation = c.oid)), pg_database d where relation is not null and l.database = d.oid and (relname <> 'gp_fault_strategy' and relname != 'locktest_master' or relname is NULL) and d.datname = current_database() and l.gp_segment_id = -1 group by l.gp_segment_id, relation, relname, locktype, mode order by 1, 3, 2; -CREATE +CREATE VIEW create or replace view locktest_segments_dist as select relname, mode, locktype, l.gp_segment_id as node, relation from pg_locks l left outer join pg_class c on ((l.locktype = 'append-only segment file' and l.relation = c.relfilenode) or (l.locktype != 'append-only segment file' and l.relation = c.oid)), pg_database d where relation is not null and l.database = d.oid and (relname <> 'gp_fault_strategy' and relname != 'locktest_segments_dist' or relname is NULL) and d.datname = current_database() and l.gp_segment_id > -1 group by l.gp_segment_id, relation, relname, locktype, mode; -CREATE +CREATE VIEW create or replace view locktest_segments as SELECT coalesce( case when relname like 'pg_toast%index' then 'toast index' when relname like 'pg_toast%' then 'toast table' when relname like 'pg_aoseg%' then 'aoseg table' when relname like 'pg_aovisimap%index' then 'aovisimap index' when relname like 'pg_aovisimap%' then 'aovisimap table' else relname end, 'dropped table'), mode, locktype, case when count(*) = 1 then '1 segment' else 'n segments' end as node FROM 
gp_dist_random('locktest_segments_dist') group by relname, relation, mode, locktype; -CREATE +CREATE VIEW -- Helper function CREATE or REPLACE FUNCTION wait_until_waiting_for_required_lock (rel_name text, lmode text, segment_id integer) /*in func*/ RETURNS bool AS $$ declare retries int; /* in func */ begin /* in func */ retries := 1200; /* in func */ loop /* in func */ if (select not granted from pg_locks l where granted='f' and l.relation::regclass = rel_name::regclass and l.mode=lmode and l.gp_segment_id=segment_id) then /* in func */ return true; /* in func */ end if; /* in func */ if retries <= 0 then /* in func */ return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION -- @@ -40,7 +40,7 @@ create or replace function pg_ctl(datadir text, command text, command_mode text import subprocess if command == 'promote': cmd = 'pg_ctl promote -D %s' % datadir elif command in ('stop', 'restart'): cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir cmd = cmd + '-w -t 600 -m %s %s' % (command_mode, command) else: return 'Invalid command input' proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = proc.communicate() if proc.returncode == 0: return 'OK' else: raise PgCtlError(stdout.decode()+'|'+stderr.decode()) $$ language plpython3u; -CREATE +CREATE FUNCTION -- -- pg_ctl_start: @@ -52,8 +52,8 @@ CREATE -- datadir: data directory of process to target with `pg_ctl` -- port: which port the server should start on -- -create or replace function pg_ctl_start(datadir text, port bigint) returns text as $$ import subprocess cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir opts = '-p %d' % (port) opts = opts + ' -c gp_role=execute' cmd = cmd + '-o "%s" start' % opts return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode().replace('.', '') $$ language 
plpython3u; -CREATE +create or replace function pg_ctl_start(datadir text, port int) returns text as $$ import subprocess cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir opts = '-p %d' % (port) opts = opts + ' -c gp_role=execute' cmd = cmd + '-o "%s" start' % opts return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode().replace('.', '') $$ language plpython3u; +CREATE FUNCTION -- -- restart_primary_segments_containing_data_for(table_name text): @@ -62,7 +62,7 @@ CREATE -- Note: this does an immediate restart, which forces recovery -- create or replace function restart_primary_segments_containing_data_for(table_name text) returns setof integer as $$ declare segment_id integer; /* in func */ begin for segment_id in select * from primary_segments_containing_data_for(table_name) loop perform pg_ctl( (select get_data_directory_for(segment_id)), 'restart', 'immediate' ); /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION -- -- clean_restart_primary_segments_containing_data_for(table_name text): @@ -71,33 +71,33 @@ CREATE -- Note: this does a fast restart, which does not require recovery -- create or replace function clean_restart_primary_segments_containing_data_for(table_name text) returns setof integer as $$ declare segment_id integer; /* in func */ begin for segment_id in select * from primary_segments_containing_data_for(table_name) loop perform pg_ctl( (select get_data_directory_for(segment_id)), 'restart', 'fast' ); /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION create or replace function primary_segments_containing_data_for(table_name text) returns setof integer as $$ begin return query execute 'select distinct gp_segment_id from ' || table_name; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION create or replace function get_data_directory_for(segment_number int, segment_role text default 'p') returns 
text as $$ BEGIN return ( select datadir from gp_segment_configuration where role=segment_role and content=segment_number ); /* in func */ END; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION create or replace function master() returns setof gp_segment_configuration as $$ select * from gp_segment_configuration where role='p' and content=-1; /* in func */ $$ language sql; -CREATE +CREATE FUNCTION -create or replace function wait_until_segment_synchronized(segment_number int) returns text as $$ begin for i in 1..6000 loop if (select count(*) = 0 from gp_segment_configuration where content = segment_number and mode != 's') then return 'OK'; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform gp_request_fts_probe_scan(); /* in func */ end loop; /* in func */ return 'Fail'; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +create or replace function wait_until_segment_synchronized(segment_number int) returns text as $$ begin for i in 1..1200 loop if (select count(*) = 0 from gp_segment_configuration where content = segment_number and mode != 's') then return 'OK'; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform gp_request_fts_probe_scan(); /* in func */ end loop; /* in func */ return 'Fail'; /* in func */ end; /* in func */ $$ language plpgsql; +CREATE FUNCTION -create or replace function wait_until_all_segments_synchronized() returns text as $$ begin for i in 1..6000 loop if (select count(*) = 0 from gp_segment_configuration where content != -1 and mode != 's') then return 'OK'; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform gp_request_fts_probe_scan(); /* in func */ end loop; /* in func */ return 'Fail'; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +create or replace function wait_until_all_segments_synchronized() returns text as $$ begin /* no-op for a mirrorless cluster */ if (select count(*) = 0 from gp_segment_configuration 
where role = 'm') then return 'OK'; /* in func */ end if; /* in func */ for i in 1..1200 loop if (select count(*) = 0 from gp_segment_configuration where content != -1 and mode != 's') then return 'OK'; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform gp_request_fts_probe_scan(); /* in func */ end loop; /* in func */ return 'Fail'; /* in func */ end; /* in func */ $$ language plpgsql; +CREATE FUNCTION CREATE OR REPLACE FUNCTION is_query_waiting_for_syncrep(iterations int, check_query text) RETURNS bool AS $$ for i in range(iterations): results = plpy.execute("SELECT gp_execution_segment() AS content, query, wait_event\ FROM gp_dist_random('pg_stat_activity')\ WHERE gp_execution_segment() = 1 AND\ query = '%s' AND\ wait_event = 'SyncRep'" % check_query ) if results: return True return False $$ LANGUAGE plpython3u VOLATILE; -CREATE +CREATE FUNCTION create or replace function wait_for_replication_replay (segid int, retries int) returns bool as $$ declare i int; /* in func */ result bool; /* in func */ begin i := 0; /* in func */ -- Wait until the mirror/standby has replayed up to flush location loop SELECT flush_lsn = replay_lsn INTO result from gp_stat_replication where gp_segment_id = segid; /* in func */ if result then return true; /* in func */ end if; /* in func */ if i >= retries then return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform pg_stat_clear_snapshot(); /* in func */ i := i + 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION create or replace function wait_until_standby_in_state(targetstate text) returns text as $$ declare replstate text; /* in func */ i int; /* in func */ begin i := 0; /* in func */ while i < 1200 loop select state into replstate from pg_stat_replication; /* in func */ if replstate = targetstate then return replstate; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform 
pg_stat_clear_snapshot(); /* in func */ i := i + 1; /* in func */ end loop; /* in func */ return replstate; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION -- -- pg_basebackup: @@ -118,39 +118,39 @@ if xlog_method == 'stream': cmd += ' --wal-method stream' elif xlog_method == 'f cmd += ' --no-verify-checksums' try: # Unset PGAPPNAME so that the pg_stat_replication.application_name is not affected if os.getenv('PGAPPNAME') is not None: os.environ.pop('PGAPPNAME') results = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace(b'.', b'').decode() except subprocess.CalledProcessError as e: results = str(e) + "\ncommand output: " + (e.output.decode()) return results $$ language plpython3u; -CREATE +CREATE FUNCTION create or replace function count_of_items_in_directory(user_path text) returns text as $$ import subprocess cmd = 'ls {user_path}'.format(user_path=user_path) results = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace(b'.', b'').decode() return len([result for result in results.splitlines() if result != '']) $$ language plpython3u; -CREATE +CREATE FUNCTION create or replace function count_of_items_in_database_directory(user_path text, database_oid oid) returns int as $$ import subprocess import os directory = os.path.join(user_path, str(database_oid)) cmd = 'ls ' + directory results = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace(b'.', b'').decode() return len([result for result in results.splitlines() if result != '']) $$ language plpython3u; -CREATE +CREATE FUNCTION create or replace function validate_tablespace_symlink(datadir text, tablespacedir text, dbid int, tablespace_oid oid) returns boolean as $$ import os return os.readlink('%s/pg_tblspc/%d' % (datadir, tablespace_oid)) == ('%s/%d' % (tablespacedir, dbid)) $$ language plpython3u; -CREATE +CREATE FUNCTION -- This function is used to loop until master shutsdown, to make sure -- next command 
executed is only after restart and doesn't go through -- while PANIC is still being processed by master, as master continues -- to accept connections for a while despite undergoing PANIC. CREATE OR REPLACE FUNCTION wait_till_master_shutsdown() RETURNS void AS $$ DECLARE i int; /* in func */ BEGIN i := 0; /* in func */ while i < 120 loop i := i + 1; /* in func */ PERFORM pg_sleep(.5); /* in func */ end loop; /* in func */ END; /* in func */ $$ LANGUAGE plpgsql; -CREATE +CREATE FUNCTION -- Helper function that ensures stats collector receives stat from the latest operation. create or replace function wait_until_dead_tup_change_to(relid oid, stat_val_expected bigint) returns text as $$ declare stat_val int; /* in func */ i int; /* in func */ begin i := 0; /* in func */ while i < 1200 loop select pg_stat_get_dead_tuples(relid) into stat_val; /* in func */ if stat_val = stat_val_expected then /* in func */ return 'OK'; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform pg_stat_clear_snapshot(); /* in func */ i := i + 1; /* in func */ end loop; /* in func */ return 'Fail'; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION -- Helper function that ensures mirror of the specified contentid is down. 
create or replace function wait_for_mirror_down(contentid smallint, timeout_sec integer) returns bool as $$ declare i int; /* in func */ begin /* in func */ i := 0; /* in func */ loop /* in func */ perform gp_request_fts_probe_scan(); /* in func */ if (select count(1) from gp_segment_configuration where role='m' and content=$1 and status='d') = 1 then /* in func */ return true; /* in func */ end if; /* in func */ if i >= 2 * $2 then /* in func */ return false; /* in func */ end if; /* in func */ perform pg_sleep(0.5); /* in func */ i = i + 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION -- Helper function that ensures stats collector receives stat from the latest operation. create or replace function wait_until_vacuum_count_change_to(relid oid, stat_val_expected bigint) returns text as $$ declare stat_val int; /* in func */ i int; /* in func */ begin i := 0; /* in func */ while i < 1200 loop select pg_stat_get_vacuum_count(relid) into stat_val; /* in func */ if stat_val = stat_val_expected then /* in func */ return 'OK'; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform pg_stat_clear_snapshot(); /* in func */ i := i + 1; /* in func */ end loop; /* in func */ return 'Fail'; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION -- Helper function to get the number of blocks in a relation. CREATE OR REPLACE FUNCTION nblocks(rel regclass) RETURNS int AS $$ /* in func */ BEGIN /* in func */ RETURN pg_relation_size(rel) / current_setting('block_size')::int; /* in func */ END; $$ /* in func */ LANGUAGE PLPGSQL; -CREATE +CREATE FUNCTION -- Helper function to populate logical heap pages in a certain block sequence. -- Can be used for both heap and AO/CO tables. 
The target block sequence into @@ -165,4 +165,4 @@ CREATE -- Note: while using this with AO/CO tables, please account for how the block -- sequences start/end based on the concurrency level (see AOSegmentGet_startHeapBlock()) CREATE OR REPLACE FUNCTION populate_pages(relname text, value int, upto tid) RETURNS VOID AS $$ /* in func */ DECLARE curtid tid; /* in func */ BEGIN /* in func */ LOOP /* in func */ EXECUTE format('INSERT INTO %I VALUES($1) RETURNING ctid', relname) INTO curtid USING value; /* in func */ EXIT WHEN curtid > upto; /* in func */ END LOOP; /* in func */ END; $$ /* in func */ LANGUAGE PLPGSQL; -CREATE +CREATE FUNCTION diff --git a/src/test/isolation2/expected/spilling_hashagg.out b/src/test/isolation2/expected/spilling_hashagg.out index 8f01e1e2416..a7311914d28 100644 --- a/src/test/isolation2/expected/spilling_hashagg.out +++ b/src/test/isolation2/expected/spilling_hashagg.out @@ -9,7 +9,7 @@ -- Until then this test doesn't actually test spilling. CREATE TABLE test_src_tbl AS WITH cte1 AS ( SELECT field5 from generate_series(1,1000) field5 ) SELECT field5 % 100 AS a, field5 % 100 + 1 AS b FROM cte1 DISTRIBUTED BY (a); -CREATE 1000 +SELECT 1000 ANALYZE test_src_tbl; ANALYZE @@ -26,7 +26,7 @@ SELECT gp_inject_fault('force_hashagg_stream_hashtable', 'skip', '', '', '', 100 Success: (4 rows) CREATE TABLE test_hashagg_on AS SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY a; -CREATE 100 +SELECT 100 EXPLAIN (costs off) SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY a; QUERY PLAN -------------------------------------------- @@ -43,7 +43,7 @@ EXPLAIN (costs off) SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY set optimizer_enable_hashagg=off; SET CREATE TABLE test_hashagg_off AS SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY a; -CREATE 100 +SELECT 100 EXPLAIN (costs off) SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY a; QUERY PLAN -------------------------------------------- diff --git 
a/src/test/isolation2/expected/spilling_hashagg_optimizer.out b/src/test/isolation2/expected/spilling_hashagg_optimizer.out index 8f0f54e200a..fcafcdd58e1 100644 --- a/src/test/isolation2/expected/spilling_hashagg_optimizer.out +++ b/src/test/isolation2/expected/spilling_hashagg_optimizer.out @@ -9,7 +9,7 @@ -- Until then this test doesn't actually test spilling. CREATE TABLE test_src_tbl AS WITH cte1 AS ( SELECT field5 from generate_series(1,1000) field5 ) SELECT field5 % 100 AS a, field5 % 100 + 1 AS b FROM cte1 DISTRIBUTED BY (a); -CREATE 1000 +SELECT 1000 ANALYZE test_src_tbl; ANALYZE @@ -26,7 +26,7 @@ SELECT gp_inject_fault('force_hashagg_stream_hashtable', 'skip', '', '', '', 100 Success: (4 rows) CREATE TABLE test_hashagg_on AS SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY a; -CREATE 100 +SELECT 100 EXPLAIN (costs off) SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY a; QUERY PLAN -------------------------------------------- @@ -43,7 +43,7 @@ EXPLAIN (costs off) SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY set optimizer_enable_hashagg=off; SET CREATE TABLE test_hashagg_off AS SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY a; -CREATE 100 +SELECT 100 EXPLAIN (costs off) SELECT a, COUNT(DISTINCT b) AS b FROM test_src_tbl GROUP BY a; QUERY PLAN -------------------------------------------- diff --git a/src/test/isolation2/expected/standby_replay_dtx_info.out b/src/test/isolation2/expected/standby_replay_dtx_info.out index 3d8cfc26dd9..9782a3fa612 100644 --- a/src/test/isolation2/expected/standby_replay_dtx_info.out +++ b/src/test/isolation2/expected/standby_replay_dtx_info.out @@ -8,7 +8,7 @@ create or replace function wait_for_standby_replay (retries int) returns bool as $$ declare i int; /* in func */ standby_is_up bool; /* in func */ result bool; /* in func */ begin i := 0; /* in func */ -- Wait until the mirror/standby has replayed up to flush location loop SELECT flush_lsn = replay_lsn INTO result from 
pg_stat_replication; /* in func */ if not found then return false; /* in func */ end if; /* in func */ if result then return true; /* in func */ end if; /* in func */ if i >= retries then return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform pg_stat_clear_snapshot(); /* in func */ i := i + 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION select wait_for_standby_replay(1200); wait_for_standby_replay @@ -16,7 +16,7 @@ select wait_for_standby_replay(1200); t (1 row) create table test_dtx_standby_tbl(c1 int); -CREATE +CREATE TABLE -- We have just created a checkpoint. The next automatic checkpoint -- will be triggered only after 5 minutes or after CheckPointSegments @@ -58,7 +58,7 @@ CHECKPOINT Success: (1 row) 2<: <... completed> -INSERT 10 +INSERT 0 10 1: select gp_inject_fault_infinite('checkpoint_after_redo_calculated', 'reset', dbid) from gp_segment_configuration where content = -1 and role = 'p'; gp_inject_fault_infinite -------------------------- @@ -76,7 +76,7 @@ CHECKPOINT -- this DTX might overflow gxacts array 2: insert into test_dtx_standby_tbl select generate_series(11,20); -INSERT 10 +INSERT 0 10 -- Wait standby to replay all XLOG select wait_for_standby_replay(1200); @@ -91,6 +91,6 @@ select gp_inject_fault_infinite('standby_gxacts_overflow', 'reset', dbid) from g Success: (1 row) drop table test_dtx_standby_tbl; -DROP +DROP TABLE drop function wait_for_standby_replay(int); -DROP +DROP FUNCTION diff --git a/src/test/isolation2/expected/starve_case.out b/src/test/isolation2/expected/starve_case.out index 51b9db4051d..265076d622a 100644 --- a/src/test/isolation2/expected/starve_case.out +++ b/src/test/isolation2/expected/starve_case.out @@ -7,9 +7,9 @@ -- create table starve (c int); -CREATE +CREATE TABLE create table starve_helper (name varchar, sessionid int); -CREATE +CREATE TABLE -- Function to access a table so that AccessShare lock is requested on -- the 
table. Use a non-SQL language for this function so that parser @@ -19,13 +19,13 @@ CREATE -- ENTRY_DB_SINGLETON reader that executes this function won't go -- through the waitMask conflict check in LockAcquire(). CREATE OR REPLACE FUNCTION function_starve_volatile(x int) /*in func*/ RETURNS int AS $$ /*in func*/ declare /*in func*/ v int; /*in func*/ BEGIN /*in func*/ SELECT count(c) into v FROM starve; /*in func*/ RETURN $1 + 1; /*in func*/ END $$ /*in func*/ LANGUAGE plpgsql VOLATILE MODIFIES SQL DATA; -CREATE +CREATE FUNCTION -- Function to wait until a specific session is reported as waiting on -- a lock. The session's mppsessionid is obtained from starve_helper -- table. Timeout if no locks are awaited within 2 seconds. CREATE OR REPLACE FUNCTION wait_until_locks_awaited(sess_name varchar) /*in func*/ RETURNS bool AS $$ /*in func*/ declare /*in func*/ num_awaited int := 0; /*in func*/ iterations int := 0; /*in func*/ sessions_waiting_for_locks int[]; /*in func*/ begin /*in func*/ while num_awaited = 0 and iterations < 20 loop /*in func*/ select array_agg(mppsessionid) into sessions_waiting_for_locks from pg_locks where granted = false and gp_segment_id = -1; /*in func*/ select count(*) into num_awaited from starve_helper s where /*in func*/ s.name = sess_name and s.sessionid = ANY (sessions_waiting_for_locks); /*in func*/ perform pg_sleep(.1); /*in func*/ iterations := iterations + 1; /*in func*/ end loop; /*in func*/ return num_awaited > 0; /*in func*/ end $$ /*in func*/ LANGUAGE plpgsql STABLE; -CREATE +CREATE FUNCTION -- Hold access shared lock, so that session2 must wait. 1: begin; @@ -36,7 +36,7 @@ BEGIN (0 rows) 2: insert into starve_helper select 'session2', setting::int from pg_settings where name = 'gp_session_id'; -INSERT 1 +INSERT 0 1 -- Wait on access exclusive lock. 
2: begin; BEGIN @@ -49,7 +49,7 @@ select wait_until_locks_awaited('session2'); (1 row) 3: insert into starve_helper select 'session3', setting::int from pg_settings where name = 'gp_session_id'; -INSERT 1 +INSERT 0 1 -- ENTRY_DB_SINGLETON reader requests access share lock on table -- starve. The lockmode conflicts with already existing waiter's -- lockmode (access exclusive). And the writer is not holding any @@ -79,7 +79,7 @@ COMMIT -- session2 is granted the lock on starve table first. 2<: <... completed> -ALTER +ALTER TABLE 2: select mode from pg_locks where granted=false and relation='starve'::regclass and gp_segment_id=-1; mode ----------------- @@ -94,7 +94,7 @@ COMMIT ERROR: column "c" does not exist (entry db 127.0.0.1:15432 pid=94829) CONTEXT: PL/pgSQL function "function_starve_volatile" line 5 at SQL statement 3: commit; -COMMIT +ROLLBACK -- @@ -102,7 +102,7 @@ COMMIT -- on a lock in case of waitMask conflict. -- truncate table starve_helper; -TRUNCATE +TRUNCATE TABLE -- Hold access shared lock, so that session2 must wait. 1: begin; @@ -113,7 +113,7 @@ BEGIN (0 rows) 2: insert into starve_helper select 'session2', setting::int from pg_settings where name = 'gp_session_id'; -INSERT 1 +INSERT 0 1 -- Wait on access exclusive lock. 2: begin; BEGIN @@ -126,7 +126,7 @@ select wait_until_locks_awaited('session2'); (1 row) 3: insert into starve_helper select 'session3', setting::int from pg_settings where name = 'gp_session_id'; -INSERT 1 +INSERT 0 1 3: begin; BEGIN -- Wait on RowExclusiveLock on table starve because session2 is @@ -143,7 +143,7 @@ select wait_until_locks_awaited('session3'); COMMIT -- Session2 must go first. 2<: <... completed> -ALTER +ALTER TABLE -- Ensure that session3 is still waiting. 2: select mode from pg_locks where granted=false and relation='starve'::regclass and gp_segment_id=-1; mode @@ -155,6 +155,6 @@ COMMIT -- Session3 gets the lock only after session2 commits. 3<: <... 
completed> -INSERT 2 +INSERT 0 2 3: commit; COMMIT diff --git a/src/test/isolation2/expected/sync_guc.out b/src/test/isolation2/expected/sync_guc.out index b0d042fc8e6..6e87ca588e9 100644 --- a/src/test/isolation2/expected/sync_guc.out +++ b/src/test/isolation2/expected/sync_guc.out @@ -1,10 +1,10 @@ -- TEST 1: Fix Github issue https://github.com/greenplum-db/gpdb/issues/9208 1: create schema sync_np1; -CREATE +CREATE SCHEMA 1: create schema sync_np2; -CREATE +CREATE SCHEMA 1: CREATE OR REPLACE FUNCTION public.segment_setting(guc text) RETURNS SETOF text EXECUTE ON ALL SEGMENTS AS $$ BEGIN RETURN NEXT pg_catalog.current_setting(guc); END $$ LANGUAGE plpgsql; -CREATE +CREATE FUNCTION 1q: ... -- The SET command will create a Gang on the primaries, and the GUC -- values should be the same on all QD/QEs. @@ -30,18 +30,18 @@ RESET "$user", public (3 rows) 2: create or replace function sync_f1() returns int as $$ select 1234; $$language sql; -CREATE +CREATE FUNCTION 2: select sync_f1(); sync_f1 --------- 1234 (1 row) 2: drop function sync_f1(); -DROP +DROP FUNCTION 2: drop schema sync_np1; -DROP +DROP SCHEMA 2: drop schema sync_np2; -DROP +DROP SCHEMA 2q: ... -- TEST 2: Fix Github issue https://github.com/greenplum-db/gpdb/issues/685 @@ -66,9 +66,9 @@ RESET off (3 rows) 3: create table sync_t1(i int); -CREATE +CREATE TABLE 3: insert into sync_t1 select i from generate_series(1,10)i; -INSERT 10 +INSERT 0 10 3: delete from sync_t1; DELETE 10 3: select * from sync_t1; @@ -76,19 +76,19 @@ DELETE 10 --- (0 rows) 3: drop table sync_t1; -DROP +DROP TABLE 3q: ... 1: drop function public.segment_setting(guc text); -DROP +DROP FUNCTION 1q: ... 
-- TEST 3: make sure all QEs call RESET if there are more than 1 QE of the session -- in the primary 4: create temp table sync_t11(a int, b int) distributed by(b); -CREATE +CREATE TABLE 4: create temp table sync_t12(a int, b int) distributed by(a); -CREATE +CREATE TABLE -- The join will create 2 slices on each primary, and 1 entrydb on the coordinator. -- So, every primary and the coordinator should trigger 2 SET/RESET diff --git a/src/test/isolation2/expected/tcp_ic_teardown.out b/src/test/isolation2/expected/tcp_ic_teardown.out index 6b6d3ac8d2d..2610999fbdb 100644 --- a/src/test/isolation2/expected/tcp_ic_teardown.out +++ b/src/test/isolation2/expected/tcp_ic_teardown.out @@ -6,9 +6,9 @@ CREATE FUNCTION set_gp_ic_type(ic_type text) RETURNS VOID as $$ import os cmd = CREATE CREATE TABLE tcp_ic_teardown(i int); -CREATE +CREATE TABLE INSERT INTO tcp_ic_teardown SELECT generate_series(1, 5); -INSERT 5 +INSERT 0 5 -- Save current IC type before we set it to 'tcp', so we can revert it at the -- end of the test. diff --git a/src/test/isolation2/expected/terminate_in_gang_creation.out b/src/test/isolation2/expected/terminate_in_gang_creation.out index a45558c9439..f430bab0f32 100644 --- a/src/test/isolation2/expected/terminate_in_gang_creation.out +++ b/src/test/isolation2/expected/terminate_in_gang_creation.out @@ -14,7 +14,7 @@ SELECT gp_inject_fault_infinite('before_orphaned_check', 'skip', dbid) FROM gp_s Success: (1 row) ALTER SYSTEM SET gp_dtx_recovery_interval to 5; -ALTER +ALTER SYSTEM SELECT pg_reload_conf(); pg_reload_conf ---------------- @@ -46,9 +46,9 @@ SELECT gp_wait_until_triggered_fault('before_orphaned_check', 1, dbid) FROM gp_s -- #17 AllocateWriterGang DROP TABLE IF EXISTS foo; -DROP +DROP TABLE CREATE TABLE foo (c1 int, c2 int) DISTRIBUTED BY (c1); -CREATE +CREATE TABLE 10: BEGIN; BEGIN @@ -92,7 +92,7 @@ server closed the connection unexpectedly 10q: ... DROP TABLE foo; -DROP +DROP TABLE -- Test a bug that if cached idle primary QE is gone (e.g. 
after kill-9, pg_ctl -- restart, etc), a new query needs a new created reader gang might fail with @@ -127,7 +127,7 @@ CHECKPOINT 11: SET gp_vmem_idle_resource_timeout TO 0; SET 11: CREATE TABLE foo (c1 int, c2 int) DISTRIBUTED BY (c1); -CREATE +CREATE TABLE -- ORCA optimizes value scan so there is no additional reader gang in below INSERT. 11: SET optimizer = off; SET @@ -145,9 +145,9 @@ DETAIL: FATAL: reader could not find writer proc entry DETAIL: lock [0,1260] AccessShareLock 0. Probably because writer gang is gone somehow. Maybe try rerunning. (seg1 127.0.0.1:7004) 11: INSERT INTO foo values(2),(1); -INSERT 2 +INSERT 0 2 11: DROP TABLE foo; -DROP +DROP TABLE 11: RESET gp_vmem_idle_resource_timeout; RESET @@ -162,7 +162,7 @@ SELECT gp_inject_fault_infinite('before_orphaned_check', 'reset', dbid) FROM gp_ Success: (1 row) 2: ALTER SYSTEM RESET gp_dtx_recovery_interval; -ALTER +ALTER SYSTEM 2: SELECT pg_reload_conf(); pg_reload_conf ---------------- diff --git a/src/test/isolation2/expected/truncate_after_ao_vacuum_skip_drop.out b/src/test/isolation2/expected/truncate_after_ao_vacuum_skip_drop.out index 3ca27f0390d..01aa4f3a9a2 100644 --- a/src/test/isolation2/expected/truncate_after_ao_vacuum_skip_drop.out +++ b/src/test/isolation2/expected/truncate_after_ao_vacuum_skip_drop.out @@ -2,9 +2,9 @@ -- AppendOnlyHash after doing a TRUNCATE. 
CREATE TABLE truncate_after_ao_vacuum_skip_drop (a INT, b INT) WITH (appendonly=true); -CREATE +CREATE TABLE INSERT INTO truncate_after_ao_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 DELETE FROM truncate_after_ao_vacuum_skip_drop; DELETE 10 @@ -27,7 +27,7 @@ BEGIN 2: VACUUM truncate_after_ao_vacuum_skip_drop; VACUUM 1: END; -END +COMMIT -- We should see an aoseg in state 2 (AOSEG_STATE_AWAITING_DROP) 0U: SELECT segno, state FROM gp_toolkit.__gp_aoseg('truncate_after_ao_vacuum_skip_drop'); @@ -39,7 +39,7 @@ END -- The AO relation should be rewritten and AppendOnlyHash entry invalidated 1: TRUNCATE truncate_after_ao_vacuum_skip_drop; -TRUNCATE +TRUNCATE TABLE 0U: SELECT segno, state FROM gp_toolkit.__gp_aoseg('truncate_after_ao_vacuum_skip_drop'); segno | state -------+------- @@ -48,7 +48,7 @@ TRUNCATE -- Check if insert goes into segno 1 instead of segno 2. If it did not -- go into segno 1, there was a leak in the AppendOnlyHash entry. 1: INSERT INTO truncate_after_ao_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 0U: SELECT segno, tupcount > 0, state FROM gp_toolkit.__gp_aoseg('truncate_after_ao_vacuum_skip_drop'); segno | ?column? 
| state -------+----------+------- diff --git a/src/test/isolation2/expected/uao_crash_compaction_column.out b/src/test/isolation2/expected/uao_crash_compaction_column.out index 22d455582f7..5d9ca42981f 100644 --- a/src/test/isolation2/expected/uao_crash_compaction_column.out +++ b/src/test/isolation2/expected/uao_crash_compaction_column.out @@ -31,24 +31,24 @@ SET ao_column (1 row) 3:DROP TABLE IF EXISTS crash_before_cleanup_phase CASCADE; -DROP +DROP TABLE 3:CREATE TABLE crash_before_cleanup_phase (a INT, b INT, c CHAR(20)); -CREATE +CREATE TABLE 3:CREATE INDEX crash_before_cleanup_phase_index ON crash_before_cleanup_phase(b); -CREATE +CREATE INDEX 3:INSERT INTO crash_before_cleanup_phase SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 3:DELETE FROM crash_before_cleanup_phase WHERE a < 4; DELETE 3 -- for crash_vacuum_in_appendonly_insert 3:DROP TABLE IF EXISTS crash_vacuum_in_appendonly_insert CASCADE; -DROP +DROP TABLE 3:CREATE TABLE crash_vacuum_in_appendonly_insert (a INT, b INT, c CHAR(20)); -CREATE +CREATE TABLE 3:CREATE INDEX crash_vacuum_in_appendonly_insert_index ON crash_vacuum_in_appendonly_insert(b); -CREATE +CREATE INDEX 3:INSERT INTO crash_vacuum_in_appendonly_insert SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 3:UPDATE crash_vacuum_in_appendonly_insert SET b = 2; UPDATE 10 @@ -83,7 +83,7 @@ BEGIN Success: (1 row) 3:END; -END +COMMIT -- we already waited for suspend faults to trigger and hence we can proceed to -- run next command which would trigger panic fault and help test @@ -137,7 +137,7 @@ ERROR: Error on receive from seg0 127.0.0.1:7002 pid=18463: server closed the c 2 | 2 | 258 | 3 | 0 | 1 (15 rows) 1:INSERT INTO crash_before_cleanup_phase VALUES(1, 1, 'c'), (25, 6, 'c'); -INSERT 2 +INSERT 0 2 1:UPDATE crash_before_cleanup_phase SET b = b+10 WHERE a=25; UPDATE 1 1:SELECT * FROM crash_before_cleanup_phase ORDER BY a,b; @@ -199,7 +199,7 @@ 
VACUUM 2 | 2 | 258 | 5 | 0 | 1 (18 rows) 1:INSERT INTO crash_before_cleanup_phase VALUES(21, 1, 'c'), (26, 1, 'c'); -INSERT 2 +INSERT 0 2 1:UPDATE crash_before_cleanup_phase SET b = b+10 WHERE a=26; UPDATE 1 1:SELECT * FROM crash_before_cleanup_phase ORDER BY a,b; @@ -263,7 +263,7 @@ VACUUM 2 | 2 | 258 | 5 | 0 | 1 (18 rows) 1:INSERT INTO crash_vacuum_in_appendonly_insert VALUES(21, 1, 'c'), (26, 1, 'c'); -INSERT 2 +INSERT 0 2 1:UPDATE crash_vacuum_in_appendonly_insert SET b = b+10 WHERE a=26; UPDATE 1 1:SELECT * FROM crash_vacuum_in_appendonly_insert ORDER BY a,b; @@ -294,15 +294,15 @@ SET ----------------------------- ao_column (1 row) -2:DROP TABLE IF EXISTS crash_master_before_cleanup_phase CASCADE; -DROP -2:CREATE TABLE crash_master_before_cleanup_phase (a INT, b INT, c CHAR(20)); -CREATE -2:CREATE INDEX crash_master_before_cleanup_phase_index ON crash_master_before_cleanup_phase(b); -CREATE -2:INSERT INTO crash_master_before_cleanup_phase SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; -INSERT 10 -2:DELETE FROM crash_master_before_cleanup_phase WHERE a < 4; +2:DROP TABLE IF EXISTS crash_coordinator_before_cleanup_phase CASCADE; +DROP TABLE +2:CREATE TABLE crash_coordinator_before_cleanup_phase (a INT, b INT, c CHAR(20)); +CREATE TABLE +2:CREATE INDEX crash_coordinator_before_cleanup_phase_index ON crash_coordinator_before_cleanup_phase(b); +CREATE INDEX +2:INSERT INTO crash_coordinator_before_cleanup_phase SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; +INSERT 0 10 +2:DELETE FROM crash_coordinator_before_cleanup_phase WHERE a < 4; DELETE 3 -- inject panic fault @@ -345,9 +345,9 @@ server closed the connection unexpectedly 2 | 2 | 258 | 0 | 0 | 1 2 | 2 | 258 | 3 | 0 | 1 (15 rows) -4:INSERT INTO crash_master_before_cleanup_phase VALUES(1, 1, 'c'), (25, 6, 'c'); -INSERT 2 -4:UPDATE crash_master_before_cleanup_phase SET b = b+10 WHERE a=25; +4:INSERT INTO crash_coordinator_before_cleanup_phase VALUES(1, 1, 
'c'), (25, 6, 'c'); +INSERT 0 2 +4:UPDATE crash_coordinator_before_cleanup_phase SET b = b+10 WHERE a=25; UPDATE 1 4:SELECT * FROM crash_master_before_cleanup_phase ORDER BY a,b; a | b | c @@ -405,9 +405,9 @@ VACUUM 2 | 2 | 258 | 3 | 0 | 1 2 | 2 | 258 | 5 | 0 | 1 (18 rows) -4:INSERT INTO crash_master_before_cleanup_phase VALUES(21, 1, 'c'), (26, 1, 'c'); -INSERT 2 -4:UPDATE crash_master_before_cleanup_phase SET b = b+10 WHERE a=26; +4:INSERT INTO crash_coordinator_before_cleanup_phase VALUES(21, 1, 'c'), (26, 1, 'c'); +INSERT 0 2 +4:UPDATE crash_coordinator_before_cleanup_phase SET b = b+10 WHERE a=26; UPDATE 1 4:SELECT * FROM crash_master_before_cleanup_phase ORDER BY a,b; a | b | c @@ -441,7 +441,7 @@ UPDATE 1 4:SET default_table_access_method = ao_column; SET 4:CREATE TABLE crash_vacuum_in_appendonly_insert_1 (a INT, b INT, c CHAR(20)); -CREATE +CREATE TABLE -- just sanity check to make sure appendonly table is created 4:SELECT count(*) from pg_appendonly where relid in (select oid from pg_class where relname='crash_vacuum_in_appendonly_insert_1'); count @@ -449,7 +449,7 @@ CREATE 1 (1 row) 4:INSERT INTO crash_vacuum_in_appendonly_insert_1 SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 4:UPDATE crash_vacuum_in_appendonly_insert_1 SET b = 2; UPDATE 10 4:SELECT gp_inject_fault('xlog_ao_insert', 'infinite_loop', 2); diff --git a/src/test/isolation2/expected/uao_crash_compaction_row.out b/src/test/isolation2/expected/uao_crash_compaction_row.out index 5dd3bc113f0..45ec3bb4a6b 100644 --- a/src/test/isolation2/expected/uao_crash_compaction_row.out +++ b/src/test/isolation2/expected/uao_crash_compaction_row.out @@ -30,24 +30,24 @@ SET ao_row (1 row) 3:DROP TABLE IF EXISTS crash_before_cleanup_phase CASCADE; -DROP +DROP TABLE 3:CREATE TABLE crash_before_cleanup_phase (a INT, b INT, c CHAR(20)); -CREATE +CREATE TABLE 3:CREATE INDEX crash_before_cleanup_phase_index ON crash_before_cleanup_phase(b); -CREATE +CREATE INDEX 
3:INSERT INTO crash_before_cleanup_phase SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 3:DELETE FROM crash_before_cleanup_phase WHERE a < 4; DELETE 3 -- for crash_vacuum_in_appendonly_insert 3:DROP TABLE IF EXISTS crash_vacuum_in_appendonly_insert CASCADE; -DROP +DROP TABLE 3:CREATE TABLE crash_vacuum_in_appendonly_insert (a INT, b INT, c CHAR(20)); -CREATE +CREATE TABLE 3:CREATE INDEX crash_vacuum_in_appendonly_insert_index ON crash_vacuum_in_appendonly_insert(b); -CREATE +CREATE INDEX 3:INSERT INTO crash_vacuum_in_appendonly_insert SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 3:UPDATE crash_vacuum_in_appendonly_insert SET b = 2; UPDATE 10 @@ -124,7 +124,7 @@ VACUUM 2 | 1 | 200 | 4 | 1 | 200 | 1 | 3 | 1 (5 rows) 1:INSERT INTO crash_before_cleanup_phase VALUES(1, 1, 'c'), (25, 6, 'c'); -INSERT 2 +INSERT 0 2 1:UPDATE crash_before_cleanup_phase SET b = b+10 WHERE a=25; UPDATE 1 1:SELECT * FROM crash_before_cleanup_phase ORDER BY a,b; @@ -162,7 +162,7 @@ VACUUM 2 | 2 | 248 | 5 | 1 | 248 | 0 | 3 | 1 (6 rows) 1:INSERT INTO crash_before_cleanup_phase VALUES(21, 1, 'c'), (26, 1, 'c'); -INSERT 2 +INSERT 0 2 1:UPDATE crash_before_cleanup_phase SET b = b+10 WHERE a=26; UPDATE 1 1:SELECT * FROM crash_before_cleanup_phase ORDER BY a,b; @@ -208,7 +208,7 @@ VACUUM 2 | 2 | 200 | 4 | 1 | 200 | 0 | 3 | 1 (6 rows) 1:INSERT INTO crash_vacuum_in_appendonly_insert VALUES(21, 1, 'c'), (26, 1, 'c'); -INSERT 2 +INSERT 0 2 1:UPDATE crash_vacuum_in_appendonly_insert SET b = b+10 WHERE a=26; UPDATE 1 1:SELECT * FROM crash_vacuum_in_appendonly_insert ORDER BY a,b; @@ -239,15 +239,15 @@ SET ----------------------------- ao_row (1 row) -2:DROP TABLE IF EXISTS crash_master_before_cleanup_phase CASCADE; -DROP -2:CREATE TABLE crash_master_before_cleanup_phase (a INT, b INT, c CHAR(20)); -CREATE -2:CREATE INDEX crash_master_before_cleanup_phase_index ON crash_master_before_cleanup_phase(b); 
-CREATE -2:INSERT INTO crash_master_before_cleanup_phase SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; -INSERT 10 -2:DELETE FROM crash_master_before_cleanup_phase WHERE a < 4; +2:DROP TABLE IF EXISTS crash_coordinator_before_cleanup_phase CASCADE; +DROP TABLE +2:CREATE TABLE crash_coordinator_before_cleanup_phase (a INT, b INT, c CHAR(20)); +CREATE TABLE +2:CREATE INDEX crash_coordinator_before_cleanup_phase_index ON crash_coordinator_before_cleanup_phase(b); +CREATE INDEX +2:INSERT INTO crash_coordinator_before_cleanup_phase SELECT i AS a, 1 AS b, 'hello world' AS c FROM generate_series(1, 10) AS i; +INSERT 0 10 +2:DELETE FROM crash_coordinator_before_cleanup_phase WHERE a < 4; DELETE 3 -- suspend at intended points @@ -280,9 +280,9 @@ server closed the connection unexpectedly 1 | 2 | 0 | 0 | 0 | 0 | 0 | 3 | 1 2 | 1 | 200 | 4 | 1 | 200 | 1 | 3 | 1 (5 rows) -4:INSERT INTO crash_master_before_cleanup_phase VALUES(1, 1, 'c'), (25, 6, 'c'); -INSERT 2 -4:UPDATE crash_master_before_cleanup_phase SET b = b+10 WHERE a=25; +4:INSERT INTO crash_coordinator_before_cleanup_phase VALUES(1, 1, 'c'), (25, 6, 'c'); +INSERT 0 2 +4:UPDATE crash_coordinator_before_cleanup_phase SET b = b+10 WHERE a=25; UPDATE 1 4:SELECT * FROM crash_master_before_cleanup_phase ORDER BY a,b; a | b | c @@ -318,9 +318,9 @@ VACUUM 2 | 1 | 0 | 0 | 0 | 0 | 3 | 3 | 1 2 | 2 | 248 | 5 | 1 | 248 | 0 | 3 | 1 (6 rows) -4:INSERT INTO crash_master_before_cleanup_phase VALUES(21, 1, 'c'), (26, 1, 'c'); -INSERT 2 -4:UPDATE crash_master_before_cleanup_phase SET b = b+10 WHERE a=26; +4:INSERT INTO crash_coordinator_before_cleanup_phase VALUES(21, 1, 'c'), (26, 1, 'c'); +INSERT 0 2 +4:UPDATE crash_coordinator_before_cleanup_phase SET b = b+10 WHERE a=26; UPDATE 1 4:SELECT * FROM crash_master_before_cleanup_phase ORDER BY a,b; a | b | c diff --git a/src/test/isolation2/expected/udf_exception_blocks_panic_scenarios.out b/src/test/isolation2/expected/udf_exception_blocks_panic_scenarios.out 
index 84b84a3ae8d..bc120f05905 100644 --- a/src/test/isolation2/expected/udf_exception_blocks_panic_scenarios.out +++ b/src/test/isolation2/expected/udf_exception_blocks_panic_scenarios.out @@ -52,21 +52,21 @@ SELECT gp_request_fts_probe_scan(); t (1 row) CREATE OR REPLACE FUNCTION test_excep (arg INTEGER) RETURNS INTEGER AS $$ DECLARE res INTEGER; /* in func */ BEGIN /* in func */ res := 100 / arg; /* in func */ RETURN res; /* in func */ EXCEPTION /* in func */ WHEN division_by_zero /* in func */ THEN RETURN 999; /* in func */ END; /* in func */ $$ LANGUAGE plpgsql; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION test_protocol_allseg(mid int, mshop int, mgender character) RETURNS VOID AS $$ DECLARE tfactor int default 0; /* in func */ BEGIN /* in func */ BEGIN /* in func */ CREATE TABLE employees(id int, shop_id int, gender character) DISTRIBUTED BY (id); /* in func */ INSERT INTO employees VALUES (0, 1, 'm'); /* in func */ END; /* in func */ BEGIN /* in func */ BEGIN /* in func */ IF EXISTS (select 1 from employees where id = mid) THEN /* in func */ RAISE EXCEPTION 'Duplicate employee id'; /* in func */ ELSE /* in func */ IF NOT (mshop between 1 AND 2) THEN /* in func */ RAISE EXCEPTION 'Invalid shop id' ; /* in func */ END IF; /* in func */ END IF; /* in func */ SELECT * INTO tfactor FROM test_excep(0); /* in func */ BEGIN /* in func */ INSERT INTO employees VALUES (mid, mshop, mgender); /* in func */ EXCEPTION /* in func */ WHEN OTHERS THEN /* in func */ BEGIN /* in func */ RAISE NOTICE 'catching the exception ...3'; /* in func */ END; /* in func */ END; /* in func */ EXCEPTION /* in func */ WHEN OTHERS THEN /* in func */ RAISE NOTICE 'catching the exception ...2'; /* in func */ END; /* in func */ EXCEPTION /* in func */ WHEN OTHERS THEN /* in func */ RAISE NOTICE 'catching the exception ...1'; /* in func */ END; /* in func */ END; /* in func */ $$ LANGUAGE plpgsql; -CREATE -SELECT role, preferred_role, content, status FROM gp_segment_configuration; - role 
| preferred_role | content | status -------+----------------+---------+-------- - p | p | -1 | u - m | m | -1 | u - p | p | 2 | u - m | m | 2 | u - p | p | 1 | u - m | m | 1 | u - p | p | 0 | u - m | m | 0 | u +CREATE FUNCTION +SELECT role, preferred_role, content, mode, status FROM gp_segment_configuration; + role | preferred_role | content | mode | status +------+----------------+---------+------+-------- + m | m | -1 | s | u + m | m | 0 | s | u + m | m | 1 | s | u + m | m | 2 | s | u + p | p | -1 | n | u + p | p | 0 | s | u + p | p | 1 | s | u + p | p | 2 | s | u (8 rows) SET debug_dtm_action_segment=0; SET @@ -79,7 +79,7 @@ SET SET debug_dtm_action_nestinglevel=0; SET DROP TABLE IF EXISTS employees; -DROP +DROP TABLE select test_protocol_allseg(1, 2,'f'); ERROR: PANIC for debug_dtm_action = 4, debug_dtm_action_protocol = Begin Internal Subtransaction (postgres.c:1490) (seg1 127.0.1.1:25433 pid=27784) CONTEXT: PL/pgSQL function test_protocol_allseg(integer,integer,character) line 9 during statement block entry @@ -107,7 +107,7 @@ SET SET debug_dtm_action_nestinglevel=0; SET DROP TABLE IF EXISTS employees; -DROP +DROP TABLE select test_protocol_allseg(1, 2,'f'); ERROR: DTX RollbackAndReleaseCurrentSubTransaction dispatch failed CONTEXT: PL/pgSQL function test_protocol_allseg(integer,integer,character) line 19 during exception cleanup @@ -135,7 +135,7 @@ SET SET debug_dtm_action_nestinglevel=4; SET DROP TABLE IF EXISTS employees; -DROP +DROP TABLE select test_protocol_allseg(1, 2,'f'); ERROR: DTX RollbackAndReleaseCurrentSubTransaction dispatch failed CONTEXT: PL/pgSQL function test_protocol_allseg(integer,integer,character) line 19 during exception cleanup @@ -163,7 +163,7 @@ SET SET debug_dtm_action_nestinglevel=3; SET DROP TABLE IF EXISTS employees; -DROP +DROP TABLE select test_protocol_allseg(1, 2,'f'); ERROR: DTX RollbackAndReleaseCurrentSubTransaction dispatch failed CONTEXT: PL/pgSQL function test_protocol_allseg(integer,integer,character) line 18 during 
exception cleanup @@ -191,7 +191,7 @@ SET SET debug_dtm_action_nestinglevel=0; SET DROP TABLE IF EXISTS employees; -DROP +DROP TABLE select test_protocol_allseg(1, 2,'f'); ERROR: DTX RollbackAndReleaseCurrentSubTransaction dispatch failed CONTEXT: PL/pgSQL function test_protocol_allseg(integer,integer,character) line 18 during exception cleanup @@ -219,7 +219,7 @@ SET SET debug_dtm_action_nestinglevel=3; SET DROP TABLE IF EXISTS employees; -DROP +DROP TABLE select test_protocol_allseg(1, 2,'f'); ERROR: DTX RollbackAndReleaseCurrentSubTransaction dispatch failed CONTEXT: PL/pgSQL function test_protocol_allseg(integer,integer,character) line 18 during exception cleanup diff --git a/src/test/isolation2/expected/unlogged_appendonly_tables.out b/src/test/isolation2/expected/unlogged_appendonly_tables.out index 9931e64f408..8e455c5f377 100644 --- a/src/test/isolation2/expected/unlogged_appendonly_tables.out +++ b/src/test/isolation2/expected/unlogged_appendonly_tables.out @@ -1,6 +1,6 @@ -- expect: create table succeeds create unlogged table unlogged_appendonly_table_managers ( id int, name text ) with ( appendonly=true ) distributed by (id); -CREATE +CREATE TABLE -- skip FTS probes to make the test deterministic. 
SELECT gp_inject_fault_infinite('fts_probe', 'skip', 1); @@ -21,9 +21,9 @@ SELECT gp_request_fts_probe_scan(); -- expect: insert/update/select works insert into unlogged_appendonly_table_managers values (1, 'Joe'); -INSERT 1 +INSERT 0 1 insert into unlogged_appendonly_table_managers values (2, 'Jane'); -INSERT 1 +INSERT 0 1 update unlogged_appendonly_table_managers set name = 'Susan' where id = 2; UPDATE 1 select * from unlogged_appendonly_table_managers order by id; @@ -55,9 +55,9 @@ select segment_id, segno, tupcount from gp_toolkit.__gp_aoseg('unlogged_appendon -- expect: insert/update/select works 3: insert into unlogged_appendonly_table_managers values (1, 'Joe'); -INSERT 1 +INSERT 0 1 3: insert into unlogged_appendonly_table_managers values (2, 'Jane'); -INSERT 1 +INSERT 0 1 3: update unlogged_appendonly_table_managers set name = 'Susan' where id = 2; UPDATE 1 3: select * from unlogged_appendonly_table_managers order by id; @@ -91,7 +91,7 @@ UPDATE 1 -- expect: drop table succeeds 5: drop table unlogged_appendonly_table_managers; -DROP +DROP TABLE SELECT gp_inject_fault('fts_probe', 'reset', 1); gp_inject_fault diff --git a/src/test/isolation2/expected/unlogged_heap_tables.out b/src/test/isolation2/expected/unlogged_heap_tables.out index eda1f72310e..75073386d50 100644 --- a/src/test/isolation2/expected/unlogged_heap_tables.out +++ b/src/test/isolation2/expected/unlogged_heap_tables.out @@ -1,6 +1,6 @@ -- expect: create table succeeds create unlogged table unlogged_heap_table_managers ( id int, name text ) distributed by (id); -CREATE +CREATE TABLE -- skip FTS probes to make the test deterministic. 
SELECT gp_inject_fault_infinite('fts_probe', 'skip', 1); @@ -21,9 +21,9 @@ SELECT gp_request_fts_probe_scan(); -- expect: insert/update/select works insert into unlogged_heap_table_managers values (1, 'Joe'); -INSERT 1 +INSERT 0 1 insert into unlogged_heap_table_managers values (2, 'Jane'); -INSERT 1 +INSERT 0 1 update unlogged_heap_table_managers set name = 'Susan' where id = 2; UPDATE 1 select * from unlogged_heap_table_managers order by id; @@ -45,9 +45,9 @@ select * from unlogged_heap_table_managers order by id; -- expect: insert/update/select works 3: insert into unlogged_heap_table_managers values (1, 'Joe'); -INSERT 1 +INSERT 0 1 3: insert into unlogged_heap_table_managers values (2, 'Jane'); -INSERT 1 +INSERT 0 1 3: update unlogged_heap_table_managers set name = 'Susan' where id = 2; UPDATE 1 3: select * from unlogged_heap_table_managers order by id; @@ -69,7 +69,7 @@ UPDATE 1 -- expect: drop table succeeds 5: drop table unlogged_heap_table_managers; -DROP +DROP TABLE SELECT gp_inject_fault('fts_probe', 'reset', 1); gp_inject_fault diff --git a/src/test/isolation2/expected/update_hash_col_utilitymode.out b/src/test/isolation2/expected/update_hash_col_utilitymode.out index 74cb7d728a2..e8b72be593a 100644 --- a/src/test/isolation2/expected/update_hash_col_utilitymode.out +++ b/src/test/isolation2/expected/update_hash_col_utilitymode.out @@ -1,5 +1,5 @@ create table t_update_hash_col_utilitymode(c int, d int) distributed by (c); -CREATE +CREATE TABLE -- This works. 
1U: update t_update_hash_col_utilitymode set d = d + 1; @@ -10,4 +10,4 @@ UPDATE 0 ERROR: cannot update distribution key columns in utility mode drop table t_update_hash_col_utilitymode; -DROP +DROP TABLE diff --git a/src/test/isolation2/expected/upgrade_numsegments.out b/src/test/isolation2/expected/upgrade_numsegments.out index 965a0468143..f6d3aaa67d7 100644 --- a/src/test/isolation2/expected/upgrade_numsegments.out +++ b/src/test/isolation2/expected/upgrade_numsegments.out @@ -9,13 +9,13 @@ -- numsegments in utility mode, so we use it to test the -- function. -1U: create temp table t1(c1 int, c2 int); -CREATE +CREATE TABLE 0U: create temp table t1(c1 int, c2 int); -CREATE +CREATE TABLE 1U: create temp table t1(c1 int, c2 int); -CREATE +CREATE TABLE 2U: create temp table t1(c1 int, c2 int); -CREATE +CREATE TABLE -- start_ignore -1Uq: ... 0Uq: ... diff --git a/src/test/isolation2/expected/vacuum_after_vacuum_skip_drop_column.out b/src/test/isolation2/expected/vacuum_after_vacuum_skip_drop_column.out index c39460d99dd..4588ee41283 100644 --- a/src/test/isolation2/expected/vacuum_after_vacuum_skip_drop_column.out +++ b/src/test/isolation2/expected/vacuum_after_vacuum_skip_drop_column.out @@ -2,9 +2,9 @@ -- left over by a previous vacuum -- CREATE TABLE aoco_vacuum_after_vacuum_skip_drop (a INT, b INT) WITH (appendonly=true, orientation=column); -CREATE +CREATE TABLE INSERT INTO aoco_vacuum_after_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 10) AS i; -INSERT 10 +INSERT 0 10 DELETE FROM aoco_vacuum_after_vacuum_skip_drop; DELETE 10 @@ -28,7 +28,7 @@ BEGIN 2: VACUUM aoco_vacuum_after_vacuum_skip_drop; VACUUM 1: END; -END +COMMIT -- We should see an aocsseg in state 2 (AOSEG_STATE_AWAITING_DROP) 0U: SELECT segno, column_num, state FROM gp_toolkit.__gp_aocsseg('aoco_vacuum_after_vacuum_skip_drop'); @@ -54,7 +54,7 @@ VACUUM -- Check if insert goes into segno 1 instead of segno 2 1: INSERT INTO aoco_vacuum_after_vacuum_skip_drop SELECT i as a, i as b 
FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 0U: SELECT segno, tupcount > 0, state FROM gp_toolkit.__gp_aocsseg('aoco_vacuum_after_vacuum_skip_drop'); segno | ?column? | state -------+----------+------- diff --git a/src/test/isolation2/expected/vacuum_full_interrupt.out b/src/test/isolation2/expected/vacuum_full_interrupt.out index b2d7a396ba5..7c8e5102454 100644 --- a/src/test/isolation2/expected/vacuum_full_interrupt.out +++ b/src/test/isolation2/expected/vacuum_full_interrupt.out @@ -6,11 +6,11 @@ -- transaction is aborted. 1: CREATE TABLE vacuum_full_interrupt(a int, b int, c int); -CREATE +CREATE TABLE 1: CREATE INDEX vacuum_full_interrupt_idx on vacuum_full_interrupt(b); -CREATE +CREATE INDEX 1: INSERT INTO vacuum_full_interrupt SELECT i, i, i from generate_series(1,100)i; -INSERT 100 +INSERT 0 100 1: ANALYZE vacuum_full_interrupt; ANALYZE -- the relfrozenxid is the same as xmin when there's concurrent transactions. @@ -63,7 +63,7 @@ ERROR: canceling statement due to user request -- verify the index is correctly when insert new tuples, in bug also reset 'relhasindex' in pg_class. 
2: INSERT INTO vacuum_full_interrupt SELECT i, i, i from generate_series(1,100)i; -INSERT 100 +INSERT 0 100 2: SET optimizer=off; SET 2: SET enable_seqscan=off; diff --git a/src/test/isolation2/expected/vacuum_progress_column.out b/src/test/isolation2/expected/vacuum_progress_column.out index 587bd35f7c8..54d4018303f 100644 --- a/src/test/isolation2/expected/vacuum_progress_column.out +++ b/src/test/isolation2/expected/vacuum_progress_column.out @@ -6,15 +6,15 @@ SET -- Setup the append-optimized table to be vacuumed DROP TABLE IF EXISTS vacuum_progress_ao_column; -DROP +DROP TABLE CREATE TABLE vacuum_progress_ao_column(i int, j int); -CREATE +CREATE TABLE -- Add two indexes to be vacuumed as well CREATE INDEX on vacuum_progress_ao_column(i); -CREATE +CREATE INDEX CREATE INDEX on vacuum_progress_ao_column(j); -CREATE +CREATE INDEX -- Insert all tuples to seg1 from two current sessions so that data are stored -- in two segment files. @@ -23,23 +23,23 @@ BEGIN 2: BEGIN; BEGIN 1: INSERT INTO vacuum_progress_ao_column SELECT 0, i FROM generate_series(1, 100000) i; -INSERT 100000 +INSERT 0 100000 2: INSERT INTO vacuum_progress_ao_column SELECT 0, i FROM generate_series(1, 100000) i; -INSERT 100000 +INSERT 0 100000 -- Commit so that the logical EOF of segno 2 is non-zero. 2: COMMIT; COMMIT 2: BEGIN; BEGIN 2: INSERT INTO vacuum_progress_ao_column SELECT 0, i FROM generate_series(1, 100000) i; -INSERT 100000 +INSERT 0 100000 -- Abort so that segno 2 has dead tuples after its logical EOF 2: ABORT; -ABORT +ROLLBACK 2q: ... -- Abort so that segno 1 has logical EOF = 0. 1: ABORT; -ABORT +ROLLBACK -- Also delete half of the tuples evenly before the EOF of segno 2. 
DELETE FROM vacuum_progress_ao_column where j % 2 = 0; diff --git a/src/test/isolation2/expected/vacuum_progress_row.out b/src/test/isolation2/expected/vacuum_progress_row.out index 0f1b3e65ef5..f5e7bfb0165 100644 --- a/src/test/isolation2/expected/vacuum_progress_row.out +++ b/src/test/isolation2/expected/vacuum_progress_row.out @@ -6,15 +6,15 @@ SET -- Setup the append-optimized table to be vacuumed DROP TABLE IF EXISTS vacuum_progress_ao_row; -DROP +DROP TABLE CREATE TABLE vacuum_progress_ao_row(i int, j int); -CREATE +CREATE TABLE -- Add two indexes to be vacuumed as well CREATE INDEX on vacuum_progress_ao_row(i); -CREATE +CREATE INDEX CREATE INDEX on vacuum_progress_ao_row(j); -CREATE +CREATE INDEX -- Insert all tuples to seg1 from two current sessions so that data are stored -- in two segment files. @@ -22,24 +22,24 @@ CREATE BEGIN 2: BEGIN; BEGIN -1: INSERT INTO vacuum_progress_ao_row SELECT 0, i FROM generate_series(1, 100000) i; -INSERT 100000 -2: INSERT INTO vacuum_progress_ao_row SELECT 0, i FROM generate_series(1, 100000) i; -INSERT 100000 +1: INSERT INTO vacuum_progress_ao_row SELECT i, i FROM generate_series(1, 100000) i; +INSERT 0 100000 +2: INSERT INTO vacuum_progress_ao_row SELECT i, i FROM generate_series(1, 100000) i; +INSERT 0 100000 -- Commit so that the logical EOF of segno 2 is non-zero. 2: COMMIT; COMMIT 2: BEGIN; BEGIN -2: INSERT INTO vacuum_progress_ao_row SELECT 0, i FROM generate_series(1, 100000) i; -INSERT 100000 +2: INSERT INTO vacuum_progress_ao_row SELECT i, i FROM generate_series(1, 100000) i; +INSERT 0 100000 -- Abort so that segno 2 has dead tuples after its logical EOF 2: ABORT; -ABORT +ROLLBACK 2q: ... -- Abort so that segno 1 has logical EOF = 0. 1: ABORT; -ABORT +ROLLBACK -- Also delete half of the tuples evenly before the EOF of segno 2. 
DELETE FROM vacuum_progress_ao_row where j % 2 = 0; diff --git a/src/test/isolation2/expected/vacuum_recently_dead_tuple_due_to_distributed_snapshot.out b/src/test/isolation2/expected/vacuum_recently_dead_tuple_due_to_distributed_snapshot.out index 7a9a981ba06..463c6f492ee 100644 --- a/src/test/isolation2/expected/vacuum_recently_dead_tuple_due_to_distributed_snapshot.out +++ b/src/test/isolation2/expected/vacuum_recently_dead_tuple_due_to_distributed_snapshot.out @@ -2,10 +2,10 @@ -- visible to a distributed snapshot, even if there is no corresponding -- local snapshot in a QE node yet. create table test_recently_dead_utility(a int); -CREATE +CREATE TABLE insert into test_recently_dead_utility select g from generate_series(1, 100) g; -INSERT 100 +INSERT 0 100 -- A function that: -- @@ -24,7 +24,7 @@ INSERT 100 -- (All on one line because of limitations in the isolation2 test language.) create or replace function afunc() returns int4 as $$ declare c int; begin select count(*) into c from pg_Database; perform pg_sleep(10); select count(*) into c from test_recently_dead_utility; return c; end; $$ language plpgsql stable; -CREATE +CREATE FUNCTION -- In one session, launch afunc(). 1&: select afunc(); diff --git a/src/test/isolation2/expected/vacuum_skip_locked_onseg.out b/src/test/isolation2/expected/vacuum_skip_locked_onseg.out index 243f663a27d..07cd9c64488 100644 --- a/src/test/isolation2/expected/vacuum_skip_locked_onseg.out +++ b/src/test/isolation2/expected/vacuum_skip_locked_onseg.out @@ -5,13 +5,13 @@ -- on master). 
1: CREATE TABLE vacuum_tbl (c1 int) DISTRIBUTED BY (c1); -CREATE +CREATE TABLE -- Connect to seg #0 in utility mode, lock the table in share mode 0U: BEGIN; BEGIN 0U: LOCK vacuum_tbl IN SHARE MODE; -LOCK +LOCK TABLE -- Issue vacuum with SKIP_LOCKED option -- Note that some ANALYZE options are disabled here because the ANALYZE @@ -32,7 +32,7 @@ COMMIT 0U: BEGIN; BEGIN 0U: LOCK vacuum_tbl IN ACCESS EXCLUSIVE MODE; -LOCK +LOCK TABLE -- Issue vacuum with SKIP_LOCKED option -- Note that some ANALYZE options are disabled here because the ANALYZE @@ -50,4 +50,4 @@ VACUUM COMMIT 1: DROP TABLE IF EXISTS vacuum_tbl; -DROP +DROP TABLE diff --git a/src/test/isolation2/isolation2_main.c b/src/test/isolation2/isolation2_main.c index fb338660dc6..e3f2093811e 100644 --- a/src/test/isolation2/isolation2_main.c +++ b/src/test/isolation2/isolation2_main.c @@ -108,7 +108,16 @@ isolation_start_test(const char *testname, offset += snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, "%s ", launcher); + /* + * sql_isolation_testcase.py is using psycopg2 to connect to server. Psycopg2 requires the server + * using PGDATESTYLE='ISO, MDY', otherwise psycopg2 will try to execute "SET DATESTYLE='ISO, MDY'" + * after the connection being established. However, the pg_regress hardcoded PGDATESTYLE to 'Postgres, MDY', + * while in Greenplum, the 'SET' command is not allowed to execute in a retrieve connection + * (PGOPTIONS='-c gp_retrieve_conn=true'). To workaround this issue, we set 'DATESTYLE' to 'ISO, MDY' + * before invoking sql_isolation_testcase.py. 
+ */ snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, + "env PGDATESTYLE='ISO,MDY' " "python3 ./sql_isolation_testcase.py --dbname=\"%s\" --initfile_prefix=\"%s\" < \"%s\" > \"%s\" 2>&1", dblist->str, outfile, diff --git a/src/test/isolation2/output/autovacuum-analyze.source b/src/test/isolation2/output/autovacuum-analyze.source index e071f9e7007..4dfa7e49b2a 100644 --- a/src/test/isolation2/output/autovacuum-analyze.source +++ b/src/test/isolation2/output/autovacuum-analyze.source @@ -4,7 +4,9 @@ -- Speed up test. ALTER SYSTEM SET autovacuum_naptime = 5; -ALTER +ALTER SYSTEM +ALTER SYSTEM SET autovacuum_vacuum_threshold = 50; +ALTER SYSTEM select * from pg_reload_conf(); pg_reload_conf ---------------- @@ -23,7 +25,7 @@ ANALYZE -- Prepare the table to be ANALYZEd 1: CREATE TABLE anatest (id bigint); -CREATE +CREATE TABLE -- Track report gpstat on master SELECT gp_inject_fault('gp_pgstat_report_on_master', 'suspend', '', '', 'anatest', 1, -1, 0, 1); @@ -59,7 +61,7 @@ SELECT gp_inject_fault('gp_pgstat_report_on_master', 'reset', 1); (1 row) 1<: <... completed> -INSERT 1000 +INSERT 0 1000 -- Wait until autovacuum is triggered SELECT gp_wait_until_triggered_fault('auto_vac_worker_after_report_activity', 1, 1); @@ -110,7 +112,7 @@ select relpages, reltuples from pg_class where oid = 'anatest'::regclass; -- Prepare the table to be ANALYZEd 1: CREATE TABLE rankpart (id int, rank int, product int) DISTRIBUTED BY (id) PARTITION BY RANGE (rank) ( START (1) END (10) EVERY (2), DEFAULT PARTITION extra ); -CREATE +CREATE TABLE -- Track report gpstat on master SELECT gp_inject_fault('gp_pgstat_report_on_master', 'suspend', '', '', 'rankpart_1_prt_extra', 1, -1, 0, 1); @@ -147,7 +149,7 @@ SELECT gp_inject_fault('gp_pgstat_report_on_master', 'reset', 1); (1 row) 1<: <... 
completed> -INSERT 1000 +INSERT 0 1000 -- Wait until autovacuum is triggered SELECT gp_wait_until_triggered_fault('auto_vac_worker_after_report_activity', 1, 1); @@ -263,7 +265,7 @@ select relpages, reltuples from pg_class where oid = 'rankpart'::regclass; -- 1: CREATE TABLE anaabort(id int); -CREATE +CREATE TABLE -- Track report gpstat on master SELECT gp_inject_fault('gp_pgstat_report_on_master', 'suspend', '', '', 'anaabort', 1, -1, 0, 1); @@ -305,7 +307,7 @@ SELECT gp_inject_fault('gp_pgstat_report_on_master', 'reset', 1); (1 row) 1<: <... completed> -INSERT 1000 +INSERT 0 1000 -- Wait until autovacuum is triggered SELECT gp_wait_until_triggered_fault('auto_vac_worker_after_report_activity', 1, 1); @@ -356,7 +358,7 @@ SELECT gp_inject_fault('analyze_after_hold_lock', 'reset', 1); -- Get lock 1<: <... completed> -LOCK +LOCK TABLE -- Shouldn't have statistic updated. SELECT count(*) FROM pg_statistic where starelid = 'anaabort'::regclass; @@ -371,7 +373,7 @@ select relpages, reltuples from pg_class where oid = 'anaabort'::regclass; (1 row) 1: END; -END +COMMIT -- -- Test 4, auto-ANALYZE and auto_stats could work at the same time. @@ -390,7 +392,7 @@ SET -- Prepare the table to be ANALYZEd 2: CREATE TABLE autostatstbl (id bigint); -CREATE +CREATE TABLE -- Track that we have updated the attributes stats in pg_statistic when finished SELECT gp_inject_fault('analyze_finished_one_relation', 'skip', '', '', 'autostatstbl', 1, -1, 0, 1); @@ -407,7 +409,7 @@ SELECT gp_inject_fault('auto_vac_worker_after_report_activity', 'suspend', '', ' -- with auto_stats, the auto-ANALYZE still trigger 2: INSERT INTO autostatstbl select i from generate_series(1, 1000) as i; -INSERT 1000 +INSERT 0 1000 2: select pg_sleep(0.77); -- Force pgstat_report_stat() to send tabstat. -- auto_stats executed but auto-ANALYZE not execute yet since we suspend before finish ANALYZE. 
SELECT count(*) FROM pg_statistic where starelid = 'autostatstbl'::regclass; @@ -492,7 +494,7 @@ SELECT gp_inject_fault('auto_vac_worker_after_report_activity', 'suspend', '', ' (1 row) 2: INSERT INTO autostatstbl select i from generate_series(1001, 2000) as i; -INSERT 1000 +INSERT 0 1000 2: select pg_sleep(0.77); -- Force pgstat_report_stat() to send tabstat. -- auto_stats executed but auto-ANALYZE not execute yet since we suspend before finish ANALYZE. select relpages, reltuples from pg_class where oid = 'autostatstbl'::regclass; @@ -571,7 +573,7 @@ SELECT gp_inject_fault('auto_vac_worker_after_report_activity', 'suspend', '', ' (1 row) 2: INSERT INTO autostatstbl select i from generate_series(2001, 3000) as i; -INSERT 1000 +INSERT 0 1000 2: select pg_sleep(0.77); -- Force pgstat_report_stat() to send tabstat. -- auto_stats should not executed and auto-ANALYZE not execute yet since we suspend before finish ANALYZE. select relpages, reltuples from pg_class where oid = 'autostatstbl'::regclass; @@ -632,7 +634,9 @@ select analyze_count, autoanalyze_count, n_mod_since_analyze from pg_stat_all_ta -- Reset GUCs. ALTER SYSTEM RESET autovacuum_naptime; -ALTER +ALTER SYSTEM +ALTER SYSTEM RESET autovacuum_vacuum_threshold; +ALTER SYSTEM select * from pg_reload_conf(); pg_reload_conf ---------------- diff --git a/src/test/isolation2/output/distributed_snapshot.source b/src/test/isolation2/output/distributed_snapshot.source index 10ca3f6d5c1..fbffab12e53 100644 --- a/src/test/isolation2/output/distributed_snapshot.source +++ b/src/test/isolation2/output/distributed_snapshot.source @@ -1,7 +1,7 @@ -- Distributed snapshot tests create or replace function test_consume_xids(int4) returns void as '@abs_srcdir@/../regress/regress.so', 'test_consume_xids' language C; -CREATE +CREATE FUNCTION -- Scenario1: Test to validate GetSnapshotData()'s computation of globalXmin using -- distributed snapshot. 
It mainly uses a old read-only transaction to help @@ -10,12 +10,12 @@ CREATE -- Setup CREATE TABLE distributed_snapshot_test1 (a int); -CREATE +CREATE TABLE 1: BEGIN; BEGIN 1: INSERT INTO distributed_snapshot_test1 values(1); -INSERT 1 +INSERT 0 1 -- Read transaction which helps to get lower globalXmin for session 3. As this -- will have MyProc->xmin set to transaction 1's xid. 2: BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; @@ -31,7 +31,7 @@ COMMIT -- Transaction to bump the latestCompletedXid 1: INSERT INTO distributed_snapshot_test1 values(1); -INSERT 1 +INSERT 0 1 -- Hold after walking over ProcArray in GetSnpashotData(), right at start of -- DistributedLog_AdvanceOldestXmin() @@ -51,7 +51,7 @@ COMMIT -- Transaction used to bump the distributed oldestXmin 1: INSERT INTO distributed_snapshot_test1 values(1); -INSERT 1 +INSERT 0 1 -- let session 3 now move forward to compute distributed oldest xmin 1: SELECT gp_inject_fault('distributedlog_advance_oldest_xmin', 'reset', dbid) from gp_segment_configuration where content = 0 and role = 'p'; gp_inject_fault @@ -68,14 +68,14 @@ INSERT 1 -- Setup CREATE TABLE distributed_snapshot_test2 (a int); -CREATE +CREATE TABLE -- start transaction assigns distributed xid. 1: BEGIN ISOLATION LEVEL REPEATABLE READ; BEGIN -- this sets latestCompletedXid 2: INSERT INTO distributed_snapshot_test2 VALUES(1); -INSERT 1 +INSERT 0 1 -- here, take distributed snapshot 1: SELECT 123 AS "establish snapshot"; establish snapshot @@ -83,7 +83,7 @@ INSERT 1 123 (1 row) 2: INSERT INTO distributed_snapshot_test2 VALUES(2); -INSERT 1 +INSERT 0 1 -- expected to see just VALUES(1) 1: SELECT * FROM distributed_snapshot_test2; a @@ -94,7 +94,7 @@ INSERT 1 COMMIT DROP TABLE distributed_snapshot_test2; -DROP +DROP TABLE -- Scenario3: Test the one-phase commit transactions don't break repeatable read isolation. -- @@ -110,7 +110,7 @@ DROP -- protocol. Repeatable read transactions may read (100), (100,100) or -- (100,100,300), but not (100, 300). 
CREATE TABLE distributed_snapshot_test3 (a int); -CREATE +CREATE TABLE 10: BEGIN ISOLATION LEVEL REPEATABLE READ; BEGIN 20: BEGIN ISOLATION LEVEL REPEATABLE READ; @@ -118,21 +118,21 @@ BEGIN 30: BEGIN ISOLATION LEVEL REPEATABLE READ; BEGIN 40: INSERT INTO distributed_snapshot_test3 VALUES(100); -INSERT 1 +INSERT 0 1 10: SELECT gp_segment_id, * FROM distributed_snapshot_test3 where a = 100; gp_segment_id | a ---------------+----- 2 | 100 (1 row) 40: INSERT INTO distributed_snapshot_test3 VALUES(100); -INSERT 1 +INSERT 0 1 30: SELECT 123 AS "establish snapshot"; establish snapshot -------------------- 123 (1 row) 40: INSERT INTO distributed_snapshot_test3 VALUES(300); -INSERT 1 +INSERT 0 1 10: SELECT gp_segment_id, * FROM distributed_snapshot_test3; gp_segment_id | a ---------------+----- @@ -158,7 +158,7 @@ COMMIT 30: COMMIT; COMMIT DROP TABLE distributed_snapshot_test3; -DROP +DROP TABLE -- The following test cases are to test that QEs can get -- latest distribute snapshot to scan normal tables (not catalog). @@ -190,9 +190,9 @@ DROP -- Case 1: concurrently alter column type (will do rewrite heap) create table t_alter_snapshot_test(a int, b int, c int); -CREATE +CREATE TABLE insert into t_alter_snapshot_test values (1, 1, 1), (1, 1, 1); -INSERT 2 +INSERT 0 2 select * from t_alter_snapshot_test; a | b | c @@ -204,17 +204,17 @@ select * from t_alter_snapshot_test; 1: begin; BEGIN 1: alter table t_alter_snapshot_test alter column b type text; -ALTER +ALTER TABLE -- the following statement will hang 2&: alter table t_alter_snapshot_test alter column c type text; 1: end; -END +COMMIT -- after 1 commit, 2 can continue, it should use latest distributed -- snapshot so that the data will not be lost. 2<: <... 
completed> -ALTER +ALTER TABLE select * from t_alter_snapshot_test; a | b | c @@ -223,13 +223,13 @@ select * from t_alter_snapshot_test; 1 | 1 | 1 (2 rows) drop table t_alter_snapshot_test; -DROP +DROP TABLE -- Case 2: concurrently add exclude constrain create table t_alter_snapshot_test(a int, b int); -CREATE +CREATE TABLE insert into t_alter_snapshot_test values (1, 1), (1, 1); -INSERT 2 +INSERT 0 2 select a from t_alter_snapshot_test; a @@ -241,26 +241,26 @@ select a from t_alter_snapshot_test; 1: begin; BEGIN 1: alter table t_alter_snapshot_test alter column b type int using b::int; -ALTER +ALTER TABLE 2&: alter table t_alter_snapshot_test add exclude using btree (a WITH =); 1: end; -END +COMMIT -- after 1 commit, 2 can go on and it should fail 2<: <... completed> ERROR: could not create exclusion constraint "t_alter_snapshot_test_a_excl" (seg1 127.0.1.1:7003 pid=39163) DETAIL: Key (a)=(1) conflicts with key (a)=(1). drop table t_alter_snapshot_test; -DROP +DROP TABLE -- Case 3: concurrently split partition create table t_alter_snapshot_test(id int, rank int, year int) distributed by (id) partition by range (year) ( start (0) end (20) every (4), default partition extra ); -CREATE +CREATE TABLE insert into t_alter_snapshot_test select i,i,i from generate_series(1, 100)i; -INSERT 100 +INSERT 0 100 select count(*) from t_alter_snapshot_test; count ------- @@ -270,15 +270,15 @@ select count(*) from t_alter_snapshot_test; 1: begin; BEGIN 1: alter table t_alter_snapshot_test alter column rank type text; -ALTER +ALTER TABLE 2&: alter table t_alter_snapshot_test split partition for (5) at (5) into (partition pa, partition pb); 1: end; -END +COMMIT -- after 1 commit, 2 can go on and it should not lose data 2<: <... 
completed> -ALTER +ALTER TABLE select count(*) from t_alter_snapshot_test; count @@ -286,69 +286,69 @@ select count(*) from t_alter_snapshot_test; 100 (1 row) drop table t_alter_snapshot_test; -DROP +DROP TABLE -- case 4: concurrently validate check create table t_alter_snapshot_test(a int, b int); -CREATE +CREATE TABLE insert into t_alter_snapshot_test values (1, 1), (2, 2); -INSERT 2 +INSERT 0 2 alter table t_alter_snapshot_test ADD CONSTRAINT mychk CHECK(a > 20) NOT VALID; -ALTER +ALTER TABLE 1: begin; BEGIN 1: alter table t_alter_snapshot_test alter column b type text; -ALTER +ALTER TABLE 2&: alter table t_alter_snapshot_test validate CONSTRAINT mychk; 1: end; -END +COMMIT -- after 1 commit, 2 can go on and it should fail 2<: <... completed> ERROR: check constraint "mychk" of relation "t_alter_snapshot_test" is violated by some row (seg1 127.0.1.1:8003 pid=423039) drop table t_alter_snapshot_test; -DROP +DROP TABLE -- case 5: concurrently domain check create domain domain_snapshot_test as int; -CREATE +CREATE DOMAIN create table t_alter_snapshot_test(i domain_snapshot_test, j int, k int); -CREATE +CREATE TABLE insert into t_alter_snapshot_test values(200,1,1); -INSERT 1 +INSERT 0 1 alter domain domain_snapshot_test ADD CONSTRAINT mychk CHECK(VALUE > 300) NOT VALID; -ALTER +ALTER DOMAIN 1: begin; BEGIN 1: alter table t_alter_snapshot_test alter column k type text; -ALTER +ALTER TABLE 2&: alter domain domain_snapshot_test validate CONSTRAINT mychk; 1:end; -END +COMMIT -- after 1 commit, 2 can go on and it should fail 2<: <... 
completed> ERROR: column "i" of table "t_alter_snapshot_test" contains values that violate the new constraint (seg2 127.0.1.1:7004 pid=39164) drop table t_alter_snapshot_test; -DROP +DROP TABLE drop domain domain_snapshot_test; -DROP +DROP DOMAIN -- case 6: alter table expand table create table t_alter_snapshot_test(a int, b int); -CREATE +CREATE TABLE set allow_system_table_mods = on; SET update gp_distribution_policy set numsegments = 2 where localoid = 't_alter_snapshot_test'::regclass::oid; UPDATE 1 insert into t_alter_snapshot_test select i,i from generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 select gp_segment_id, * from t_alter_snapshot_test; gp_segment_id | a | b ---------------+----+---- @@ -367,15 +367,15 @@ select gp_segment_id, * from t_alter_snapshot_test; 1: begin; BEGIN 1: alter table t_alter_snapshot_test alter column b type text; -ALTER +ALTER TABLE 2&: alter table t_alter_snapshot_test expand table; 1: end; -END +COMMIT -- after 1 commit, 2 can go on and data should not be lost 2<: <... completed> -ALTER +ALTER TABLE select gp_segment_id, * from t_alter_snapshot_test; gp_segment_id | a | b @@ -392,13 +392,13 @@ select gp_segment_id, * from t_alter_snapshot_test; 2 | 5 | 5 (10 rows) drop table t_alter_snapshot_test; -DROP +DROP TABLE -- case 7: alter table set distributed by create table t_alter_snapshot_test(a int, b int) distributed randomly; -CREATE +CREATE TABLE insert into t_alter_snapshot_test select i,i from generate_series(1, 10)i; -INSERT 10 +INSERT 0 10 select count(*) from t_alter_snapshot_test; count ------- @@ -408,15 +408,15 @@ select count(*) from t_alter_snapshot_test; 1: begin; BEGIN 1: alter table t_alter_snapshot_test alter column b type text; -ALTER +ALTER TABLE 2&: alter table t_alter_snapshot_test set distributed by (a); 1: end; -END +COMMIT -- after 1 commit, 2 can continue and data should not be lost 2<: <... 
completed> -ALTER +ALTER TABLE select count(*) from t_alter_snapshot_test; count @@ -424,23 +424,23 @@ select count(*) from t_alter_snapshot_test; 10 (1 row) drop table t_alter_snapshot_test; -DROP +DROP TABLE -- case 8: DML concurrent with Alter Table create table t_alter_snapshot_test(a int, b int); -CREATE +CREATE TABLE ---- test for insert 1: begin; BEGIN 1: insert into t_alter_snapshot_test values (1, 1); -INSERT 1 +INSERT 0 1 2&: alter table t_alter_snapshot_test alter column b type text; 1: end; -END +COMMIT -- 2 can continue, and we should not lose data 2<: <... completed> -ALTER +ALTER TABLE select * from t_alter_snapshot_test; a | b ---+--- @@ -449,19 +449,19 @@ select * from t_alter_snapshot_test; ---- test for update truncate t_alter_snapshot_test; -TRUNCATE +TRUNCATE TABLE insert into t_alter_snapshot_test values (1, 1); -INSERT 1 +INSERT 0 1 1: begin; BEGIN 1: update t_alter_snapshot_test set b = '3'; UPDATE 1 2&: alter table t_alter_snapshot_test alter column b type int using b::int; 1: end; -END +COMMIT -- 2 can continue and we should see the data has been updated 2<: <... completed> -ALTER +ALTER TABLE select * from t_alter_snapshot_test; a | b ---+--- @@ -470,35 +470,35 @@ select * from t_alter_snapshot_test; ---- test for delete truncate t_alter_snapshot_test; -TRUNCATE +TRUNCATE TABLE insert into t_alter_snapshot_test values (1, 1); -INSERT 1 +INSERT 0 1 1: begin; BEGIN 1: delete from t_alter_snapshot_test; DELETE 1 2&: alter table t_alter_snapshot_test alter column b type text; 1: end; -END +COMMIT -- 2 can continue and we should see the data has been deleted 2<: <... 
completed> -ALTER +ALTER TABLE select * from t_alter_snapshot_test; a | b ---+--- (0 rows) drop table t_alter_snapshot_test; -DROP +DROP TABLE -- Case 9: Repeatable Read Isolation Level Test create table t_alter_snapshot_test(a int, b int); -CREATE +CREATE TABLE insert into t_alter_snapshot_test values (1, 1); -INSERT 1 +INSERT 0 1 1: begin; BEGIN 1: insert into t_alter_snapshot_test values (1, 1); -INSERT 1 +INSERT 0 1 2: begin isolation level repeatable read; BEGIN @@ -510,11 +510,11 @@ BEGIN 2&: alter table t_alter_snapshot_test alter column b type text; 1: end; -END +COMMIT -- 2 can continue and after its alter rewrite the heap -- it can see all the data even under repeatable read 2<: <... completed> -ALTER +ALTER TABLE 2: select * from t_alter_snapshot_test; a | b ---+--- @@ -522,7 +522,7 @@ ALTER 1 | 1 (2 rows) 2: end; -END +COMMIT select * from t_alter_snapshot_test; a | b @@ -531,7 +531,7 @@ select * from t_alter_snapshot_test; 1 | 1 (2 rows) drop table t_alter_snapshot_test; -DROP +DROP TABLE ---------------------------------------- -- Test for fixes @@ -542,7 +542,7 @@ DROP -- have completed and no longer needed). And in that case, we -- should still be able to advance properly after restart. create table distributed_snapshot_fix1(a int); -CREATE +CREATE TABLE -- On a primary, burn xids until the next xid is the first one of a segment, -- which has 4096 (ENTRIES_PER_PAGE) * 32 (SLRU_PAGES_PER_SEGMENT) = 131072 xids. @@ -559,9 +559,9 @@ BEGIN (1 row) 1U: end; -END +COMMIT 1U: insert into distributed_snapshot_fix1 values(1); -INSERT 1 +INSERT 0 1 1Uq: ... 1q: ... 
@@ -614,9 +614,9 @@ select pg_ctl(datadir, 'restart') from gp_segment_configuration where role = 'p' -- test the distributed snapshot in the situation of direct dispatch 0: create table direct_dispatch_snapshot_alpha(a int, b int); -CREATE +CREATE TABLE 0: insert into direct_dispatch_snapshot_alpha select i, i from generate_series(1, 10) i; -INSERT 10 +INSERT 0 10 -- Direct dispach doesn't support transaction block. Use multiple '-c' to avoid psql creating single transaction. -- direct dispatch, one log line is expected diff --git a/src/test/isolation2/output/external_table.source b/src/test/isolation2/output/external_table.source index c737b5ac83a..59425df689e 100644 --- a/src/test/isolation2/output/external_table.source +++ b/src/test/isolation2/output/external_table.source @@ -1,20 +1,20 @@ CREATE EXTENSION IF NOT EXISTS gp_inject_fault; -CREATE +CREATE EXTENSION -- start_ignore DROP EXTERNAL TABLE IF EXISTS exttab_cursor_1; -DROP +DROP FOREIGN TABLE DROP EXTERNAL TABLE IF EXISTS exttab_cursor_2; -DROP +DROP FOREIGN TABLE -- end_ignore -- Define a cursor on an external table scan query with segment reject limit reached -- does not reach reject limit CREATE EXTERNAL TABLE exttab_cursor_1( i int, j text ) LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|') LOG ERRORS SEGMENT REJECT LIMIT 10; -CREATE +CREATE EXTERNAL TABLE -- reaches reject limit, use the same err table CREATE EXTERNAL TABLE exttab_cursor_2( i int, j text ) LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|') LOG ERRORS SEGMENT REJECT LIMIT 2; -CREATE +CREATE EXTERNAL TABLE -- Test to make sure error logs are populated when cursors are used -- The total number of formatting errors reported by the query is dependant on the number of rows parsed on -- the segments before COMMIT finishes. So mask the NOTICE by setting the client_min_messages guc to WARNING. 
@@ -23,7 +23,7 @@ SET 10: BEGIN; BEGIN 10: DECLARE exttab_cur1 no scroll cursor FOR SELECT e1.i, e2.j from exttab_cursor_1 e1 INNER JOIN exttab_cursor_1 e2 ON e1.i = e2.i UNION ALL SELECT e1.i, e2.j from exttab_cursor_1 e1 INNER JOIN exttab_cursor_1 e2 ON e1.i = e2.i UNION ALL SELECT e1.i, e2.j from exttab_cursor_1 e1 INNER JOIN exttab_cursor_1 e2 ON e1.i = e2.i; -DECLARE +DECLARE CURSOR 10: COMMIT; COMMIT 10: reset CLIENT_MIN_MESSAGES; @@ -100,7 +100,7 @@ ERROR: segment reject limit reached, aborting operation (seg0 slice1 @hostname DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i CONTEXT: External table exttab_cursor_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i 10: COMMIT; -COMMIT +ROLLBACK -- Reset injected faults SELECT gp_inject_fault('transaction_abort_failure', 'reset', dbid) FROM gp_segment_configuration WHERE content=0 AND role='p'; @@ -135,7 +135,7 @@ SELECT gp_inject_fault('interconnect_setup_palloc', 'reset', dbid) FROM gp_segme 10: BEGIN; BEGIN 10: DECLARE exttab_cur1 no scroll cursor FOR (SELECT e1.i, e2.j from exttab_cursor_1 e1 INNER JOIN exttab_cursor_1 e2 ON e1.i = e2.i ORDER BY e1.i); -DECLARE +DECLARE CURSOR -- Should not fail 10: FETCH exttab_cur1; i | j @@ -196,7 +196,7 @@ GP_IGNORE:-- end_ignore -- Create external table with delimiter off CREATE EXTERNAL WEB TABLE ext_delim_off ( junk text) execute 'echo hi' on coordinator FORMAT 'text' (delimiter 'OFF' null E'\\N' escape E'\\'); -CREATE +CREATE EXTERNAL TABLE -- Query the ext_delim_off table SELECT * FROM ext_delim_off; @@ -212,7 +212,7 @@ GP_IGNORE:-- end_ignore -- Create external table(format text) with delimiter off, and a row with 'O' CREATE EXTERNAL WEB TABLE ext_delimiter_off_text (a text) EXECUTE E'echo O' ON COORDINATOR FORMAT 'text' (delimiter 'OFF') ENCODING 'UTF8'; -CREATE +CREATE EXTERNAL TABLE SELECT * FROM ext_delimiter_off_text; a @@ -227,7 +227,7 @@ GP_IGNORE:-- end_ignore -- Create external table(format 
csv) with delimiter off, and a row with 'O' CREATE EXTERNAL WEB TABLE ext_delimiter_off_csv (a text) EXECUTE E'echo O' ON COORDINATOR FORMAT 'csv' (delimiter 'OFF') ENCODING 'UTF8'; -CREATE +CREATE EXTERNAL TABLE SELECT * FROM ext_delimiter_off_csv; a @@ -237,12 +237,12 @@ SELECT * FROM ext_delimiter_off_csv; -- start_ignore DROP EXTERNAL TABLE IF EXISTS exttab_delimiter_escape_off; -DROP +DROP FOREIGN TABLE -- end_ignore -- Create external table(text format) with both delimiter and escape off CREATE EXTERNAL WEB TABLE exttab_delimiter_escape_off(a text) EXECUTE E'cat @abs_srcdir@/data/exttab_escape_off.data' ON SEGMENT 0 format 'TEXT' (delimiter 'OFF' escape 'OFF') encoding 'UTF8' LOG ERRORS SEGMENT REJECT LIMIT 10; -CREATE +CREATE EXTERNAL TABLE SELECT * FROM exttab_delimiter_escape_off ORDER BY a; a diff --git a/src/test/isolation2/output/fts_manual_probe.source b/src/test/isolation2/output/fts_manual_probe.source index ed3900646d3..13a24dbf685 100644 --- a/src/test/isolation2/output/fts_manual_probe.source +++ b/src/test/isolation2/output/fts_manual_probe.source @@ -24,13 +24,41 @@ select gp_inject_fault('all', 'reset', 1) from master(); (1 row) create temp table fts_probe_results(seq serial, seq_name varchar(20), current_started int, expected_start_delta int, current_done int, expected_done_delta int); -CREATE +CREATE TABLE + +-- create extension only on coordinator since the fts process is only on coordinator +create or replace function fts_probe_stats() returns table ( start_count int, done_count int, status_version int2 ) as '/@abs_builddir@/../regress/regress.so', 'gp_fts_probe_stats' language c execute on coordinator reads sql data; +CREATE FUNCTION create or replace view get_raw_stats as select seq, seq_name, current_started, expected_start_delta, current_started - min(current_started) over () as actual_start_delta, -- actual_start_delta = current_started - initial_started current_done, expected_done_delta, current_done - min(current_done) over () as 
actual_done_delta -- actual_done_delta = current_done - initial_done from fts_probe_results order by seq; -CREATE +CREATE VIEW create or replace view get_stats as select seq, seq_name, expected_start_delta, actual_start_delta, expected_done_delta, actual_done_delta from get_raw_stats order by seq desc limit 1; -CREATE +CREATE VIEW + +drop function if exists insert_expected_stats(int, int); +DROP FUNCTION +create or replace function insert_expected_stats(seq_name varchar(20), expected_start_delta int, expected_done_delta int) returns void as $$ INSERT INTO fts_probe_results (seq_name, current_started, expected_start_delta, current_done, expected_done_delta) /* inside a function */ SELECT seq_name, /* inside a function */ start_count AS current_started, /* inside a function */ expected_start_delta, /* inside a function */ done_count AS current_done, /* inside a function */ expected_done_delta /* inside a function */ FROM fts_probe_stats(); /* inside a function */ $$ language sql volatile; +CREATE FUNCTION + +-- ensure the internal regular probes do not affect our test +!\retcode gpconfig -c gp_fts_probe_interval -v 3600; +-- start_ignore +20190730:11:15:27:045870 gpconfig:office-5-75:dkrieger-[INFO]:-completed successfully with parameters '-c gp_fts_probe_interval -v 3600' + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +20190730:11:15:27:045929 gpstop:office-5-75:dkrieger-[INFO]:-Starting gpstop with args: -u +20190730:11:15:27:045929 gpstop:office-5-75:dkrieger-[INFO]:-Gathering information and validating the environment... +20190730:11:15:27:045929 gpstop:office-5-75:dkrieger-[INFO]:-Obtaining Greenplum Coordinator catalog information +20190730:11:15:27:045929 gpstop:office-5-75:dkrieger-[INFO]:-Obtaining Segment details from coordinator... 
+20190730:11:15:27:045929 gpstop:office-5-75:dkrieger-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.575.g59811832fc build dev' +20190730:11:15:27:045929 gpstop:office-5-75:dkrieger-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore +(exited with code 0) -- ensure there is no in progress ftsLoop after reloading the gp_fts_probe_interval select gp_request_fts_probe_scan(); diff --git a/src/test/isolation2/output/gp_collation.source b/src/test/isolation2/output/gp_collation.source index 47d261237cf..cde62b95d59 100644 --- a/src/test/isolation2/output/gp_collation.source +++ b/src/test/isolation2/output/gp_collation.source @@ -13,7 +13,7 @@ SELECT gp_inject_fault('collate_locale_os_lookup', 'error', dbid) from gp_segmen -- The fault injector should prevent all collations from being created by -- pg_import_system_collations(). create schema import_collation_schema; -CREATE +CREATE SCHEMA select pg_import_system_collations( (select oid from pg_namespace where nspname = 'import_collation_schema') ); ERROR: fault triggered, fault name:'collate_locale_os_lookup' fault type:'error' (seg0 127.0.1.1:25432 pid=19176) @@ -48,7 +48,7 @@ select gp_segment_id, collname from pg_collation where collname='collation_from_ ---------------+---------- (0 rows) create collation "collation_from_disk" (locale="@gp_syslocale@"); -CREATE +CREATE COLLATION select gp_segment_id, collname from pg_collation where collname='collation_from_disk'; gp_segment_id | collname ---------------+--------------------- @@ -71,7 +71,7 @@ select count(distinct oid) from gp_dist_random('pg_collation') where oid=(select -- Drop the collation, and confirm it is actually gone -- drop collation "collation_from_disk"; -DROP +DROP COLLATION select gp_segment_id, collname from pg_collation where collname='collation_from_disk'; gp_segment_id | collname ---------------+---------- @@ -90,7 +90,7 @@ select gp_segment_id, collname from pg_collation where 
collname='collation_from_ ---------------+---------- (0 rows) create collation "collation_from_db" from "POSIX"; -CREATE +CREATE COLLATION select gp_segment_id, collname from pg_collation where collname='collation_from_db'; gp_segment_id | collname ---------------+------------------- @@ -115,10 +115,10 @@ select count(distinct oid) from gp_dist_random('pg_collation') where oid=(select -- should not be created on any segment. -- create collation "missing_on_one_segment" from "C"; -CREATE +CREATE COLLATION -- Drop collation from one segment 2U: drop collation "missing_on_one_segment"; -DROP +DROP COLLATION select gp_segment_id, collname from gp_dist_random('pg_collation') where oid=(select oid from pg_collation where collname='missing_on_one_segment'); gp_segment_id | collname ---------------+------------------------ @@ -141,11 +141,11 @@ select gp_segment_id, collname from gp_dist_random('pg_collation') where collnam -- Clean up table missing_on_one_segement -1U: drop collation "missing_on_one_segment"; -DROP +DROP COLLATION 0U: drop collation "missing_on_one_segment"; -DROP +DROP COLLATION 1U: drop collation "missing_on_one_segment"; -DROP +DROP COLLATION -- -- Simulate a locale missing on one segment at collation creation. If this @@ -191,7 +191,7 @@ select gp_segment_id, collname from pg_collation where collname='locale_missing_ ---------------+---------- (0 rows) create collation "locale_missing_on_one_segment" (locale="@gp_syslocale@"); -CREATE +CREATE COLLATION -- Confirm is on all segments. select gp_segment_id, collname from pg_collation where collname='locale_missing_on_one_segment'; gp_segment_id | collname @@ -207,11 +207,11 @@ select gp_segment_id, collname from gp_dist_random('pg_collation') where collnam (3 rows) create table uses_collation (a text collate locale_missing_on_one_segment); -CREATE +CREATE TABLE -- The case here aims to insert two tuples to seg0. -- Under jump consistent hash, ('abc'), ('012') goes to seg0. 
insert into uses_collation values ('abc'), ('012'); -INSERT 2 +INSERT 0 2 SELECT gp_inject_fault('collate_locale_os_lookup', 'error', dbid) from gp_segment_configuration where content = 0 and role = 'p'; gp_inject_fault diff --git a/src/test/isolation2/output/idle_gang_cleaner.source b/src/test/isolation2/output/idle_gang_cleaner.source index 7510ccd1b53..4904046404c 100644 --- a/src/test/isolation2/output/idle_gang_cleaner.source +++ b/src/test/isolation2/output/idle_gang_cleaner.source @@ -4,18 +4,26 @@ -- clean up the idle writer gangs after the timeout, -- no snapshot collision error should occur. +create or replace language plpython3u; +CREATE LANGUAGE +create extension if not exists gp_inject_fault; +CREATE EXTENSION + +CREATE OR REPLACE FUNCTION idle_gang_pressure_test() RETURNS VOID LANGUAGE plpython3u AS $$ plpy.execute("SET gp_vmem_idle_resource_timeout = 1") for i in range(1000): plpy.execute("SELECT count(*) from idle_gang_cleaner_t a join idle_gang_cleaner_t b using (c2);") plpy.execute("RESET gp_vmem_idle_resource_timeout") $$; +CREATE FUNCTION + set gp_vmem_idle_resource_timeout to '0.5s'; SET set gp_snapshotadd_timeout to 0; SET create table target_session_id_t(target_session_id int) DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE insert into target_session_id_t values(current_setting('gp_session_id')::int); -INSERT 1 +INSERT 0 1 create table idle_gang_cleaner_t (c1 int, c2 int); -CREATE +CREATE TABLE 0U: select gp_inject_fault('proc_kill', 'suspend', 2, target_session_id) from target_session_id_t; gp_inject_fault @@ -47,10 +55,43 @@ select count(*) from idle_gang_cleaner_t a join idle_gang_cleaner_t b using (c2) Success: (1 row) +-- Release idle Reader gangs in transaction or transaction block +0U: select gp_inject_fault('proc_kill', 'suspend', 2, target_session_id) from target_session_id_t; + gp_inject_fault +----------------- + Success: +(1 row) + +begin; +BEGIN +select count(*) from idle_gang_cleaner_t a join idle_gang_cleaner_t b using (c2) ; + 
count +------- + 0 +(1 row) +0U: select gp_inject_fault('proc_kill', 'wait_until_triggered', '','','', 1, 1, 1, 2 ,target_session_id) from target_session_id_t; + gp_inject_fault +----------------- + Success: +(1 row) +0U: select gp_inject_fault('proc_kill', 'reset', 2, target_session_id) from target_session_id_t; + gp_inject_fault +----------------- + Success: +(1 row) +end; +COMMIT + +select idle_gang_pressure_test(); + idle_gang_pressure_test +------------------------- + +(1 row) + drop table target_session_id_t; -DROP +DROP TABLE drop table idle_gang_cleaner_t; -DROP +DROP TABLE reset gp_vmem_idle_resource_timeout; RESET reset gp_snapshotadd_timeout; diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/corner.source b/src/test/isolation2/output/parallel_retrieve_cursor/corner.source index 438a106dd9a..3f32d219d8a 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/corner.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/corner.source @@ -1,48 +1,48 @@ DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 DROP TABLE IF EXISTS t11; -DROP +DROP TABLE CREATE TABLE t11 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t11 select generate_series(1,100000); -INSERT 100000 +INSERT 0 100000 DROP TABLE IF EXISTS t2; -DROP +DROP TABLE CREATE TABLE t2 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE DROP TABLE IF EXISTS t3; -DROP +DROP TABLE CREATE TABLE t3 (a text) DISTRIBUTED by (a); -CREATE +CREATE TABLE COPY t3 FROM PROGRAM 'for i in `seq 1 10`; do echo ${i}test; done'; COPY 10 DROP TABLE IF EXISTS t4; -DROP +DROP TABLE CREATE TABLE t4 (a text) DISTRIBUTED by (a); -CREATE +CREATE TABLE DROP TABLE IF EXISTS t5; -DROP +DROP TABLE CREATE TABLE t5 (b INT) DISTRIBUTED by (b); -CREATE +CREATE TABLE INSERT INTO t5 SELECT GENERATE_SERIES(1, 10); -INSERT 10 +INSERT 0 10 -- Test1: close not executed 
PARALLEL RETRIEVE CURSOR 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY (3 rows) 1: CLOSE c1; -CLOSE +CLOSE CURSOR -- check no endpoint info 1: SELECT auth_token,state FROM gp_get_endpoints() WHERE cursorname='c1'; auth_token | state @@ -76,14 +76,14 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c11 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t11; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 11 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c11'; endpoint_id11 | token_id | host_id | port_id | READY endpoint_id11 | token_id | host_id | port_id | READY endpoint_id11 | token_id | host_id | port_id | READY (3 rows) 1: CLOSE c11; -CLOSE +CLOSE CURSOR -- check no endpoint info 1: SELECT auth_token,state FROM gp_get_endpoints() WHERE cursorname='c11'; auth_token | state @@ -117,27 +117,27 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c5 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c6 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c7 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE 
CURSOR 1: DECLARE c8 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c9 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c10 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c11 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: ROLLBACK; ROLLBACK -- check no endpoint info @@ -150,7 +150,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: SELECT * FROM gp_wait_parallel_retrieve_cursor('c2', -1); ERROR: cursor "c2" does not exist 1: ROLLBACK; @@ -158,7 +158,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: SELECT * FROM gp_wait_parallel_retrieve_cursor('c2', 0); ERROR: cursor "c2" does not exist 1: ROLLBACK; @@ -173,27 +173,27 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c5 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c6 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c7 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c8 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c9 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c10 PARALLEL 
RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c11 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -377,7 +377,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE CURSOR 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; ERROR: cursor "c1" already exists -- check no endpoint info @@ -389,9 +389,9 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE CURSOR 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -559,7 +559,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1 ORDER BY a LIMIT 10; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY (1 row) @@ -634,7 +634,7 @@ Sessions not started cannot be quit 1: BEGIN; BEGIN 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1 ORDER BY a LIMIT 0; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY (1 row) @@ -704,7 +704,7 @@ Sessions not started cannot be quit 1: BEGIN; BEGIN 1: DECLARE c2 PARALLEL 
RETRIEVE CURSOR FOR SELECT * FROM t2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -782,7 +782,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t3; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -870,7 +870,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t4; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -948,7 +948,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1 WHERE a = 50; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY (1 row) @@ -995,7 +995,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT SUM(a) FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY (1 row) @@ -1041,7 +1041,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT AVG(a) FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE 
CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY (1 row) @@ -1081,7 +1081,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT COUNT(*) FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY (1 row) @@ -1117,7 +1117,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1, t5 where t1.a = t5.b; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -1175,7 +1175,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT COUNT(*) FROM t1, t5 where t1.a = t5.b; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY (1 row) @@ -1206,7 +1206,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -1355,9 +1355,9 @@ DECLARE t (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 
4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -1524,7 +1524,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -1677,9 +1677,9 @@ DECLARE t (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -1848,7 +1848,7 @@ BEGIN 1: SAVEPOINT s1; SAVEPOINT 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -1868,11 +1868,11 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: SAVEPOINT s1; SAVEPOINT 1: CLOSE c1; -CLOSE +CLOSE CURSOR 1: ROLLBACK TO s1; ROLLBACK 1: SELECT * FROM pg_cursors WHERE name='c1'; @@ -1887,7 +1887,7 @@ COMMIT 1: BEGIN; BEGIN 1: DECLARE c21a PARALLEL RETRIEVE CURSOR FOR SELECT COUNT(*) from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'get_tuple_cell TOKEN21a 1 1 ; create_match_sub $TOKEN21a token21a' : SELECT auth_token FROM 
gp_get_endpoints() WHERE cursorname='c21a'; auth_token ---------------------------------- @@ -1895,9 +1895,9 @@ DECLARE (1 row) -- Declare more cursors in the same session should not change the first one's token 1: DECLARE c21b PARALLEL RETRIEVE CURSOR FOR SELECT COUNT(*) from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c21c PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: SELECT auth_token FROM gp_get_endpoints() WHERE cursorname='c21a'; auth_token ---------------------------------- @@ -1915,7 +1915,7 @@ Sessions not started cannot be quit 1: BEGIN; BEGIN 1: DECLARE c22 PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM generate_series(1,10); -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 22 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c22'; endpoint_id22 | token_id | host_id | port_id | READY (1 row) @@ -1992,7 +1992,7 @@ Sessions not started cannot be quit 1: BEGIN; BEGIN 1: DECLARE c23 PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT relname FROM pg_class where relname='pg_class'; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 23 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c23'; endpoint_id23 | token_id | host_id | port_id | READY (1 row) @@ -2058,11 +2058,11 @@ Sessions not started cannot be quit 1: BEGIN; BEGIN 1: DECLARE "x12345678901234567890123456789012345678901234567890123456789x" PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM t5; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE "x123456789012345678901234567890123456789012345678901234567890123456789x" PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM t5; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE "x1234567890123456789012345678901234567890123456789012345678901x" PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM t5; -DECLARE +DECLARE 
PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 24 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='x12345678901234567890123456789012345678901234567890123456789x'; endpoint_id24 | token_id | host_id | port_id | READY endpoint_id24 | token_id | host_id | port_id | READY @@ -2165,7 +2165,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE "x123456789012345678901234567890123456789012345678901234567890123456789x" PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE "x123456789012345678901234567890123456789012345678901234567890123456789y" PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM t1; ERROR: cursor "x12345678901234567890123456789012345678901234567890123456789012" already exists 1: ROLLBACK; @@ -2175,7 +2175,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t5; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 26 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id26 | token_id | host_id | port_id | READY endpoint_id26 | token_id | host_id | port_id | READY @@ -2201,7 +2201,7 @@ ERROR: another session (pid: 46988) used the endpoint and completed retrieving -- check no endpoint info 1: CLOSE C2; -CLOSE +CLOSE CURSOR 1: COMMIT; COMMIT @@ -2209,7 +2209,7 @@ COMMIT 1: BEGIN; BEGIN 1: DECLARE c27 PARALLEL RETRIEVE CURSOR FOR SELECT generate_series(1,10); -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 27 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c27'; endpoint_id27 | token_id | host_id | port_id | READY (1 row) @@ -2287,14 +2287,14 @@ ERROR: Parallel retrieve cursor should run on the dispatcher only -- Final: clean up DROP TABLE t1; -DROP +DROP TABLE DROP TABLE t11; -DROP +DROP TABLE DROP TABLE t2; -DROP +DROP TABLE DROP TABLE t3; 
-DROP +DROP TABLE DROP TABLE t4; -DROP +DROP TABLE DROP TABLE t5; -DROP +DROP TABLE diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/explain.source b/src/test/isolation2/output/parallel_retrieve_cursor/explain.source index e26fe7bd9fa..fe6e3fefe1d 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/explain.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/explain.source @@ -14,21 +14,21 @@ set enable_incremental_sort=on; SET DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 CREATE TABLE t2 (a INT) DISTRIBUTED RANDOMLY; -CREATE +CREATE TABLE insert into t2 select generate_series(1,100); -INSERT 100 +INSERT 0 100 DROP TABLE IF EXISTS rt1; -DROP +DROP TABLE CREATE TABLE rt1 (a INT) DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE insert into rt1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 -- PARALLEL RETRIEVE CURSOR with other options (WITH HOLD/SCROLL) is not supported EXPLAIN (COSTS false) DECLARE c1 PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM t1; diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/fault_inject.source b/src/test/isolation2/output/parallel_retrieve_cursor/fault_inject.source index 4539da90f54..7013130a2f2 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/fault_inject.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/fault_inject.source @@ -1,11 +1,11 @@ -- @Description Tests with faut inject -- DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 --------- Test1: fault injection end-point shared memory slot full on QE 2: SELECT gp_inject_fault('alloc_endpoint_slot_full', 'reset', 2); @@ -48,7 +48,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; 
-DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -197,7 +197,7 @@ DECLARE t (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR -- check no endpoint info 1: SELECT auth_token,state FROM gp_get_endpoints() WHERE cursorname='c1' or endpointname='DUMMYENDPOINTNAME'; auth_token | state @@ -239,7 +239,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -301,7 +301,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 3 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id3 | token_id | host_id | port_id | READY endpoint_id3 | token_id | host_id | port_id | READY @@ -358,7 +358,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 4 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id4 | token_id | host_id | port_id | READY endpoint_id4 | token_id | host_id | port_id | READY @@ -476,7 +476,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 5 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE 
cursorname='c1'; endpoint_id5 | token_id | host_id | port_id | READY endpoint_id5 | token_id | host_id | port_id | READY @@ -576,7 +576,7 @@ SELECT gp_inject_fault('fetch_tuples_from_endpoint', 'suspend', '', '', '', 5, 5 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 7 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id7 | token_id | host_id | port_id | READY endpoint_id7 | token_id | host_id | port_id | READY @@ -610,7 +610,7 @@ DECLARE f (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR SELECT gp_inject_fault('fetch_tuples_from_endpoint', 'resume', 2); gp_inject_fault @@ -638,7 +638,7 @@ ERROR: endpoint is not available because the parallel retrieve cursor was abort 1: SELECT * FROM gp_wait_parallel_retrieve_cursor('c1', 0); ERROR: cursor "c1" does not exist 1: END; -END +ROLLBACK 1: SELECT gp_inject_fault('fetch_tuples_from_endpoint', 'reset', 2); gp_inject_fault ----------------- @@ -656,15 +656,15 @@ END (1 row) DROP TABLE t1; -DROP +DROP TABLE -- Test7: error inject at the 1000th time while retrieving tuples from endpoint. other retrieve session finished. 
DROP TABLE IF EXISTS t2; -DROP +DROP TABLE CREATE TABLE t2 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t2 select generate_series(1,10000); -INSERT 10000 +INSERT 0 10000 SELECT gp_inject_fault('fetch_tuples_from_endpoint', 'reset', dbid) FROM gp_segment_configuration WHERE content=1 AND role='p'; gp_inject_fault @@ -700,7 +700,7 @@ SELECT gp_inject_fault('fetch_tuples_from_endpoint', 'suspend', '', '', '', 800, 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * from t2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 6 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id6 | token_id | host_id | port_id | READY endpoint_id6 | token_id | host_id | port_id | READY @@ -770,4 +770,4 @@ ROLLBACK (1 row) DROP TABLE t2; -DROP +DROP TABLE diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/privilege.source b/src/test/isolation2/output/parallel_retrieve_cursor/privilege.source index 30a0bc1f158..1ac85b2cb3b 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/privilege.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/privilege.source @@ -1,36 +1,36 @@ -- @Description Tests the privileges related with endpoints -- DROP TABLE IF EXISTS t1; -DROP +DROP TABLE DROP USER IF EXISTS adminuser; -DROP +DROP ROLE DROP USER IF EXISTS u1; -DROP +DROP ROLE DROP USER IF EXISTS uu1; -DROP +DROP ROLE DROP USER IF EXISTS u2; -DROP +DROP ROLE CREATE USER adminuser; -CREATE +CREATE ROLE ALTER USER adminuser WITH SUPERUSER; -ALTER +ALTER ROLE CREATE USER u1 with CREATEROLE; -CREATE +CREATE ROLE CREATE USER u2; -CREATE +CREATE ROLE SET SESSION AUTHORIZATION u1; SET CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,10); -INSERT 10 +INSERT 0 10 CREATE USER uu1; -CREATE +CREATE ROLE GRANT ALL PRIVILEGES ON t1 TO uu1; GRANT GRANT uu1 TO u1; -GRANT +GRANT ROLE RESET SESSION 
AUTHORIZATION; RESET @@ -47,7 +47,7 @@ SET 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -62,10 +62,10 @@ SET (1 row) --- c2 is declared by u1 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR --- c12 is declared by u1 on entry db 1: DECLARE c12 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM generate_series(1,10); -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR --- u1 is able to see all endpoints created by himself. 1: SELECT DISTINCT(cursorname), username FROM gp_get_endpoints(); cursorname | username @@ -161,7 +161,7 @@ SET u1 | uu1 (1 row) 1: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 2: @post_run 'parse_endpoint_info 3 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c3'; endpoint_id3 | token_id | host_id | port_id | READY endpoint_id3 | token_id | host_id | port_id | READY @@ -215,7 +215,7 @@ HINT: Use the same user as the PARALLEL RETRIEVE CURSOR creator to retrieve. 1<: <... completed> FAILED: Execution failed 1: END; -END +COMMIT 2q: ... 3: @pre_run 'unset RETRIEVE_USER; echo $RAW_STR ' : SELECT 1; ?column? @@ -237,12 +237,12 @@ SET BEGIN -- Used to let super login to retrieve session so then it can change user in session. 
1: DECLARE c0 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: SET SESSION AUTHORIZATION u1; SET --- c1 is declared and executed by u1 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1&: SELECT * FROM gp_wait_parallel_retrieve_cursor('c1', -1); 2: @post_run 'parse_endpoint_info 40 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c0'; @@ -305,7 +305,7 @@ ERROR: canceling statement due to user request 1: CLOSE c1; ERROR: current transaction is aborted, commands ignored until end of transaction block 1: END; -END +ROLLBACK 1q: ... 3q: ... 0Rq: ... @@ -320,14 +320,14 @@ SET 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR --- Close c1 by adminuser 1: SET SESSION AUTHORIZATION adminuser; SET 1: CLOSE c1; -CLOSE +CLOSE CURSOR 1: END; -END +COMMIT --------- Test4: u2 should NOT be able to see or retrieve from u1's endpoints @@ -342,7 +342,7 @@ SET BEGIN -- Used to let super login to retrieve session so then it can change user in session. 
1: DECLARE c0 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 50 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c0'; endpoint_id50 | token_id | host_id | port_id | READY endpoint_id50 | token_id | host_id | port_id | READY @@ -352,7 +352,7 @@ DECLARE SET --- c4 is declared and executed by u1 1: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 5 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c4'; endpoint_id5 | token_id | host_id | port_id | READY endpoint_id5 | token_id | host_id | port_id | READY @@ -430,7 +430,7 @@ ERROR: canceling statement due to user request 1: CLOSE c4; ERROR: current transaction is aborted, commands ignored until end of transaction block 1: END; -END +ROLLBACK 0Rq: ... 1Rq: ... 2Rq: ... diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/replicated_table.source b/src/test/isolation2/output/parallel_retrieve_cursor/replicated_table.source index 75aa878a48a..eaa724d73d7 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/replicated_table.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/replicated_table.source @@ -1,11 +1,11 @@ -- @Description Tests the PARALLEL RETRIEVE CURSOR for select replcated table -- DROP TABLE IF EXISTS rt1; -DROP +DROP TABLE CREATE TABLE rt1 (a INT) DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE insert into rt1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 --------- Test1: Basic test for PARALLEL RETRIEVE CURSOR on replicated table @@ -16,7 +16,7 @@ INSERT 100 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: SELECT sc.content = current_setting('gp_session_id')::int % 3 AS diff FROM gp_get_endpoints() ep, 
gp_segment_configuration sc WHERE ep.gp_segment_id = sc.content; diff ------ @@ -31,27 +31,27 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 ORDER BY a; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 2: BEGIN; BEGIN 2: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 ORDER BY a; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 3: BEGIN; BEGIN 3: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 ORDER BY a; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 4: BEGIN; BEGIN 4: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 ORDER BY a; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 5: BEGIN; BEGIN 5: DECLARE c5 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 ORDER BY a; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 6: BEGIN; BEGIN 6: DECLARE c6 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 ORDER BY a; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR -- Here because replicated table will execute on seg id: session_id % segment_number -- Just declare & CHECK PARALLEL RETRIEVE CURSORs in all segment_number (i.e. 
3) sessions, @@ -161,27 +161,27 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 2: BEGIN; BEGIN 2: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 3: BEGIN; BEGIN 3: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 4: BEGIN; BEGIN 4: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 5: BEGIN; BEGIN 5: DECLARE c5 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 6: BEGIN; BEGIN 6: DECLARE c6 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR -- Here because replicated table will execute on seg id: session_id % segment_number -- Just declare & CHECK PARALLEL RETRIEVE CURSORs in all segment_number (i.e. 
3) sessions, @@ -291,27 +291,27 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1 OR MOD(a,3)=2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 2: BEGIN; BEGIN 2: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1 OR MOD(a,3)=2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 3: BEGIN; BEGIN 3: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1 OR MOD(a,3)=2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 4: BEGIN; BEGIN 4: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1 OR MOD(a,3)=2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 5: BEGIN; BEGIN 5: DECLARE c5 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1 OR MOD(a,3)=2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 6: BEGIN; BEGIN 6: DECLARE c6 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM rt1 WHERE MOD(a,3)=1 OR MOD(a,3)=2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR -- Here because replicated table will execute on seg id: session_id % segment_number -- Just declare & CHECK PARALLEL RETRIEVE CURSORs in all segment_number (i.e. 3) sessions, diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/retrieve_quit_check.source b/src/test/isolation2/output/parallel_retrieve_cursor/retrieve_quit_check.source index ccb3dbe61f8..a941fe8a9d2 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/retrieve_quit_check.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/retrieve_quit_check.source @@ -2,21 +2,21 @@ -- Test quitting retrieve sessions interfering with multiple endpoints when calling -- gp_wait_parallel_retrieve_cursor. 
DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 --------- Test1: test for quit retrieve will cancel all unfinished QE backend 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -191,7 +191,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio (0 rows) 1: END; -END +ROLLBACK 2: SELECT cursorname, state FROM gp_get_endpoints(); cursorname | state diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/retrieve_quit_wait.source b/src/test/isolation2/output/parallel_retrieve_cursor/retrieve_quit_wait.source index 843a2e50834..80ecb233932 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/retrieve_quit_wait.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/retrieve_quit_wait.source @@ -2,21 +2,21 @@ -- Test quitting retrieve sessions interfering with multiple endpoints when calling -- gp_wait_parallel_retrieve_cursor. 
DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 --------- Test1: test for wait for a finished endpoint 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -268,11 +268,11 @@ ERROR: canceling MPP operation: "Endpoint retrieve session is quitting. All unf 1: BEGIN; BEGIN 1: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c5 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: DECLARE c6 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 4 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c4'; endpoint_id4 | token_id | host_id | port_id | READY endpoint_id4 | token_id | host_id | port_id | READY diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/security.source b/src/test/isolation2/output/parallel_retrieve_cursor/security.source index 24c492f6732..ba98a51f3c1 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/security.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/security.source @@ -1,14 +1,14 @@ -- @Description Tests retrieve session functionality restriction for security. 
-- DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 CREATE OR REPLACE FUNCTION myappend(anyarray, anyelement) RETURNS anyarray AS $$ SELECT $1 || $2 $$ LANGUAGE SQL; -CREATE +CREATE FUNCTION -- Test: Retrieve login without valid token. 1: @pre_run 'export RETRIEVE_TOKEN="123" ; echo $RAW_STR' : SELECT 1; @@ -25,7 +25,7 @@ CREATE 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/special_query.source b/src/test/isolation2/output/parallel_retrieve_cursor/special_query.source index 81201f3033d..20baa684a5f 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/special_query.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/special_query.source @@ -3,12 +3,12 @@ --------- Test1: test for PARALLEL RETRIEVE CURSOR on select transient record types DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 AS SELECT generate_series(1, 10) AS x DISTRIBUTED by (x); -CREATE 10 +SELECT 10 CREATE OR REPLACE FUNCTION make_record(n int) RETURNS RECORD LANGUAGE plpgsql AS ' BEGIN RETURN CASE n WHEN 1 THEN ROW(1) WHEN 2 THEN ROW(1, 2) WHEN 3 THEN ROW(1, 2, 3) WHEN 4 THEN ROW(1, 2, 3, 4) ELSE ROW(1, 2, 3, 4, 5) END; END; '; -CREATE +CREATE FUNCTION SELECT make_record(x) FROM t1; make_record @@ -28,7 +28,7 @@ SELECT make_record(x) FROM t1; 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT make_record(x) FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4': SELECT 
endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -93,13 +93,13 @@ DECLARE t (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR --------- Test1.1: test for PARALLEL RETRIEVE CURSOR on select transient record types and multi-retrieve 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT make_record(x) FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -194,12 +194,12 @@ DECLARE t (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR 1: CREATE OR REPLACE FUNCTION make_record2(n int) RETURNS RECORD LANGUAGE plpgsql AS ' BEGIN RETURN CASE n WHEN 1 THEN ROW(1,2,3) WHEN 2 THEN ROW(1,2,3,4) WHEN 3 THEN ROW(1,2,3,4,5) WHEN 4 THEN ROW(1,2,3,4,5,6) ELSE ROW(1,2,3,4,5,6,7) END; END; '; -CREATE +CREATE FUNCTION 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT make_record2(x) FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4': SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -266,7 +266,7 @@ DECLARE t (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR --------- Test2: test for PARALLEL RETRIEVE CURSOR on select with join statement. -- there was a hang issue when declaring PARALLEL RETRIEVE CURSOR with join clause. 
@@ -295,12 +295,12 @@ CLOSE (exited with code 0) DROP TABLE IF EXISTS t2; -DROP +DROP TABLE CREATE TABLE t2 (a BIGINT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t2 select generate_series(1,2000); -INSERT 2000 +INSERT 0 2000 SET gp_interconnect_snd_queue_depth=1; SET @@ -316,7 +316,7 @@ BEGIN 512 (1 row) 2: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t2 join t2 t12 on true; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 2: SELECT state FROM gp_get_endpoints() WHERE cursorname='c2'; state ------- @@ -325,13 +325,13 @@ DECLARE READY (3 rows) 2: CLOSE c2; -CLOSE +CLOSE CURSOR 2: END; -END +COMMIT -- cleanup DROP TABLE t2; -DROP +DROP TABLE !\retcode gpconfig -r gp_max_packet_size; -- start_ignore 20210729:16:23:43:000489 gpconfig:lhlgpdb7:gpadmin-[INFO]:-completed successfully with parameters '-r gp_max_packet_size' diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/status_check.source b/src/test/isolation2/output/parallel_retrieve_cursor/status_check.source index f05bed21e10..caea003c190 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/status_check.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/status_check.source @@ -2,17 +2,17 @@ -- need to fault injection to gp_wait_parallel_retrieve_cursor() -- DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 --------- Test1: Basic test for parallel retrieve interface & close cursor 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -165,7 +165,7 @@ DECLARE t (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR -- check no endpoint 
info 1: SELECT auth_token,state FROM gp_get_endpoints() WHERE cursorname='c1'; auth_token | state @@ -199,7 +199,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -467,7 +467,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 3 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c3'; endpoint_id3 | token_id | host_id | port_id | READY endpoint_id3 | token_id | host_id | port_id | READY @@ -618,7 +618,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 4 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c4'; endpoint_id4 | token_id | host_id | port_id | READY endpoint_id4 | token_id | host_id | port_id | READY @@ -768,7 +768,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c5 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 5 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c5'; endpoint_id5 | token_id | host_id | port_id | READY endpoint_id5 | token_id | host_id | port_id | READY @@ -929,7 +929,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c6 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 6 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c6'; endpoint_id6 | 
token_id | host_id | port_id | READY endpoint_id6 | token_id | host_id | port_id | READY @@ -1071,7 +1071,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c61 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 61 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c61'; endpoint_id61 | token_id | host_id | port_id | READY endpoint_id61 | token_id | host_id | port_id | READY @@ -1211,7 +1211,7 @@ ERROR: canceling statement due to user request 1: BEGIN; BEGIN 1: DECLARE c7 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 7 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c7'; endpoint_id7 | token_id | host_id | port_id | READY endpoint_id7 | token_id | host_id | port_id | READY @@ -1353,7 +1353,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c8 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'get_tuple_cell SESSION81 1 1 ; create_match_sub_with_spaces $SESSION81 session81' : SELECT sessionid,state FROM gp_get_session_endpoints() WHERE cursorname='c8'; sessionid | state -----------+------- @@ -1365,7 +1365,7 @@ DECLARE 2: BEGIN; BEGIN 2: DECLARE c8 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 2: @post_run 'get_tuple_cell SESSION82 1 1 ; create_match_sub_with_spaces $SESSION82 session82' : SELECT sessionid,state FROM gp_get_session_endpoints() WHERE cursorname='c8'; sessionid | state -----------+------- @@ -1386,13 +1386,13 @@ DECLARE (6 rows) 1: CLOSE c8; -CLOSE +CLOSE CURSOR 1: END; -END +COMMIT 2: CLOSE c8; -CLOSE +CLOSE CURSOR 2: END; -END +COMMIT ---------- Test9: Test parallel retrieve cursor auto-check 1: drop table if exists t1; diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/status_wait.source 
b/src/test/isolation2/output/parallel_retrieve_cursor/status_wait.source index cdb6488879a..cf1721719be 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/status_wait.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/status_wait.source @@ -1,17 +1,17 @@ -- @Description Tests the state for pg_endpoints AND gp_get_segment_endpoints(), focus in wait mode -- DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 --------- Test1: Basic test for parallel retrieve interface & close cursor 1: BEGIN; BEGIN 1: DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 1 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c1'; endpoint_id1 | token_id | host_id | port_id | READY endpoint_id1 | token_id | host_id | port_id | READY @@ -160,7 +160,7 @@ DECLARE t (1 row) 1: CLOSE c1; -CLOSE +CLOSE CURSOR -- check no endpoint info 1: SELECT auth_token,state FROM gp_get_endpoints() WHERE cursorname='c1'; auth_token | state @@ -194,7 +194,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c2 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 2 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c2'; endpoint_id2 | token_id | host_id | port_id | READY endpoint_id2 | token_id | host_id | port_id | READY @@ -458,7 +458,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c3 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 3 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c3'; endpoint_id3 | token_id | host_id | port_id | READY endpoint_id3 | token_id | host_id | port_id | READY 
@@ -594,7 +594,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c4 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 4 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c4'; endpoint_id4 | token_id | host_id | port_id | READY endpoint_id4 | token_id | host_id | port_id | READY @@ -731,7 +731,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c5 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 5 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c5'; endpoint_id5 | token_id | host_id | port_id | READY endpoint_id5 | token_id | host_id | port_id | READY @@ -866,7 +866,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c6 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 6 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c6'; endpoint_id6 | token_id | host_id | port_id | READY endpoint_id6 | token_id | host_id | port_id | READY @@ -996,7 +996,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c61 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 61 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c61'; endpoint_id61 | token_id | host_id | port_id | READY endpoint_id61 | token_id | host_id | port_id | READY @@ -1125,7 +1125,7 @@ ERROR: canceling statement due to user request 1: BEGIN; BEGIN 1: DECLARE c7 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 7 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c7'; endpoint_id7 | token_id | host_id | port_id | READY endpoint_id7 | 
token_id | host_id | port_id | READY @@ -1267,7 +1267,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c8 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'get_tuple_cell SESSION81 1 1 ; create_match_sub_with_spaces $SESSION81 session81': SELECT sessionid,state FROM gp_get_session_endpoints() WHERE cursorname='c8'; sessionid | state -----------+------- @@ -1279,7 +1279,7 @@ DECLARE 2: BEGIN; BEGIN 2: DECLARE c8 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 2: @post_run 'get_tuple_cell SESSION82 1 1 ; create_match_sub_with_spaces $SESSION82 session82': SELECT sessionid,state FROM gp_get_session_endpoints() WHERE cursorname='c8'; sessionid | state -----------+------- @@ -1300,19 +1300,19 @@ DECLARE (6 rows) 1: CLOSE c8; -CLOSE +CLOSE CURSOR 1: END; -END +COMMIT 2: CLOSE c8; -CLOSE +CLOSE CURSOR 2: END; -END +COMMIT ---------- Test9: Cancel (using pg_cancel_backend(pid)) the process of 'CHECK PARALLEL RETRIEVE CURSOR' 1: BEGIN; BEGIN 1: DECLARE c9 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 9 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c9'; endpoint_id9 | token_id | host_id | port_id | READY endpoint_id9 | token_id | host_id | port_id | READY @@ -1370,7 +1370,7 @@ ROLLBACK 1: BEGIN; BEGIN 1: DECLARE c10 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 10 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c10'; endpoint_id10 | token_id | host_id | port_id | READY endpoint_id10 | token_id | host_id | port_id | READY @@ -1456,16 +1456,16 @@ ROLLBACK --------- Test11: Test t1 has large amount of tuples, only retreive small number of tuples, we can still close cursor. 
1:DROP TABLE IF EXISTS t2; -DROP +DROP TABLE 1:CREATE TABLE t2 (id integer, data text) DISTRIBUTED by (id); -CREATE +CREATE TABLE 1:INSERT INTO t2 select id, 'test ' || id from generate_series(1,100000) id; -INSERT 100000 +INSERT 0 100000 1: BEGIN; BEGIN 1: DECLARE c11 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t2; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR 1: @post_run 'parse_endpoint_info 11 1 2 3 4' : SELECT endpointname,auth_token,hostname,port,state FROM gp_get_endpoints() WHERE cursorname='c11'; endpoint_id11 | token_id | host_id | port_id | READY endpoint_id11 | token_id | host_id | port_id | READY @@ -1500,7 +1500,7 @@ DECLARE (5 rows) 1: CLOSE c11; -CLOSE +CLOSE CURSOR 1: ROLLBACK; ROLLBACK diff --git a/src/test/isolation2/output/parallel_retrieve_cursor/syntax.source b/src/test/isolation2/output/parallel_retrieve_cursor/syntax.source index 40d92067636..36404920fb1 100644 --- a/src/test/isolation2/output/parallel_retrieve_cursor/syntax.source +++ b/src/test/isolation2/output/parallel_retrieve_cursor/syntax.source @@ -1,17 +1,17 @@ -- @Description Tests syntax for the PARALLEL RETRIEVE CURSOR statement -- DROP TABLE IF EXISTS t1; -DROP +DROP TABLE CREATE TABLE t1 (a INT) DISTRIBUTED by (a); -CREATE +CREATE TABLE insert into t1 select generate_series(1,100); -INSERT 100 +INSERT 0 100 -- Test1: PARALLEL RETRIEVE CURSOR with other options (WITH HOLD/SCROLL) is not supported BEGIN; BEGIN DECLARE c1 PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR DECLARE c2 PARALLEL RETRIEVE CURSOR WITH HOLD FOR SELECT * FROM t1; ERROR: DECLARE PARALLEL RETRIEVE CURSOR WITH HOLD ... 
is not supported DETAIL: Holdable cursors can not be parallel @@ -21,7 +21,7 @@ ROLLBACK BEGIN; BEGIN DECLARE c1 NO SCROLL PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR DECLARE c2 SCROLL PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; ERROR: SCROLL is not allowed for the PARALLEL RETRIEVE CURSORs DETAIL: Scrollable cursors can not be parallel @@ -32,7 +32,7 @@ ROLLBACK BEGIN; BEGIN DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR FETCH ALL FROM c1; ERROR: cannot specify 'FETCH' for PARALLEL RETRIEVE CURSOR HINT: Use 'RETRIEVE' statement on endpoint instead. @@ -42,7 +42,7 @@ ROLLBACK BEGIN; BEGIN DECLARE c1 PARALLEL RETRIEVE CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR MOVE 10 FROM c1; ERROR: the 'MOVE' statement for PARALLEL RETRIEVE CURSOR is not supported ROLLBACK; @@ -52,7 +52,7 @@ ROLLBACK BEGIN; BEGIN DECLARE c1 CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE CURSOR SELECT * FROM gp_wait_parallel_retrieve_cursor('c1', 0); ERROR: cursor is not a PARALLEL RETRIEVE CURSOR ROLLBACK; @@ -60,7 +60,7 @@ ROLLBACK BEGIN; BEGIN DECLARE c1 CURSOR FOR SELECT * FROM t1; -DECLARE +DECLARE CURSOR SELECT * FROM gp_wait_parallel_retrieve_cursor('c1', -1); ERROR: cursor is not a PARALLEL RETRIEVE CURSOR ROLLBACK; @@ -70,7 +70,7 @@ ROLLBACK BEGIN; BEGIN DECLARE c1 CURSOR WITHOUT HOLD FOR SELECT * FROM t1; -DECLARE +DECLARE CURSOR DO $$ DECLARE i int4; c2 refcursor = 'c1'; BEGIN FETCH FROM c2 INTO i; RAISE NOTICE '%', i; END; $$; DO ROLLBACK; @@ -78,7 +78,7 @@ ROLLBACK BEGIN; BEGIN DECLARE c3 PARALLEL RETRIEVE CURSOR WITHOUT HOLD FOR SELECT * FROM t1; -DECLARE +DECLARE PARALLEL RETRIEVE CURSOR DO $$ DECLARE i int4; c4 refcursor = 'c3'; BEGIN FETCH FROM c4 INTO i; RAISE NOTICE '%', i; END; $$; ERROR: The PARALLEL RETRIEVE CURSOR is not supported in SPI. HINT: Use normal cursor statement instead. 
diff --git a/src/test/isolation2/output/pg_basebackup_large_database_oid.source b/src/test/isolation2/output/pg_basebackup_large_database_oid.source index 141d648b79d..6f356829cdd 100644 --- a/src/test/isolation2/output/pg_basebackup_large_database_oid.source +++ b/src/test/isolation2/output/pg_basebackup_large_database_oid.source @@ -6,7 +6,7 @@ select gp_inject_fault('bump_oid', 'skip', dbid) from gp_segment_configuration w (1 row) create database db_large_oid; -CREATE +CREATE DATABASE select gp_inject_fault('bump_oid', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = -1; gp_inject_fault @@ -28,7 +28,7 @@ select pg_basebackup(address, dbid, port, true, 'some_replication_slot', '@testt drop database db_large_oid; -DROP +DROP DATABASE 0U: select * from pg_drop_replication_slot('some_replication_slot'); pg_drop_replication_slot diff --git a/src/test/isolation2/output/pg_basebackup_with_tablespaces.source b/src/test/isolation2/output/pg_basebackup_with_tablespaces.source index fc8ceda2d41..626880348a4 100644 --- a/src/test/isolation2/output/pg_basebackup_with_tablespaces.source +++ b/src/test/isolation2/output/pg_basebackup_with_tablespaces.source @@ -6,31 +6,31 @@ (exited with code 0) drop tablespace if exists some_basebackup_tablespace; -DROP +DROP TABLESPACE create tablespace some_basebackup_tablespace location '@testtablespace@/some_basebackup_tablespace'; -CREATE +CREATE TABLESPACE -- And a database using the tablespace drop database if exists some_database_with_tablespace; -DROP +DROP DATABASE create database some_database_with_tablespace tablespace some_basebackup_tablespace; -CREATE +CREATE DATABASE -- And a database without using the tablespace drop database if exists some_database_without_tablespace; -DROP +DROP DATABASE create database some_database_without_tablespace; -CREATE +CREATE DATABASE -- And a table and index, temp table and index using the tablespace 1:@db_name some_database_without_tablespace: CREATE TABLE test(a INT, 
b INT) TABLESPACE some_basebackup_tablespace; -CREATE +CREATE TABLE 1:@db_name some_database_without_tablespace: CREATE INDEX test_index on test(a) TABLESPACE some_basebackup_tablespace; -CREATE +CREATE INDEX 2:@db_name some_database_without_tablespace: CREATE TEMP TABLE test_tmp(a INT, b INT) TABLESPACE some_basebackup_tablespace; -CREATE +CREATE TABLE 2:@db_name some_database_without_tablespace: CREATE INDEX test_tmp_index on test_tmp(a) TABLESPACE some_basebackup_tablespace; -CREATE +CREATE INDEX 1q: ... @@ -103,7 +103,7 @@ select validate_tablespace_symlink('@testtablespace@/some_basebackup_datadir', ' (1 row) drop database some_database_with_tablespace; -DROP +DROP DATABASE -- Need to poll until db drop succeeds since after 2q: happens the PGPROC -- on the server side might be still not released when we run database drop. -- This makes the test flaky. @@ -113,7 +113,7 @@ DROP -- end_ignore (exited with code 0) drop tablespace some_basebackup_tablespace; -DROP +DROP TABLESPACE !\retcode rm -rf @testtablespace@/some_basebackup_datadir/; -- start_ignore @@ -143,23 +143,23 @@ DROP (exited with code 0) create tablespace some_basebackup_tablespace LOCATION '@testtablespace@/some_basebackup_tablespace' WITH (content0='@testtablespace@/some_basebackup_tablespace_c0'); -CREATE +CREATE TABLESPACE -- And a database without using the tablespace drop database if exists some_database_without_tablespace; -DROP +DROP DATABASE create database some_database_without_tablespace; -CREATE +CREATE DATABASE -- And a table and index, temp table and index using the tablespace 1:@db_name some_database_without_tablespace: CREATE TABLE test(a INT, b INT) TABLESPACE some_basebackup_tablespace; -CREATE +CREATE TABLE 1:@db_name some_database_without_tablespace: CREATE INDEX test_index on test(a) TABLESPACE some_basebackup_tablespace; -CREATE +CREATE INDEX 2:@db_name some_database_without_tablespace: CREATE TEMP TABLE test_tmp(a INT, b INT) TABLESPACE some_basebackup_tablespace; -CREATE 
+CREATE TABLE 2:@db_name some_database_without_tablespace: CREATE INDEX test_tmp_index on test_tmp(a) TABLESPACE some_basebackup_tablespace; -CREATE +CREATE INDEX 1q: ... @@ -208,7 +208,7 @@ select validate_tablespace_symlink('@testtablespace@/some_basebackup_datadir', ' -- end_ignore (exited with code 0) drop tablespace some_basebackup_tablespace; -DROP +DROP TABLESPACE !\retcode rm -rf @testtablespace@/some_basebackup_datadir/; -- start_ignore diff --git a/src/test/isolation2/output/uao/alter_while_vacuum.source b/src/test/isolation2/output/uao/alter_while_vacuum.source index 3c3d0d06391..de230532c05 100644 --- a/src/test/isolation2/output/uao/alter_while_vacuum.source +++ b/src/test/isolation2/output/uao/alter_while_vacuum.source @@ -1,9 +1,9 @@ -- @Description Ensures that an alter table while a vacuum operation is ok -- CREATE TABLE alter_while_vacuum_@amname@ (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO alter_while_vacuum_@amname@ SELECT i as a, i as b FROM generate_series(1, 100000) AS i; -INSERT 100000 +INSERT 0 100000 DELETE FROM alter_while_vacuum_@amname@ WHERE a < 12000; DELETE 11999 @@ -16,7 +16,7 @@ DELETE 11999 2: VACUUM alter_while_vacuum_@amname@; VACUUM 1<: <... 
completed> -ALTER +ALTER TABLE 1: SELECT * FROM alter_while_vacuum_@amname@ WHERE a < 12010; a | b | d -------+-------+---- diff --git a/src/test/isolation2/output/uao/alter_while_vacuum2.source b/src/test/isolation2/output/uao/alter_while_vacuum2.source index 94b4c86cddd..f3cf87d3137 100644 --- a/src/test/isolation2/output/uao/alter_while_vacuum2.source +++ b/src/test/isolation2/output/uao/alter_while_vacuum2.source @@ -1,19 +1,19 @@ -- @Description Ensures that an alter table while a vacuum operation is ok -- CREATE TABLE alter_while_vacuum2_@amname@ (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO alter_while_vacuum2_@amname@ SELECT i as a, i as b FROM generate_series(1, 100000) AS i; -INSERT 100000 +INSERT 0 100000 INSERT INTO alter_while_vacuum2_@amname@ SELECT i as a, i as b FROM generate_series(1, 100000) AS i; -INSERT 100000 +INSERT 0 100000 INSERT INTO alter_while_vacuum2_@amname@ SELECT i as a, i as b FROM generate_series(1, 100000) AS i; -INSERT 100000 +INSERT 0 100000 INSERT INTO alter_while_vacuum2_@amname@ SELECT i as a, i as b FROM generate_series(1, 100000) AS i; -INSERT 100000 +INSERT 0 100000 INSERT INTO alter_while_vacuum2_@amname@ SELECT i as a, i as b FROM generate_series(1, 100000) AS i; -INSERT 100000 +INSERT 0 100000 INSERT INTO alter_while_vacuum2_@amname@ SELECT i as a, i as b FROM generate_series(1, 100000) AS i; -INSERT 100000 +INSERT 0 100000 DELETE FROM alter_while_vacuum2_@amname@ WHERE a < 12000; DELETE 71994 @@ -26,7 +26,7 @@ DELETE 71994 SET 2>: VACUUM alter_while_vacuum2_@amname@; 1: Alter table alter_while_vacuum2_@amname@ set with ( reorganize='true') distributed randomly; -ALTER +ALTER TABLE 2<: <... 
completed> VACUUM 1: SELECT COUNT(*) FROM alter_while_vacuum2_@amname@ WHERE a < 12010; diff --git a/src/test/isolation2/output/uao/ao_unique_index_vacuum.source b/src/test/isolation2/output/uao/ao_unique_index_vacuum.source index 74db357c6ab..1591a295b90 100644 --- a/src/test/isolation2/output/uao/ao_unique_index_vacuum.source +++ b/src/test/isolation2/output/uao/ao_unique_index_vacuum.source @@ -3,9 +3,9 @@ -- Case 1: Basic case with a few deleted tuples--------------------------------- CREATE TABLE unique_index_vacuum_@amname@(i int UNIQUE) USING @amname@ DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE INSERT INTO unique_index_vacuum_@amname@ SELECT generate_series(1, 5); -INSERT 5 +INSERT 0 5 DELETE FROM unique_index_vacuum_@amname@ WHERE i = 5; DELETE 1 -- should succeed (and not raise conflicts for rows [1,4] while moving rows [1,4]) @@ -18,15 +18,15 @@ SELECT (gp_toolkit.__gp_aoblkdir('unique_index_vacuum_@amname@')).* FROM gp_dist (0,3) | 2 | 0 | 0 | 1 | 0 | 4 (1 row) DROP TABLE unique_index_vacuum_@amname@; -DROP +DROP TABLE -- Case 2: Concurrent case showcasing that a placeholder block directory row is -- not necessary to be inserted for the rows transferred to a new segment by -- a VACUUM operation. CREATE TABLE unique_index_vacuum_@amname@(i int UNIQUE) USING @amname@ DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE INSERT INTO unique_index_vacuum_@amname@ SELECT generate_series(1, 5); -INSERT 5 +INSERT 0 5 DELETE FROM unique_index_vacuum_@amname@ WHERE i = 5; DELETE 1 @@ -72,14 +72,14 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi 1<: <... completed> VACUUM DROP TABLE unique_index_vacuum_@amname@; -DROP +DROP TABLE -- Case 3: Validate the occurrence of vacuum index. -- just after it has bulk deleted the old index entries. 
CREATE TABLE unique_index_vacuum_@amname@(i int UNIQUE) USING @amname@ DISTRIBUTED REPLICATED; -CREATE +CREATE TABLE INSERT INTO unique_index_vacuum_@amname@ SELECT generate_series(1, 5); -INSERT 5 +INSERT 0 5 DELETE FROM unique_index_vacuum_@amname@ WHERE i = 5; DELETE 1 @@ -113,4 +113,4 @@ SELECT gp_inject_fault_infinite('vacuum_ao_after_index_delete', 'reset', dbid) F (3 rows) DROP TABLE unique_index_vacuum_@amname@; -DROP +DROP TABLE diff --git a/src/test/isolation2/output/uao/bitmapindex_rescan.source b/src/test/isolation2/output/uao/bitmapindex_rescan.source index a55a3b625bd..00c8cba3664 100644 --- a/src/test/isolation2/output/uao/bitmapindex_rescan.source +++ b/src/test/isolation2/output/uao/bitmapindex_rescan.source @@ -3,27 +3,27 @@ drop table if exists bir; drop table if exists yolo cascade; -- end_ignore create table bir (a int, b int) distributed by (a); -CREATE +CREATE TABLE insert into bir select i, i from generate_series(1, 5) i; -INSERT 5 +INSERT 0 5 create table yolo (a int, b int) USING @amname@ distributed by (a); -CREATE +CREATE TABLE create index yolo_idx on yolo using btree (a); -CREATE +CREATE INDEX 1: begin; BEGIN 2: begin; BEGIN 1: insert into yolo select i, i from generate_series(1, 10000) i; -INSERT 10000 +INSERT 0 10000 2: insert into yolo select i, i from generate_series(1, 2) i; -INSERT 2 +INSERT 0 2 1: commit; COMMIT 2: abort; -ABORT +ROLLBACK analyze yolo; ANALYZE diff --git a/src/test/isolation2/output/uao/brin.source b/src/test/isolation2/output/uao/brin.source index 5fae106e460..c955be65d26 100644 --- a/src/test/isolation2/output/uao/brin.source +++ b/src/test/isolation2/output/uao/brin.source @@ -2,6 +2,8 @@ -- White-box tests are necessary to ensure that summarization is done -- successfully (to avoid cases where ranges have brin data tuples without -- values or where the range is not covered by the revmap etc) +CREATE EXTENSION pageinspect; +CREATE EXTENSION -- Turn off sequential scans to force usage of BRIN indexes for 
scans. SET enable_seqscan TO off; @@ -13,9 +15,9 @@ SET -- Create an index on an empty table CREATE TABLE brin_ao_summarize_@amname@(i int) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX ON brin_ao_summarize_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX -- Sanity: There are no revmap/data pages as there is no data 1U: SELECT blkno, brin_page_type(get_raw_page('brin_ao_summarize_@amname@_i_idx', blkno)) FROM generate_series(0, nblocks('brin_ao_summarize_@amname@_i_idx') - 1) blkno; @@ -40,7 +42,7 @@ SELECT brin_summarize_new_values('brin_ao_summarize_@amname@_i_idx'); -- Drop the index DROP INDEX brin_ao_summarize_@amname@_i_idx; -DROP +DROP INDEX -- Create 3 blocks all on 1 QE, in 1 aoseg: 2 blocks full, 1 block with 1 tuple. SELECT populate_pages('brin_ao_summarize_@amname@', 1, tid '(33554434, 0)'); @@ -51,7 +53,7 @@ SELECT populate_pages('brin_ao_summarize_@amname@', 1, tid '(33554434, 0)'); -- Now re-create the index on the data inserted above. CREATE INDEX ON brin_ao_summarize_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX -- Sanity: there should be 1 revmap page and 1 data page covering the 3 blocks. 1U: SELECT blkno, brin_page_type(get_raw_page('brin_ao_summarize_@amname@_i_idx', blkno)) FROM generate_series(0, nblocks('brin_ao_summarize_@amname@_i_idx') - 1) blkno; @@ -335,7 +337,7 @@ VACUUM -- A new INSERT would always map to the last range on the old segment and that -- range will be updated to hold the new value, as part of INSERT. INSERT INTO brin_ao_summarize_@amname@ VALUES(40); -INSERT 1 +INSERT 0 1 -- All the live tuples will have been moved to a single new logical heap block -- in seg2 (67108864). 
The 1 tuple INSERTed after the VACUUM should have gone to @@ -464,9 +466,9 @@ SELECT brin_summarize_new_values('brin_ao_summarize_@amname@_i_idx'); -- Specific range summarization/desummarization -------------------------------------------------------------------------------- CREATE TABLE brin_ao_specific_@amname@(i int) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX ON brin_ao_specific_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX 1: BEGIN; BEGIN @@ -675,9 +677,9 @@ SELECT brin_desummarize_range('brin_ao_specific_@amname@_i_idx', 100663296); -------------------------------------------------------------------------------- CREATE TABLE brin_ao_summarize_partial_@amname@(i int) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX ON brin_ao_summarize_partial_@amname@ USING brin(i) WITH (pages_per_range=3); -CREATE +CREATE INDEX -- Insert 4 blocks of data on 1 QE, in 1 aoseg; 3 blocks full, 1 block with 1 tuple. -- The 1st range [33554432, 33554434] is full and the last range [33554435, 33554437] @@ -758,9 +760,9 @@ SELECT brin_summarize_new_values('brin_ao_summarize_partial_@amname@_i_idx'); -- concurrent inserts to it, while summarization was in flight. CREATE TABLE brin_range_extended1_@amname@(i int) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX ON brin_range_extended1_@amname@ USING brin(i) WITH (pages_per_range=5); -CREATE +CREATE INDEX -- Insert 3 blocks of data on 1 QE, in 1 aoseg; 2 blocks full, 1 block with 1 tuple. SELECT populate_pages('brin_range_extended1_@amname@', 1, tid '(33554434, 0)'); @@ -807,7 +809,7 @@ SELECT gp_wait_until_triggered_fault('summarize_last_partial_range', 1, dbid) FR -- Insert a different value into seg1 concurrently. INSERT INTO brin_range_extended1_@amname@ VALUES(20); -INSERT 1 +INSERT 0 1 -- Sanity: The earlier placeholder tuple has been modified by the INSERT to -- contain only the value 20. The tuple is still a placeholder. 
@@ -891,9 +893,9 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_ -- case it was extended by another transaction, while summarization was in flight. CREATE TABLE brin_range_extended2_@amname@(i int) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX ON brin_range_extended2_@amname@ USING brin(i) WITH (pages_per_range=5); -CREATE +CREATE INDEX -- Insert 3 blocks of data on 1 QE, in 1 aoseg; 2 blocks full, 1 block with 1 tuple. SELECT populate_pages('brin_range_extended2_@amname@', 1, tid '(33554434, 0)'); @@ -1002,7 +1004,7 @@ SELECT gp_inject_fault('summarize_last_partial_range', 'reset', dbid) FROM gp_se -- Another insert into the same QE and the same range should not change the fact -- that the summary tuple is a placeholder tuple. INSERT INTO brin_range_extended2_@amname@ VALUES(30); -INSERT 1 +INSERT 0 1 1U: SELECT blkno, brin_page_type(get_raw_page('brin_range_extended2_@amname@_i_idx', blkno)) FROM generate_series(0, nblocks('brin_range_extended2_@amname@_i_idx') - 1) blkno; blkno | brin_page_type -------+---------------- @@ -1026,7 +1028,7 @@ INSERT 1 -------------------------------------------------------------------------------- CREATE TABLE brin_multi_@amname@(i int) USING @amname@; -CREATE +CREATE TABLE -- For each segment file (there are 2), populate 1 full block and 1 block with -- 1 tuple. @@ -1050,7 +1052,7 @@ COMMIT COMMIT CREATE INDEX ON brin_multi_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX -- Sanity: All four blocks from the 2 seg files above should be summarized. 1U: SELECT blkno, brin_page_type(get_raw_page('brin_multi_@amname@_i_idx', blkno)) FROM generate_series(0, nblocks('brin_multi_@amname@_i_idx') - 1) blkno; @@ -1103,9 +1105,9 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_ -- Test build/summarize with aborted rows. 
-------------------------------------------------------------------------------- CREATE TABLE brin_abort_@amname@(i int) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX ON brin_abort_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX BEGIN; BEGIN -- Create 3 blocks all on 1 QE, in 1 aoseg: 2 blocks full, 1 block with 1 tuple. @@ -1115,7 +1117,7 @@ SELECT populate_pages('brin_abort_@amname@', 1, tid '(33554434, 0)'); (1 row) ABORT; -ABORT +ROLLBACK -- Sanity: There are no revmap or data pages created yet. 1U: SELECT blkno, brin_page_type(get_raw_page('brin_abort_@amname@_i_idx', blkno)) FROM generate_series(0, nblocks('brin_abort_@amname@_i_idx') - 1) blkno; @@ -1244,9 +1246,9 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_ -- Drop and re-create the index to test build. DROP INDEX brin_abort_@amname@_i_idx; -DROP +DROP INDEX CREATE INDEX ON brin_abort_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX -- Sanity: There is 1 revmap page and 1 data page, containing 2 empty ranges -- and 2 ranges over the committed rows. @@ -1302,17 +1304,17 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_ -- Test build/summarize with whole revmap page containing aborted ranges. -------------------------------------------------------------------------------- CREATE TABLE brin_abort_fullpage_@amname@(i int) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX ON brin_abort_fullpage_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX -- Insert single row, so we have a gp_fastsequence entry to modify. BEGIN; BEGIN INSERT INTO brin_abort_fullpage_@amname@ VALUES(1); -INSERT 1 +INSERT 0 1 ABORT; -ABORT +ROLLBACK -- Simulate a revmap page full of aborted ranges by altering gp_fastsequence. 
-- This creates enough entries for 2 revmap pages (About 32768 integers fit in @@ -1395,9 +1397,9 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_ -- Drop and re-create the index to test build. DROP INDEX brin_abort_fullpage_@amname@_i_idx; -DROP +DROP INDEX CREATE INDEX ON brin_abort_fullpage_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX -- Sanity: There are 2 revmap pages, and 3 data pages. The first 2 data pages -- contain nothing but empty ranges. The 3rd data page has a single non-empty @@ -1501,3 +1503,5 @@ SELECT brin_summarize_range('brin_abort_fullpage_@amname@_i_idx', 33559886); RESET enable_seqscan; RESET +DROP EXTENSION pageinspect; +DROP EXTENSION diff --git a/src/test/isolation2/output/uao/brin_chain.source b/src/test/isolation2/output/uao/brin_chain.source index 12012899a23..7ec54c96698 100644 --- a/src/test/isolation2/output/uao/brin_chain.source +++ b/src/test/isolation2/output/uao/brin_chain.source @@ -1,6 +1,9 @@ -- Tests for BRIN chaining for AO/CO tables -- These are in a separate file as they take longer and deal with more data. +CREATE EXTENSION pageinspect; +CREATE EXTENSION + -- All tests insert rows into content=1. -- We create an append-optimized table with the following characteristics: @@ -13,11 +16,11 @@ -- * seg3: 32768 aborted rows (1 logical heap block), 3000 committed rows -> 1 revmap page. 
CREATE TABLE brin_chain_@amname@(i character(1)) USING heap; -CREATE +CREATE TABLE INSERT INTO brin_chain_@amname@ SELECT '2' FROM generate_series(1, 1000); -INSERT 1000 +INSERT 0 1000 ALTER TABLE brin_chain_@amname@ SET ACCESS METHOD @amname@; -ALTER +ALTER TABLE 1: BEGIN; BEGIN @@ -46,26 +49,26 @@ COPY 180000000 -- end_ignore (exited with code 0) 2: INSERT INTO brin_chain_@amname@ SELECT '2' FROM generate_series(1, 2000); -INSERT 2000 +INSERT 0 2000 3: INSERT INTO brin_chain_@amname@ SELECT '2' FROM generate_series(1, 32768); -INSERT 32768 +INSERT 0 32768 3: ABORT; -ABORT +ROLLBACK 3: BEGIN; BEGIN 3: INSERT INTO brin_chain_@amname@ SELECT '2' FROM generate_series(1, 3000); -INSERT 3000 +INSERT 0 3000 1: COMMIT; COMMIT 2: ABORT; -ABORT +ROLLBACK 3: COMMIT; COMMIT -- Create the index. CREATE INDEX ON brin_chain_@amname@ USING brin(i) WITH (pages_per_range=1); -CREATE +CREATE INDEX -- Sanity: Inspect the revmap chain information (limit to first 5 segments) 1U: SELECT blkno, brin_page_type(get_raw_page('brin_chain_@amname@_i_idx', blkno)) FROM generate_series(0, nblocks('brin_chain_@amname@_i_idx') - 1) blkno; @@ -134,3 +137,6 @@ SELECT count(*) FROM brin_chain_@amname@ WHERE i > '1' and i < '3'; ----------- 180004000 (1 row) + +DROP EXTENSION pageinspect; +DROP EXTENSION diff --git a/src/test/isolation2/output/uao/compaction_full_stats.source b/src/test/isolation2/output/uao/compaction_full_stats.source index 9a2969468bd..ca022d61a3e 100644 --- a/src/test/isolation2/output/uao/compaction_full_stats.source +++ b/src/test/isolation2/output/uao/compaction_full_stats.source @@ -1,16 +1,16 @@ -- @Description Tests the behavior of full vacuum w.r.t. 
the pg_class statistics -- DROP TABLE IF EXISTS foo; -DROP +DROP TABLE CREATE TABLE foo (a INT, b INT, c CHAR(128)) USING @amname@ DISTRIBUTED BY (a); -CREATE +CREATE TABLE CREATE INDEX foo_index ON foo(b); -CREATE +CREATE INDEX INSERT INTO foo SELECT i as a, i as b, 'hello world' as c FROM generate_series(1, 50) AS i; -INSERT 50 +INSERT 0 50 INSERT INTO foo SELECT i as a, i as b, 'hello world' as c FROM generate_series(51, 100) AS i; -INSERT 50 +INSERT 0 50 ANALYZE foo; ANALYZE diff --git a/src/test/isolation2/output/uao/compaction_utility.source b/src/test/isolation2/output/uao/compaction_utility.source index 0ae471d7212..e9587d96f7e 100644 --- a/src/test/isolation2/output/uao/compaction_utility.source +++ b/src/test/isolation2/output/uao/compaction_utility.source @@ -1,13 +1,13 @@ -- @Description Tests the basic behavior of (lazy) vacuum when called from utility mode -- DROP TABLE IF EXISTS foo; -DROP +DROP TABLE CREATE TABLE foo (a INT, b INT, c CHAR(128)) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX foo_index ON foo(b); -CREATE +CREATE INDEX INSERT INTO foo SELECT i as a, 1 as b, 'hello world' as c FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 DELETE FROM foo WHERE a < 20; DELETE 19 diff --git a/src/test/isolation2/output/uao/compaction_utility_2.source b/src/test/isolation2/output/uao/compaction_utility_2.source index d3c2c5c9ebd..d5119ad10a3 100644 --- a/src/test/isolation2/output/uao/compaction_utility_2.source +++ b/src/test/isolation2/output/uao/compaction_utility_2.source @@ -1,13 +1,13 @@ -- @Description Tests the basic behavior of (lazy) vacuum when called from utility mode -- DROP TABLE IF EXISTS foo; -DROP +DROP TABLE CREATE TABLE foo (a INT, b INT, c CHAR(128)) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX foo_index ON foo(b); -CREATE +CREATE INDEX INSERT INTO foo SELECT i as a, 1 as b, 'hello world' as c FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 DELETE FROM foo WHERE a < 20; DELETE 19 diff --git 
a/src/test/isolation2/output/uao/compaction_utility_insert.source b/src/test/isolation2/output/uao/compaction_utility_insert.source index ba037d2ee6c..01c42a8569d 100644 --- a/src/test/isolation2/output/uao/compaction_utility_insert.source +++ b/src/test/isolation2/output/uao/compaction_utility_insert.source @@ -1,16 +1,16 @@ -- @Description Tests the compaction of data inserted in utility mode -- DROP TABLE IF EXISTS foo; -DROP +DROP TABLE CREATE TABLE foo (a INT, b INT, c CHAR(128)) USING @amname@ distributed randomly; -CREATE +CREATE TABLE CREATE INDEX foo_index ON foo(b); -CREATE +CREATE INDEX 0U: INSERT INTO foo VALUES (2, 2, 'c'); -INSERT 1 +INSERT 0 1 0U: INSERT INTO foo VALUES (3, 3, 'c'); -INSERT 1 +INSERT 0 1 SELECT *, segno, tupcount, state FROM gp_ao_or_aocs_seg('foo'); segment_id | segno | tupcount | modcount | formatversion | state | segno | tupcount | state ------------+-------+----------+----------+---------------+-------+-------+----------+------- diff --git a/src/test/isolation2/output/uao/compaction_utility_insert_2.source b/src/test/isolation2/output/uao/compaction_utility_insert_2.source index f3bc1a0f400..eedd1e58d0a 100644 --- a/src/test/isolation2/output/uao/compaction_utility_insert_2.source +++ b/src/test/isolation2/output/uao/compaction_utility_insert_2.source @@ -1,23 +1,23 @@ -- @Description Tests the compaction of data inserted in utility mode -- DROP TABLE IF EXISTS foo; -DROP +DROP TABLE CREATE TABLE foo (a INT, b INT, c CHAR(128)) USING @amname@; -CREATE +CREATE TABLE CREATE INDEX foo_index ON foo(b); -CREATE +CREATE INDEX INSERT INTO foo VALUES (1, 1, 'c'); -INSERT 1 +INSERT 0 1 SELECT segno, tupcount, state FROM gp_ao_or_aocs_seg('foo'); segno | tupcount | state -------+----------+------- 1 | 1 | 1 (1 row) 2U: INSERT INTO foo VALUES (2, 2, 'c'); -INSERT 1 +INSERT 0 1 2U: INSERT INTO foo VALUES (3, 3, 'c'); -INSERT 1 +INSERT 0 1 2U: SELECT segno, tupcount, state FROM gp_ao_or_aocs_seg('foo'); segno | tupcount | state 
-------+----------+------- diff --git a/src/test/isolation2/output/uao/create_index_allows_readonly.source b/src/test/isolation2/output/uao/create_index_allows_readonly.source index a97bce1de81..f4eb8a455f5 100644 --- a/src/test/isolation2/output/uao/create_index_allows_readonly.source +++ b/src/test/isolation2/output/uao/create_index_allows_readonly.source @@ -5,9 +5,9 @@ set default_table_access_method=@amname@; SET create table @amname@_create_index_with_select_tbl(a int, b int); -CREATE +CREATE TABLE insert into @amname@_create_index_with_select_tbl select a,a from generate_series(1,10) a; -INSERT 10 +INSERT 0 10 -- Verify readonly transaction is able to run concurrently with index creation. @@ -23,7 +23,7 @@ BEGIN BEGIN -- expect no hang 2: create index @amname@_create_index_with_select_idx on @amname@_create_index_with_select_tbl(a); -CREATE +CREATE INDEX -- expect no hang 3: select * from @amname@_create_index_with_select_tbl where a = 2; a | b @@ -32,16 +32,16 @@ CREATE (1 row) 1: end; -END +COMMIT 2: end; -END +COMMIT -- Verify behaviors of select with locking clause (i.e. select for update) -- when running concurrently with index creation, expect blocking with each other. -- This is only for AO/CO tables, for Heap tables, refer to lockmodes.sql. drop index @amname@_create_index_with_select_idx; -DROP +DROP INDEX 1: begin; BEGIN @@ -60,17 +60,17 @@ BEGIN COMMIT 2<: <... 
completed> -CREATE +CREATE INDEX 2: commit; COMMIT drop index @amname@_create_index_with_select_idx; -DROP +DROP INDEX 2: begin; BEGIN 2: create index @amname@_create_index_with_select_idx on @amname@_create_index_with_select_tbl(a); -CREATE +CREATE INDEX 1: begin; BEGIN @@ -89,6 +89,6 @@ COMMIT COMMIT drop table @amname@_create_index_with_select_tbl; -DROP +DROP TABLE reset default_table_access_method; RESET diff --git a/src/test/isolation2/output/uao/cursor_before_delete.source b/src/test/isolation2/output/uao/cursor_before_delete.source index c7a9ddc8fc0..d96257b4c4a 100644 --- a/src/test/isolation2/output/uao/cursor_before_delete.source +++ b/src/test/isolation2/output/uao/cursor_before_delete.source @@ -1,16 +1,16 @@ -- @Description Tests the visibility when a cursor has been created before the delete. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1, 100); -INSERT 100 +INSERT 0 100 1: BEGIN; BEGIN 1: DECLARE cur CURSOR FOR SELECT a FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 1: FETCH NEXT IN cur; a --- @@ -43,19 +43,19 @@ COMMIT 5 (1 row) 1: CLOSE cur; -CLOSE +CLOSE CURSOR 1: COMMIT; COMMIT 3: BEGIN; BEGIN 3: DECLARE cur CURSOR FOR SELECT a FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 3: FETCH NEXT IN cur; a --- 5 (1 row) 3: CLOSE cur; -CLOSE +CLOSE CURSOR 3: COMMIT; COMMIT diff --git a/src/test/isolation2/output/uao/cursor_before_deletevacuum.source b/src/test/isolation2/output/uao/cursor_before_deletevacuum.source index ee1dddd8d58..5b8fe7970f2 100644 --- a/src/test/isolation2/output/uao/cursor_before_deletevacuum.source +++ b/src/test/isolation2/output/uao/cursor_before_deletevacuum.source @@ -1,16 +1,16 @@ -- @Description Tests the visibility when a cursor has been created before the delete. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1, 10); -INSERT 10 +INSERT 0 10 1: BEGIN; BEGIN 1: DECLARE cur CURSOR FOR SELECT a FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 1: FETCH NEXT IN cur; a --- @@ -44,7 +44,7 @@ COMMIT 5 (1 row) 1: CLOSE cur; -CLOSE +CLOSE CURSOR 1: COMMIT; COMMIT 2<: <... completed> @@ -52,13 +52,13 @@ VACUUM 3: BEGIN; BEGIN 3: DECLARE cur CURSOR FOR SELECT a FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 3: FETCH NEXT IN cur; a --- 5 (1 row) 3: CLOSE cur; -CLOSE +CLOSE CURSOR 3: COMMIT; COMMIT diff --git a/src/test/isolation2/output/uao/cursor_before_update.source b/src/test/isolation2/output/uao/cursor_before_update.source index f0998ea3e68..8b595717931 100644 --- a/src/test/isolation2/output/uao/cursor_before_update.source +++ b/src/test/isolation2/output/uao/cursor_before_update.source @@ -1,16 +1,16 @@ -- @Description Tests the visibility when a cursor has been created before the update. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,100) AS i; -INSERT 100 +INSERT 0 100 1: BEGIN; BEGIN 1: DECLARE cur CURSOR FOR SELECT a,b FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 1: FETCH NEXT IN cur; a | b ---+--- @@ -43,19 +43,19 @@ COMMIT 5 | 5 (1 row) 1: CLOSE cur; -CLOSE +CLOSE CURSOR 1: COMMIT; COMMIT 3: BEGIN; BEGIN 3: DECLARE cur CURSOR FOR SELECT a,b FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 3: FETCH NEXT IN cur; a | b ---+--- 1 | 8 (1 row) 3: CLOSE cur; -CLOSE +CLOSE CURSOR 3: COMMIT; COMMIT diff --git a/src/test/isolation2/output/uao/cursor_withhold.source b/src/test/isolation2/output/uao/cursor_withhold.source index eec08b9c2af..7ef58270010 100644 --- a/src/test/isolation2/output/uao/cursor_withhold.source +++ b/src/test/isolation2/output/uao/cursor_withhold.source @@ -1,14 +1,14 @@ -- @Description Tests the visibility of an "with hold" cursor w.r.t. deletes. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,100); -INSERT 100 +INSERT 0 100 1: DECLARE cur CURSOR WITH HOLD FOR SELECT a FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 1: FETCH NEXT IN cur; a --- @@ -41,13 +41,13 @@ COMMIT 5 (1 row) 1: CLOSE cur; -CLOSE +CLOSE CURSOR 3: DECLARE cur CURSOR WITH HOLD FOR SELECT a FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 3: FETCH NEXT IN cur; a --- 5 (1 row) 3: CLOSE cur; -CLOSE +CLOSE CURSOR diff --git a/src/test/isolation2/output/uao/cursor_withhold2.source b/src/test/isolation2/output/uao/cursor_withhold2.source index 3a63dbd2c0f..df0042791b0 100644 --- a/src/test/isolation2/output/uao/cursor_withhold2.source +++ b/src/test/isolation2/output/uao/cursor_withhold2.source @@ -1,16 +1,16 @@ -- @Description Tests the visibility of an "with hold" cursor w.r.t. deletes. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,10); -INSERT 10 +INSERT 0 10 1: BEGIN; BEGIN 1: DECLARE cur CURSOR WITH HOLD FOR SELECT a FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 1: FETCH NEXT IN cur; a --- @@ -47,13 +47,13 @@ VACUUM 5 (1 row) 1: CLOSE cur; -CLOSE +CLOSE CURSOR 3: DECLARE cur CURSOR WITH HOLD FOR SELECT a FROM ao ORDER BY a; -DECLARE +DECLARE CURSOR 3: FETCH NEXT IN cur; a --- 5 (1 row) 3: CLOSE cur; -CLOSE +CLOSE CURSOR diff --git a/src/test/isolation2/output/uao/delete_while_vacuum.source b/src/test/isolation2/output/uao/delete_while_vacuum.source index c32d7c805a6..d740968fbee 100644 --- a/src/test/isolation2/output/uao/delete_while_vacuum.source +++ b/src/test/isolation2/output/uao/delete_while_vacuum.source @@ -1,11 +1,11 @@ -- @Description Ensures that a delete before a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 DELETE FROM ao WHERE a < 12; @@ -21,11 +21,11 @@ BEGIN 2: VACUUM ao; VACUUM 1<: <... completed> -DELETE +COMMIT 1: SELECT COUNT(*) FROM ao; count ------- 11 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/index_build_reltuples.source b/src/test/isolation2/output/uao/index_build_reltuples.source index d289347544c..04fd55a152a 100644 --- a/src/test/isolation2/output/uao/index_build_reltuples.source +++ b/src/test/isolation2/output/uao/index_build_reltuples.source @@ -10,12 +10,12 @@ SET -- and the index's reltuples, to equal the actual segment tuple counts. 
CREATE TABLE index_build_reltuples_@amname@(a int); -CREATE +CREATE TABLE INSERT INTO index_build_reltuples_@amname@ SELECT generate_series(1, 10); -INSERT 10 +INSERT 0 10 CREATE INDEX ON index_build_reltuples_@amname@(a); -CREATE +CREATE INDEX SELECT gp_segment_id, count(*) FROM index_build_reltuples_@amname@ GROUP BY gp_segment_id ORDER BY gp_segment_id; gp_segment_id | count @@ -40,7 +40,7 @@ SELECT gp_segment_id, reltuples FROM gp_dist_random('pg_class') WHERE relname='i (3 rows) DROP TABLE index_build_reltuples_@amname@; -DROP +DROP TABLE -- Case 2: Verify that CREATE INDEX is able to update the aorel's reltuples -- to equal the actual segment tuple counts, when there are deleted tuples. For @@ -49,9 +49,9 @@ DROP -- reltuples count for the index would also account for deleted tuples. CREATE TABLE index_build_reltuples_@amname@(a int); -CREATE +CREATE TABLE INSERT INTO index_build_reltuples_@amname@ SELECT generate_series(1, 20); -INSERT 20 +INSERT 0 20 SELECT gp_segment_id, count(*) FROM index_build_reltuples_@amname@ GROUP BY gp_segment_id ORDER BY gp_segment_id; gp_segment_id | count @@ -65,7 +65,7 @@ DELETE FROM index_build_reltuples_@amname@ WHERE a <= 10; DELETE 10 CREATE INDEX ON index_build_reltuples_@amname@(a); -CREATE +CREATE INDEX SELECT gp_segment_id, count(*) FROM index_build_reltuples_@amname@ GROUP BY gp_segment_id ORDER BY gp_segment_id; gp_segment_id | count @@ -90,17 +90,17 @@ SELECT gp_segment_id, reltuples FROM gp_dist_random('pg_class') WHERE relname='i (3 rows) DROP TABLE index_build_reltuples_@amname@; -DROP +DROP TABLE -- Case 3: Verify that CREATE INDEX is able to update both the aorel's reltuples -- and the index's reltuples, to equal the actual segment tuple counts, when -- there are aborted tuples. 
CREATE TABLE index_build_reltuples_@amname@(a int); -CREATE +CREATE TABLE INSERT INTO index_build_reltuples_@amname@ SELECT generate_series(1, 10); -INSERT 10 +INSERT 0 10 SELECT gp_segment_id, count(*) FROM index_build_reltuples_@amname@ GROUP BY gp_segment_id ORDER BY gp_segment_id; gp_segment_id | count @@ -113,12 +113,12 @@ SELECT gp_segment_id, count(*) FROM index_build_reltuples_@amname@ GROUP BY gp_s BEGIN; BEGIN INSERT INTO index_build_reltuples_@amname@ SELECT generate_series(11, 20); -INSERT 10 +INSERT 0 10 ABORT; -ABORT +ROLLBACK CREATE INDEX ON index_build_reltuples_@amname@(a); -CREATE +CREATE INDEX SELECT gp_segment_id, count(*) FROM index_build_reltuples_@amname@ GROUP BY gp_segment_id ORDER BY gp_segment_id; gp_segment_id | count @@ -143,7 +143,7 @@ SELECT gp_segment_id, reltuples FROM gp_dist_random('pg_class') WHERE relname='i (3 rows) DROP TABLE index_build_reltuples_@amname@; -DROP +DROP TABLE -- Case 4: Verify that CREATE INDEX is able to update both the aorel's reltuples -- and the index's reltuples, to equal the latest segment tuple counts, even @@ -151,7 +151,7 @@ DROP -- (highlights the need for using SnapshotAny) CREATE TABLE index_build_reltuples_@amname@(a int); -CREATE +CREATE TABLE 1: BEGIN ISOLATION LEVEL REPEATABLE READ; BEGIN @@ -161,10 +161,10 @@ BEGIN (0 rows) INSERT INTO index_build_reltuples_@amname@ SELECT generate_series(1, 10); -INSERT 10 +INSERT 0 10 1: CREATE INDEX ON index_build_reltuples_@amname@(a); -CREATE +CREATE INDEX 1: COMMIT; COMMIT @@ -191,7 +191,7 @@ SELECT gp_segment_id, reltuples FROM gp_dist_random('pg_class') WHERE relname='i (3 rows) DROP TABLE index_build_reltuples_@amname@; -DROP +DROP TABLE RESET default_table_access_method; RESET diff --git a/src/test/isolation2/output/uao/insert_policy.source b/src/test/isolation2/output/uao/insert_policy.source index 9d5bbf9cfb1..92197ead4d0 100644 --- a/src/test/isolation2/output/uao/insert_policy.source +++ b/src/test/isolation2/output/uao/insert_policy.source @@ 
-1,16 +1,16 @@ -- @Description Tests the AO segment file selection policy -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE -- Case 1: Both transactions insert initial segment files into aoseg. 1: BEGIN; BEGIN 2: BEGIN; BEGIN 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 -- Segment file 1 should be created 3: SELECT segno FROM gp_ao_or_aocs_seg('ao'); segno @@ -18,7 +18,7 @@ INSERT 1 1 (1 row) 2: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 -- Segment file 2 should be created 3: SELECT segno FROM gp_ao_or_aocs_seg('ao'); segno @@ -41,7 +41,7 @@ COMMIT -- Case 2: Concurrent inserts with existing segment files in aoseg. 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 3: SELECT segno, tupcount FROM gp_ao_or_aocs_seg('ao'); segno | tupcount -------+---------- @@ -51,7 +51,7 @@ INSERT 1 -- Here we aim to insert a tuple to the same seg as (1). -- Under jump jash, (15) and (1) are on the same seg(seg1). 1: INSERT INTO AO VALUES (15); -INSERT 1 +INSERT 0 1 3: SELECT segno, tupcount FROM gp_ao_or_aocs_seg('ao'); segno | tupcount -------+---------- @@ -61,11 +61,11 @@ INSERT 1 1: BEGIN; BEGIN 1: INSERT INTO AO VALUES (15); -INSERT 1 +INSERT 0 1 2: BEGIN; BEGIN 2: INSERT INTO AO VALUES (15); -INSERT 1 +INSERT 0 1 1: COMMIT; COMMIT 2: COMMIT; @@ -77,9 +77,9 @@ COMMIT 2 | 3 (2 rows) 1: insert into ao select generate_series(1,100000); -INSERT 100000 +INSERT 0 100000 1: INSERT INTO AO VALUES (15); -INSERT 1 +INSERT 0 1 3: SELECT segno, case when tupcount = 0 then 'zero' when tupcount <= 5 then 'few' else 'many' end FROM gp_ao_or_aocs_seg('ao'); segno | case -------+------ diff --git a/src/test/isolation2/output/uao/insert_policy_2.source b/src/test/isolation2/output/uao/insert_policy_2.source index a763a89bb7d..1a726f56ae4 100644 --- a/src/test/isolation2/output/uao/insert_policy_2.source +++ b/src/test/isolation2/output/uao/insert_policy_2.source @@ -1,272 +1,272 @@ -- @Description Tests the AO segment file 
selection policy -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: 
INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES 
(1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 
0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 -- Actual test begins 1: INSERT INTO AO VALUES (1); -INSERT 1 +INSERT 0 1 2U: SELECT segno, tupcount FROM gp_ao_or_aocs_seg('ao'); segno | tupcount -------+---------- 1 | 127 (1 row) 1: INSERT INTO AO VALUES (2); -INSERT 1 +INSERT 0 1 2U: SELECT segno, tupcount FROM gp_ao_or_aocs_seg('ao'); segno | tupcount -------+---------- @@ -275,11 +275,11 @@ INSERT 1 1: BEGIN; BEGIN 1: INSERT INTO AO VALUES (2); -INSERT 1 +INSERT 0 1 2: BEGIN; BEGIN 2: INSERT INTO AO VALUES (2); -INSERT 1 +INSERT 0 1 1: COMMIT; COMMIT 2: COMMIT; @@ -290,9 +290,9 @@ COMMIT 1 | 127 (1 row) 1: insert into ao select generate_series(1,100000); -INSERT 100000 +INSERT 0 100000 1: INSERT INTO AO VALUES (2); -INSERT 1 +INSERT 0 1 2U: SELECT segno, case when tupcount = 0 then 'zero' when tupcount <= 5 then 'few' else 'many' end FROM gp_ao_or_aocs_seg('ao'); segno | case -------+------ diff --git a/src/test/isolation2/output/uao/insert_while_vacuum.source b/src/test/isolation2/output/uao/insert_while_vacuum.source index 8203db799f3..34a0a297b07 100644 --- a/src/test/isolation2/output/uao/insert_while_vacuum.source +++ b/src/test/isolation2/output/uao/insert_while_vacuum.source @@ -1,51 +1,51 @@ -- @Description Ensures that an insert during a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) 
USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2667 @@ -53,7 +53,7 @@ DELETE 2667 4: BEGIN; BEGIN 4: insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select 
generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);insert into ao select generate_series(1001,2000);COMMIT; -INSERT +COMMIT 2<: <... completed> VACUUM 3: SELECT COUNT(*) FROM ao WHERE a = 1500; @@ -62,4 +62,4 @@ VACUUM 20 (1 row) 4: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/limit_indexscan_inits.source b/src/test/isolation2/output/uao/limit_indexscan_inits.source index 034c66a57ce..9e21323bba1 100644 --- a/src/test/isolation2/output/uao/limit_indexscan_inits.source +++ b/src/test/isolation2/output/uao/limit_indexscan_inits.source @@ -16,40 +16,40 @@ select pg_stat_get_xact_numscans('gp_fastsequence_objid_objmod_index'::regclass) if quote_ident(itype) = 'btree' then /* for BTREE index */ select post_nscans - pre_nscans = segnos + 1 into result; /* calculate the diff and compare to segnos plus 1 to count in segfile0, expect equal */ raise notice '[BTREE] expect: post_nscans - pre_nscans == segnos + 1'; /* in func */ elsif quote_ident(itype) = 'brin' then /* for BRIN index */ select post_nscans - pre_nscans = ((segnos + 1) * 2 - 1) into result; /* BRIN doubles nscans(of BTREE) due to implementation constraint */ raise notice '[BRIN] expect: post_nscans - pre_nscans == ((segnos + 1) * 2 - 1)'; /* in func */ else /* in func */ raise exception 'unexpected type of index %', itype::text; /* in func */ end if; /* in func 
*/ raise notice 'pre_nscans = %, post_nscans = %, segnos = %', pre_nscans, post_nscans, segnos; /* verbose */ return result; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION set default_table_access_method=@amname@; SET create table @amname@_limit_iscan_inits_tbl (a int, b int, c int, d int); -CREATE +CREATE TABLE create index on @amname@_limit_iscan_inits_tbl(a); -CREATE +CREATE INDEX create index on @amname@_limit_iscan_inits_tbl using brin (b); -CREATE +CREATE INDEX -- Start three concurrent writing sessions to generate three segment files. 1: begin; BEGIN 1: insert into @amname@_limit_iscan_inits_tbl select a, a, a, a from generate_series(1, 10)a; -INSERT 10 +INSERT 0 10 2: begin; BEGIN 2: insert into @amname@_limit_iscan_inits_tbl select a, a, a, a from generate_series(11, 20)a; -INSERT 10 +INSERT 0 10 3: begin; BEGIN 3: insert into @amname@_limit_iscan_inits_tbl select a, a, a, a from generate_series(21, 30)a; -INSERT 10 +INSERT 0 10 1: end; -END +COMMIT 2: end; -END +COMMIT 3: end; -END +COMMIT -- diable seqscan 0U: set enable_seqscan = off; @@ -89,8 +89,8 @@ SET 0Uq: ... 
drop table @amname@_limit_iscan_inits_tbl; -DROP +DROP TABLE drop function test_iscan_inits_same_as_aosegs; -DROP +DROP FUNCTION reset default_table_access_method; RESET diff --git a/src/test/isolation2/output/uao/max_concurrency.source b/src/test/isolation2/output/uao/max_concurrency.source index 9423f38f5e6..d484f75d4e5 100644 --- a/src/test/isolation2/output/uao/max_concurrency.source +++ b/src/test/isolation2/output/uao/max_concurrency.source @@ -1,12 +1,12 @@ -- @Description Insert into a ao relation with 127 concurrent transactions -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE ALTER RESOURCE GROUP admin_group SET CONCURRENCY 130; -ALTER +ALTER RESOURCE GROUP 1: BEGIN; BEGIN @@ -263,259 +263,259 @@ BEGIN 127: BEGIN; BEGIN 1: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 2: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 3: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 4: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 5: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 6: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 7: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 8: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 9: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 10: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 11: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 12: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 13: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 14: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 15: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 16: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 17: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 18: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 19: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 20: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 21: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 22: INSERT INTO AO VALUES 
(1, 1); -INSERT 1 +INSERT 0 1 23: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 24: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 25: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 26: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 27: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 28: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 29: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 30: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 31: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 32: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 33: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 34: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 35: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 36: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 37: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 38: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 39: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 40: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 41: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 42: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 43: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 44: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 45: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 46: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 47: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 48: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 49: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 50: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 51: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 52: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 53: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 54: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 55: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 56: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 57: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 58: 
INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 59: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 60: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 61: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 62: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 63: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 64: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 65: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 66: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 67: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 68: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 69: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 70: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 71: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 72: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 73: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 74: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 75: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 76: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 77: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 78: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 79: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 80: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 81: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 82: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 83: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 84: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 85: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 86: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 87: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 88: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 89: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 90: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 91: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 92: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 93: INSERT INTO AO VALUES (1, 1); -INSERT 1 
+INSERT 0 1 94: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 95: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 96: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 97: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 98: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 99: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 100: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 101: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 102: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 103: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 104: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 105: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 106: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 107: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 108: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 109: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 110: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 111: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 112: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 113: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 114: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 115: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 116: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 117: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 118: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 119: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 120: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 121: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 122: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 123: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 124: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 125: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 126: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 127: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 1: COMMIT; COMMIT 2: COMMIT; @@ -1034,4 +1034,4 @@ 
SELECT * FROM gp_ao_or_aocs_seg('ao') ORDER BY segno; (127 rows) ALTER RESOURCE GROUP admin_group SET CONCURRENCY 20; -ALTER +ALTER RESOURCE GROUP diff --git a/src/test/isolation2/output/uao/max_concurrency2.source b/src/test/isolation2/output/uao/max_concurrency2.source index 294e2feccfd..8776993e44f 100644 --- a/src/test/isolation2/output/uao/max_concurrency2.source +++ b/src/test/isolation2/output/uao/max_concurrency2.source @@ -11,12 +11,12 @@ GP_IGNORE: defined new match expression DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE ALTER RESOURCE GROUP admin_group SET CONCURRENCY 130; -ALTER +ALTER RESOURCE GROUP 1: BEGIN; BEGIN @@ -275,259 +275,259 @@ BEGIN 128: BEGIN; BEGIN 1: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 2: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 3: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 4: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 5: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 6: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 7: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 8: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 9: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 10: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 11: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 12: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 13: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 14: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 15: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 16: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 17: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 18: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 19: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 20: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 21: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 22: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 23: INSERT INTO AO VALUES 
(1, 1); -INSERT 1 +INSERT 0 1 24: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 25: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 26: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 27: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 28: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 29: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 30: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 31: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 32: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 33: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 34: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 35: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 36: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 37: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 38: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 39: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 40: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 41: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 42: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 43: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 44: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 45: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 46: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 47: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 48: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 49: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 50: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 51: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 52: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 53: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 54: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 55: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 56: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 57: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 58: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 59: 
INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 60: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 61: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 62: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 63: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 64: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 65: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 66: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 67: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 68: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 69: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 70: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 71: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 72: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 73: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 74: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 75: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 76: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 77: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 78: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 79: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 80: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 81: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 82: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 83: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 84: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 85: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 86: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 87: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 88: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 89: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 90: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 91: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 92: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 93: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 94: INSERT INTO AO VALUES (1, 1); -INSERT 1 
+INSERT 0 1 95: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 96: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 97: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 98: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 99: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 100: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 101: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 102: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 103: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 104: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 105: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 106: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 107: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 108: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 109: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 110: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 111: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 112: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 113: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 114: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 115: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 116: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 117: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 118: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 119: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 120: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 121: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 122: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 123: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 124: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 125: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 126: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 127: INSERT INTO AO VALUES (1, 1); -INSERT 1 +INSERT 0 1 128: INSERT INTO AO VALUES (1, 1); ERROR: could not find segment file to use for inserting into relation "ao" 
1: COMMIT; @@ -1346,4 +1346,4 @@ SELECT * FROM gp_ao_or_aocs_seg('ao') ORDER BY state, segno; (127 rows) ALTER RESOURCE GROUP admin_group SET CONCURRENCY 20; -ALTER +ALTER RESOURCE GROUP diff --git a/src/test/isolation2/output/uao/modcount.source b/src/test/isolation2/output/uao/modcount.source index 1abf7996d8a..00345d6a556 100644 --- a/src/test/isolation2/output/uao/modcount.source +++ b/src/test/isolation2/output/uao/modcount.source @@ -1,11 +1,11 @@ -- @Description Tests that DML operatins change the modification count. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,10) AS i; -INSERT 10 +INSERT 0 10 SELECT state, tupcount, modcount FROM gp_ao_or_aocs_seg('ao'); state | tupcount | modcount @@ -15,7 +15,7 @@ SELECT state, tupcount, modcount FROM gp_ao_or_aocs_seg('ao'); 1 | 5 | 1 (3 rows) INSERT INTO ao VALUES (11, 11); -INSERT 1 +INSERT 0 1 SELECT state, tupcount, modcount FROM gp_ao_or_aocs_seg('ao'); state | tupcount | modcount -------+----------+---------- diff --git a/src/test/isolation2/output/uao/modcount_vacuum.source b/src/test/isolation2/output/uao/modcount_vacuum.source index 49f29b9ae92..468de1a4b3f 100644 --- a/src/test/isolation2/output/uao/modcount_vacuum.source +++ b/src/test/isolation2/output/uao/modcount_vacuum.source @@ -1,11 +1,11 @@ -- @Description Tests that vacuum is not changing the modification count. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,10) AS i; -INSERT 10 +INSERT 0 10 DELETE FROM ao WHERE a < 5; DELETE 4 diff --git a/src/test/isolation2/output/uao/parallel_delete.source b/src/test/isolation2/output/uao/parallel_delete.source index 0ac8859d81a..4f54ef9caa2 100644 --- a/src/test/isolation2/output/uao/parallel_delete.source +++ b/src/test/isolation2/output/uao/parallel_delete.source @@ -2,11 +2,11 @@ -- until the transaction is committed. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@ DISTRIBUTED BY (a); -CREATE +CREATE TABLE insert into ao select generate_series(1,100); -INSERT 100 +INSERT 0 100 -- The actual test begins 1: BEGIN; diff --git a/src/test/isolation2/output/uao/parallel_delete_2.source b/src/test/isolation2/output/uao/parallel_delete_2.source index 0723a997aab..f7678fa784a 100644 --- a/src/test/isolation2/output/uao/parallel_delete_2.source +++ b/src/test/isolation2/output/uao/parallel_delete_2.source @@ -2,11 +2,11 @@ -- until the transaction is committed. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@ DISTRIBUTED BY (a); -CREATE +CREATE TABLE insert into ao select generate_series(1,100); -INSERT 100 +INSERT 0 100 -- The actual test begins 1: BEGIN; diff --git a/src/test/isolation2/output/uao/parallel_delete_optimizer.source b/src/test/isolation2/output/uao/parallel_delete_optimizer.source index 0ac8859d81a..4f54ef9caa2 100644 --- a/src/test/isolation2/output/uao/parallel_delete_optimizer.source +++ b/src/test/isolation2/output/uao/parallel_delete_optimizer.source @@ -2,11 +2,11 @@ -- until the transaction is committed. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@ DISTRIBUTED BY (a); -CREATE +CREATE TABLE insert into ao select generate_series(1,100); -INSERT 100 +INSERT 0 100 -- The actual test begins 1: BEGIN; diff --git a/src/test/isolation2/output/uao/parallel_update.source b/src/test/isolation2/output/uao/parallel_update.source index 35077903916..6665c8f75ac 100644 --- a/src/test/isolation2/output/uao/parallel_update.source +++ b/src/test/isolation2/output/uao/parallel_update.source @@ -2,11 +2,11 @@ -- until the transaction is committed. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,10) AS i; -INSERT 10 +INSERT 0 10 -- The actual test begins 1: BEGIN; diff --git a/src/test/isolation2/output/uao/parallel_update_2.source b/src/test/isolation2/output/uao/parallel_update_2.source index d190b8852d9..98576a4bacd 100644 --- a/src/test/isolation2/output/uao/parallel_update_2.source +++ b/src/test/isolation2/output/uao/parallel_update_2.source @@ -2,11 +2,11 @@ -- until the transaction is committed. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,10) AS i; -INSERT 10 +INSERT 0 10 -- The actual test begins 1: BEGIN; diff --git a/src/test/isolation2/output/uao/parallel_update_optimizer.source b/src/test/isolation2/output/uao/parallel_update_optimizer.source index 35077903916..6665c8f75ac 100644 --- a/src/test/isolation2/output/uao/parallel_update_optimizer.source +++ b/src/test/isolation2/output/uao/parallel_update_optimizer.source @@ -2,11 +2,11 @@ -- until the transaction is committed. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,10) AS i; -INSERT 10 +INSERT 0 10 -- The actual test begins 1: BEGIN; diff --git a/src/test/isolation2/output/uao/parallel_update_readcommitted.source b/src/test/isolation2/output/uao/parallel_update_readcommitted.source index d9ca48fc74c..a7cda472e7b 100644 --- a/src/test/isolation2/output/uao/parallel_update_readcommitted.source +++ b/src/test/isolation2/output/uao/parallel_update_readcommitted.source @@ -2,11 +2,11 @@ -- until the transaction is committed. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,10) AS i; -INSERT 10 +INSERT 0 10 -- The actual test begins 1: BEGIN; diff --git a/src/test/isolation2/output/uao/phantom_reads.source b/src/test/isolation2/output/uao/phantom_reads.source index 849b6a45fd5..4278d3cbbf7 100644 --- a/src/test/isolation2/output/uao/phantom_reads.source +++ b/src/test/isolation2/output/uao/phantom_reads.source @@ -2,11 +2,11 @@ -- Actually, no UAO is involved here. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 1: BEGIN; BEGIN @@ -28,7 +28,7 @@ BEGIN 2: BEGIN; BEGIN 2: INSERT INTO ao VALUES (101, 25); -INSERT 1 +INSERT 0 1 2: COMMIT; COMMIT 1: SELECT * FROM ao WHERE b BETWEEN 20 AND 30 ORDER BY a; diff --git a/src/test/isolation2/output/uao/phantom_reads_delete.source b/src/test/isolation2/output/uao/phantom_reads_delete.source index 7a6a33d470e..8bb65fdaeb3 100644 --- a/src/test/isolation2/output/uao/phantom_reads_delete.source +++ b/src/test/isolation2/output/uao/phantom_reads_delete.source @@ -2,11 +2,11 @@ -- the default isolation level. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 1: BEGIN; BEGIN diff --git a/src/test/isolation2/output/uao/phantom_reads_delete_serializable.source b/src/test/isolation2/output/uao/phantom_reads_delete_serializable.source index a0d814acc45..608aae6dbba 100644 --- a/src/test/isolation2/output/uao/phantom_reads_delete_serializable.source +++ b/src/test/isolation2/output/uao/phantom_reads_delete_serializable.source @@ -2,11 +2,11 @@ -- the serializable isolation level. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 1: BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; BEGIN diff --git a/src/test/isolation2/output/uao/phantom_reads_serializable.source b/src/test/isolation2/output/uao/phantom_reads_serializable.source index 0b1a7b00f62..87d396bd976 100644 --- a/src/test/isolation2/output/uao/phantom_reads_serializable.source +++ b/src/test/isolation2/output/uao/phantom_reads_serializable.source @@ -2,11 +2,11 @@ -- transactions. Actually, no UAO is involved here. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 1: BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; BEGIN @@ -28,7 +28,7 @@ BEGIN 2: BEGIN; BEGIN 2: INSERT INTO ao VALUES (101, 25); -INSERT 1 +INSERT 0 1 2: COMMIT; COMMIT 1: SELECT * FROM ao WHERE b BETWEEN 20 AND 30 ORDER BY a; diff --git a/src/test/isolation2/output/uao/phantom_reads_update.source b/src/test/isolation2/output/uao/phantom_reads_update.source index 65e2428c6d4..6e92225bb31 100644 --- a/src/test/isolation2/output/uao/phantom_reads_update.source +++ b/src/test/isolation2/output/uao/phantom_reads_update.source @@ -2,11 +2,11 @@ -- the default isolation level. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 1: BEGIN; BEGIN diff --git a/src/test/isolation2/output/uao/phantom_reads_update_serializable.source b/src/test/isolation2/output/uao/phantom_reads_update_serializable.source index 6d7665642a2..149f5864a2d 100644 --- a/src/test/isolation2/output/uao/phantom_reads_update_serializable.source +++ b/src/test/isolation2/output/uao/phantom_reads_update_serializable.source @@ -2,11 +2,11 @@ -- the serializable isolation level. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 1: BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; BEGIN diff --git a/src/test/isolation2/output/uao/select_after_vacuum.source b/src/test/isolation2/output/uao/select_after_vacuum.source index 5227d1fe7c1..ef86bf99b6e 100644 --- a/src/test/isolation2/output/uao/select_after_vacuum.source +++ b/src/test/isolation2/output/uao/select_after_vacuum.source @@ -1,57 +1,57 @@ -- @Description Ensures that a select after a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE DROP TABLE IF EXISTS ao2; -DROP +DROP TABLE CREATE TABLE ao2 (a INT) USING @amname@; -CREATE +CREATE TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 
insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao2 select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 -- The actual test begins DELETE FROM ao WHERE a < 128; @@ -95,7 +95,7 @@ COMMIT 18333 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 0: SELECT segno, case when tupcount = 0 then 'zero' when tupcount = 1 then 'one' when tupcount <= 5 then 'few' else 'many' end FROM gp_ao_or_aocs_seg('ao'); segno | case -------+------ diff --git a/src/test/isolation2/output/uao/select_after_vacuum_serializable.source b/src/test/isolation2/output/uao/select_after_vacuum_serializable.source index 3542fe507da..d95eb1db60f 100644 --- a/src/test/isolation2/output/uao/select_after_vacuum_serializable.source +++ b/src/test/isolation2/output/uao/select_after_vacuum_serializable.source @@ -1,57 +1,57 @@ -- @Description Ensures that a serializable select before during a vacuum operation blocks the vacuum. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE DROP TABLE IF EXISTS ao2; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE CREATE TABLE ao2 (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao2 select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2667 @@ -72,4 +72,4 @@ VACUUM 1: COMMIT; COMMIT 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 
0 1 diff --git a/src/test/isolation2/output/uao/select_before_vacuum.source b/src/test/isolation2/output/uao/select_before_vacuum.source index 003098d9ef8..f0d73cbbbb8 100644 --- a/src/test/isolation2/output/uao/select_before_vacuum.source +++ b/src/test/isolation2/output/uao/select_before_vacuum.source @@ -1,51 +1,51 @@ -- @Description Ensures that a select before a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 
1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2667 @@ -77,4 +77,4 @@ VACUUM 18333 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/select_while_delete.source b/src/test/isolation2/output/uao/select_while_delete.source index 529c9453da8..b87464866bf 100644 --- a/src/test/isolation2/output/uao/select_while_delete.source +++ b/src/test/isolation2/output/uao/select_while_delete.source @@ -1,11 +1,11 @@ -- @Description Ensures that a select during a delete operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,100); -INSERT 100 +INSERT 0 100 2: BEGIN; BEGIN @@ -52,4 +52,4 @@ COMMIT 9 (5 rows) 4: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/select_while_full_vacuum.source b/src/test/isolation2/output/uao/select_while_full_vacuum.source index dc32c7fa917..9d84796151b 100644 --- a/src/test/isolation2/output/uao/select_while_full_vacuum.source +++ b/src/test/isolation2/output/uao/select_while_full_vacuum.source @@ -1,51 +1,51 @@ -- @Description Ensures that a select during a full vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 
insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2667 @@ -55,6 +55,6 @@ BEGIN 2: VACUUM FULL ao; VACUUM 1<: <... 
completed> -SELECT +COMMIT 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/select_while_vacuum.source b/src/test/isolation2/output/uao/select_while_vacuum.source index 39a501a0460..a0d364b8612 100644 --- a/src/test/isolation2/output/uao/select_while_vacuum.source +++ b/src/test/isolation2/output/uao/select_while_vacuum.source @@ -1,51 +1,51 @@ -- @Description Ensures that a select during a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 
insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2667 @@ -55,6 +55,6 @@ BEGIN 2: VACUUM ao; VACUUM 1<: <... completed> -SELECT +COMMIT 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/select_while_vacuum_serializable.source b/src/test/isolation2/output/uao/select_while_vacuum_serializable.source index 9c6ecbd5a71..494c40444bb 100644 --- a/src/test/isolation2/output/uao/select_while_vacuum_serializable.source +++ b/src/test/isolation2/output/uao/select_while_vacuum_serializable.source @@ -1,51 +1,51 @@ -- @Description Ensures that a select from a serializalbe transaction is ok after vacuum -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 
+INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2667 @@ -59,4 +59,4 @@ VACUUM 18333 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/select_while_vacuum_serializable2.source b/src/test/isolation2/output/uao/select_while_vacuum_serializable2.source index 583f9e12c45..1fbab5fd618 100644 --- a/src/test/isolation2/output/uao/select_while_vacuum_serializable2.source +++ b/src/test/isolation2/output/uao/select_while_vacuum_serializable2.source @@ -2,51 +2,51 @@ -- on the table. -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 
insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2667 @@ -74,7 +74,7 @@ VACUUM 1: COMMIT; COMMIT 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 2: SELECT segment_id, segno, tupcount, state FROM gp_ao_or_aocs_seg('ao'); segment_id | segno | tupcount | state ------------+-------+----------+------- diff --git a/src/test/isolation2/output/uao/selectinsert_while_vacuum.source b/src/test/isolation2/output/uao/selectinsert_while_vacuum.source index 88d50af5fb0..a07d6d12146 100644 --- a/src/test/isolation2/output/uao/selectinsert_while_vacuum.source +++ b/src/test/isolation2/output/uao/selectinsert_while_vacuum.source @@ -1,49 +1,49 @@ -- @Description Ensures that an insert during a vacuum operation is ok -- CREATE TABLE selectinsert_while_vacuum_@amname@ (a INT) USING @amname@; -CREATE +CREATE TABLE insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 
0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into selectinsert_while_vacuum_@amname@ select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM selectinsert_while_vacuum_@amname@ WHERE a < 128; DELETE 2667 @@ -70,7 +70,7 @@ BEGIN (1 row) 2>: VACUUM selectinsert_while_vacuum_@amname@; 4: SELECT COUNT(*) FROM selectinsert_while_vacuum_@amname@;SELECT COUNT(*) FROM selectinsert_while_vacuum_@amname@;BEGIN;insert 
into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);insert into selectinsert_while_vacuum_@amname@ select generate_series(1001,2000);COMMIT; -SELECT +COMMIT 2<: <... 
completed> VACUUM 3: SELECT COUNT(*) FROM selectinsert_while_vacuum_@amname@ WHERE a = 1500; @@ -79,4 +79,4 @@ VACUUM 20 (1 row) 3: INSERT INTO selectinsert_while_vacuum_@amname@ VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/selectinsertupdate_while_vacuum.source b/src/test/isolation2/output/uao/selectinsertupdate_while_vacuum.source index fb69f657b5e..e9caac2064e 100644 --- a/src/test/isolation2/output/uao/selectinsertupdate_while_vacuum.source +++ b/src/test/isolation2/output/uao/selectinsertupdate_while_vacuum.source @@ -1,11 +1,11 @@ -- @Description Ensures that an update during a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,10) AS i; -INSERT 10 +INSERT 0 10 DELETE FROM ao WHERE a < 2; DELETE 1 @@ -17,16 +17,16 @@ BEGIN 9 (1 row) 4: INSERT INTO ao VALUES (1, 1); -INSERT 1 +INSERT 0 1 4>: UPDATE ao SET b=1 WHERE a > 5;UPDATE ao SET b=1 WHERE a > 6;COMMIT; 2: VACUUM ao; VACUUM 4<: <... 
completed> -UPDATE +COMMIT 3: SELECT COUNT(*) FROM ao WHERE b = 1; count ------- 6 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/selectupdate_while_vacuum.source b/src/test/isolation2/output/uao/selectupdate_while_vacuum.source index b5e37aedf2c..d37fb238999 100644 --- a/src/test/isolation2/output/uao/selectupdate_while_vacuum.source +++ b/src/test/isolation2/output/uao/selectupdate_while_vacuum.source @@ -1,41 +1,41 @@ -- @Description Ensures that an update during a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i 
as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1,1000) AS i; -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2032 @@ -62,7 +62,7 @@ BEGIN (1 row) 2>: VACUUM ao; 4: SELECT COUNT(*) FROM ao;SELECT COUNT(*) FROM ao;BEGIN;UPDATE ao SET b=1 WHERE a > 500;UPDATE ao SET b=1 WHERE a > 400;COMMIT; -SELECT +COMMIT 2<: <... completed> VACUUM 3: SELECT COUNT(*) FROM ao WHERE b = 1; @@ -71,4 +71,4 @@ VACUUM 9600 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/snapshot_index_corruption.source b/src/test/isolation2/output/uao/snapshot_index_corruption.source index 59b00e9f10c..8ce4c624520 100644 --- a/src/test/isolation2/output/uao/snapshot_index_corruption.source +++ b/src/test/isolation2/output/uao/snapshot_index_corruption.source @@ -2,17 +2,17 @@ -- -- Create AO table, insert few rows on it. drop table if exists test_ao; -DROP +DROP TABLE create table test_ao(i bigint) using @amname@ distributed by (i); -CREATE +CREATE TABLE insert into test_ao select generate_series(1,100); -INSERT 100 +INSERT 0 100 -- Test 1 -- Begin single-insert transaction. 1: begin; BEGIN 1: insert into test_ao values(101); -INSERT 1 +INSERT 0 1 -- Try to create index, it should hold on lock before commit below. 2&: create index test_ao_idx on test_ao(i); -- Commit single-insert transaction, so index continues creation. @@ -20,7 +20,7 @@ INSERT 1 COMMIT -- Force index usage and check row is here (false before fix). 2<: <... completed> -CREATE +CREATE INDEX 2: set optimizer=off; SET 2: set enable_seqscan=off; @@ -44,7 +44,7 @@ SET -- Test 2 -- Drop incomplete index 1: drop index test_ao_idx; -DROP +DROP INDEX -- Check row is here and start repeatable read transaction. 
2: select i from test_ao where i = 100; i @@ -64,7 +64,7 @@ SET 1: update test_ao set i = 200 where i = 100; UPDATE 1 1: create index test_ao_idx on test_ao(i); -CREATE +CREATE INDEX -- For the repeatable read isolation level row still there. 2: explain (costs off) select i from test_ao where i = 100; QUERY PLAN diff --git a/src/test/isolation2/output/uao/test_pg_appendonly_version.source b/src/test/isolation2/output/uao/test_pg_appendonly_version.source index 2bcf4f25353..6cbfdbc49bc 100644 --- a/src/test/isolation2/output/uao/test_pg_appendonly_version.source +++ b/src/test/isolation2/output/uao/test_pg_appendonly_version.source @@ -2,7 +2,7 @@ -- as it requires pg_appendonly.version >= AORelationVersion_CB2. create table @amname@_version_tbl (a int) using @amname@; -CREATE +CREATE TABLE -- unique index on AO is supported starting from version 2 (AORelationVersion_CB2) select version from pg_appendonly where relid = '@amname@_version_tbl'::regclass; @@ -11,11 +11,11 @@ select version from pg_appendonly where relid = '@amname@_version_tbl'::regclass 2 (1 row) create unique index on @amname@_version_tbl(a); -CREATE +CREATE INDEX insert into @amname@_version_tbl select generate_series(1, 10); -INSERT 10 +INSERT 0 10 create unique index on @amname@_version_tbl(a); -CREATE +CREATE INDEX set enable_seqscan = off; SET select * from @amname@_version_tbl where a = 2; @@ -43,14 +43,14 @@ HINT: ALTER TABLE SET WITH (REORGANIZE = true) before creating the -- alter table with reorganize to verify pg_appendonly being rewritten alter table @amname@_version_tbl set with (reorganize = true); -ALTER +ALTER TABLE select version from pg_appendonly where relid = '@amname@_version_tbl'::regclass; version --------- 2 (1 row) create unique index on @amname@_version_tbl(a); -CREATE +CREATE INDEX select * from @amname@_version_tbl where a = 3; a --- @@ -58,6 +58,6 @@ select * from @amname@_version_tbl where a = 3; (1 row) drop table @amname@_version_tbl; -DROP +DROP TABLE reset 
allow_system_table_mods; RESET diff --git a/src/test/isolation2/output/uao/update_while_vacuum.source b/src/test/isolation2/output/uao/update_while_vacuum.source index 881f19c8a20..fd6979fa8fc 100644 --- a/src/test/isolation2/output/uao/update_while_vacuum.source +++ b/src/test/isolation2/output/uao/update_while_vacuum.source @@ -1,11 +1,11 @@ -- @Description Ensures that an update before a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 DELETE FROM ao WHERE a < 12; DELETE 11 @@ -20,11 +20,11 @@ BEGIN 2: VACUUM ao; VACUUM 1<: <... completed> -UPDATE +COMMIT 1: SELECT COUNT(*) FROM ao; count ------- 89 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/vacuum_cleanup.source b/src/test/isolation2/output/uao/vacuum_cleanup.source index b90c82bfd68..a81d731c8ed 100644 --- a/src/test/isolation2/output/uao/vacuum_cleanup.source +++ b/src/test/isolation2/output/uao/vacuum_cleanup.source @@ -3,10 +3,10 @@ -- The age of the table is 1 after the following statement 2: create table ao_@amname@_vacuum_cleanup2(a int, b int) using @amname@; -CREATE +CREATE TABLE -- The age of the table is 2 after the following statement 2: insert into ao_@amname@_vacuum_cleanup2 select i, i from generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 -- The age of the table is 7 after the following 5 statements 2: update ao_@amname@_vacuum_cleanup2 set b = b + 1; UPDATE 100 @@ -50,13 +50,13 @@ VACUUM 1 | pg_aovisimap_ (2 rows) 2: end; -END +COMMIT -- Check that drop phase is not skipped, when there are concurrent serializable transactions. 
1: create table ao_@amname@_vacuum_cleanup3(a int, b int) using @amname@; -CREATE +CREATE TABLE 1: insert into ao_@amname@_vacuum_cleanup3 select i, i from generate_series(1, 100) i; -INSERT 100 +INSERT 0 100 1: delete from ao_@amname@_vacuum_cleanup3; DELETE 100 @@ -136,14 +136,14 @@ COMMIT create or replace function show_aoseg(tabname text) returns table (segno int, tupcount bigint, modcount bigint, formatversion smallint, state smallint) as $$ declare tabrelid oid; /* in func */ tabsegrelid oid; /* in func */ tabsegrelname text; /* in func */ begin select tabname::regclass::oid into tabrelid; /* in func */ select segrelid from pg_appendonly where relid = tabrelid into tabsegrelid; /* in func */ select relname from pg_class where oid = tabsegrelid into tabsegrelname; /* in func */ return query execute 'select segno,tupcount,modcount,formatversion,state from pg_aoseg.' || tabsegrelname; /* in func */ end; /* in func */ $$ language plpgsql; -CREATE +CREATE FUNCTION create table vacuum_concurrent_test_@amname@ (a int, b int, c int) using @amname@; -CREATE +CREATE TABLE insert into vacuum_concurrent_test_@amname@ select 2, b, b from generate_series(1, 11) b; -INSERT 11 +INSERT 0 11 create index i_b_vacuum_concurrent_reader_@amname@ on vacuum_concurrent_test_@amname@(b); -CREATE +CREATE INDEX update vacuum_concurrent_test_@amname@ set b = b + 1; UPDATE 11 @@ -195,7 +195,7 @@ VACUUM 2 | 12 | 11 (11 rows) 1: end; -END +COMMIT -- start another reader after VACUUM 2: begin; @@ -246,7 +246,7 @@ VACUUM 2 | 12 | 11 (11 rows) 2: end; -END +COMMIT 1q: ... 2q: ... @@ -257,16 +257,16 @@ END -- the corresponding index entries should not be deleted. 
truncate table vacuum_concurrent_test_@amname@; -TRUNCATE +TRUNCATE TABLE insert into vacuum_concurrent_test_@amname@ select 2, b, b from generate_series(1, 5) b; -INSERT 5 +INSERT 0 5 delete from vacuum_concurrent_test_@amname@; DELETE 5 1: begin; BEGIN 1: insert into vacuum_concurrent_test_@amname@ select 2, b, b from generate_series(6, 10) b; -INSERT 5 +INSERT 0 5 2: vacuum vacuum_concurrent_test_@amname@; VACUUM @@ -457,6 +457,6 @@ reset gp_select_invisible; RESET drop table vacuum_concurrent_test_@amname@; -DROP +DROP TABLE drop function show_aoseg; -DROP +DROP FUNCTION diff --git a/src/test/isolation2/output/uao/vacuum_index_stats.source b/src/test/isolation2/output/uao/vacuum_index_stats.source index ac47b47913f..b73960fad8e 100644 --- a/src/test/isolation2/output/uao/vacuum_index_stats.source +++ b/src/test/isolation2/output/uao/vacuum_index_stats.source @@ -12,11 +12,11 @@ -- The new strategy would not impact table->reltuples updates. create table vacuum_index_stats_@amname@ (a int, b int, c int) using @amname@; -CREATE +CREATE TABLE insert into vacuum_index_stats_@amname@ select 2, b, b from generate_series(1, 11) b; -INSERT 11 +INSERT 0 11 create index i_b_vacuum_index_stats_@amname@ on vacuum_index_stats_@amname@(b); -CREATE +CREATE INDEX set gp_appendonly_compaction_threshold = 10; SET @@ -102,6 +102,6 @@ VACUUM 0Uq: ... drop table vacuum_index_stats_@amname@; -DROP +DROP TABLE reset gp_appendonly_compaction_threshold; RESET diff --git a/src/test/isolation2/output/uao/vacuum_self_function.source b/src/test/isolation2/output/uao/vacuum_self_function.source index dd0b87b5796..978de51501b 100644 --- a/src/test/isolation2/output/uao/vacuum_self_function.source +++ b/src/test/isolation2/output/uao/vacuum_self_function.source @@ -2,17 +2,17 @@ -- that was acquired before vacuum. 
-- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 DELETE FROM ao WHERE a <= 30; DELETE 30 create or replace function myfunc() returns bigint as $$ begin /* inside a function */ perform pg_sleep(10); /* inside a function */ return (select count(*) from ao); /* inside a function */ end; /* inside a function */ $$ stable language plpgsql; -CREATE +CREATE FUNCTION -- Launch function into the background. 1&: select myfunc(); diff --git a/src/test/isolation2/output/uao/vacuum_self_serializable.source b/src/test/isolation2/output/uao/vacuum_self_serializable.source index fd70b62d4a1..87c8956a6f7 100644 --- a/src/test/isolation2/output/uao/vacuum_self_serializable.source +++ b/src/test/isolation2/output/uao/vacuum_self_serializable.source @@ -1,11 +1,11 @@ -- @Description Ensures that a vacuum with serializable works ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 DELETE FROM ao WHERE a <= 30; DELETE 30 diff --git a/src/test/isolation2/output/uao/vacuum_self_serializable2.source b/src/test/isolation2/output/uao/vacuum_self_serializable2.source index 6e78b5344a3..d9c35bde734 100644 --- a/src/test/isolation2/output/uao/vacuum_self_serializable2.source +++ b/src/test/isolation2/output/uao/vacuum_self_serializable2.source @@ -1,15 +1,15 @@ -- @Description Ensures that a vacuum with serializable works ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE DROP TABLE IF EXISTS ao2; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE CREATE TABLE ao2 (a INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 DELETE FROM ao WHERE a <= 30; 
DELETE 30 diff --git a/src/test/isolation2/output/uao/vacuum_self_serializable3.source b/src/test/isolation2/output/uao/vacuum_self_serializable3.source index 0ded4a3ed9b..0a44a62ce29 100644 --- a/src/test/isolation2/output/uao/vacuum_self_serializable3.source +++ b/src/test/isolation2/output/uao/vacuum_self_serializable3.source @@ -1,15 +1,15 @@ -- @Description Ensures that a vacuum with serializable works ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE DROP TABLE IF EXISTS ao2; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE CREATE TABLE ao2 (a INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 100) AS i; -INSERT 100 +INSERT 0 100 DELETE FROM ao WHERE a <= 30; DELETE 30 diff --git a/src/test/isolation2/output/uao/vacuum_while_insert.source b/src/test/isolation2/output/uao/vacuum_while_insert.source index ab89f7c2007..a876117ba54 100644 --- a/src/test/isolation2/output/uao/vacuum_while_insert.source +++ b/src/test/isolation2/output/uao/vacuum_while_insert.source @@ -1,51 +1,51 @@ -- @Description Ensures that a vacuum during insert operations is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT) USING @amname@; -CREATE +CREATE TABLE insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 
1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 insert into ao select generate_series(1,1000); -INSERT 1000 +INSERT 0 1000 DELETE FROM ao WHERE a < 128; DELETE 2667 @@ -58,13 +58,13 @@ BEGIN 2: VACUUM ao; VACUUM 1<: <... completed> -INSERT +COMMIT 4<: <... completed> -INSERT +COMMIT 3: SELECT COUNT(*) FROM ao WHERE a = 1500; count ------- 40 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/uao/vacuum_while_vacuum.source b/src/test/isolation2/output/uao/vacuum_while_vacuum.source index 4e7578ab986..492d19f7438 100644 --- a/src/test/isolation2/output/uao/vacuum_while_vacuum.source +++ b/src/test/isolation2/output/uao/vacuum_while_vacuum.source @@ -1,21 +1,21 @@ -- @Description Ensures that an vacuum while a vacuum operation is ok -- DROP TABLE IF EXISTS ao; -DROP +DROP TABLE CREATE TABLE ao (a INT, b INT) USING @amname@; -CREATE +CREATE TABLE INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 10000) AS i; -INSERT 10000 +INSERT 0 10000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 10000) AS i; -INSERT 10000 +INSERT 0 10000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 10000) AS i; -INSERT 10000 +INSERT 0 10000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 
10000) AS i; -INSERT 10000 +INSERT 0 10000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 10000) AS i; -INSERT 10000 +INSERT 0 10000 INSERT INTO ao SELECT i as a, i as b FROM generate_series(1, 10000) AS i; -INSERT 10000 +INSERT 0 10000 DELETE FROM ao WHERE a < 1200; DELETE 7194 @@ -35,4 +35,4 @@ VACUUM 52806 (1 row) 3: INSERT INTO ao VALUES (0); -INSERT 1 +INSERT 0 1 diff --git a/src/test/isolation2/output/workfile_mgr_test.source b/src/test/isolation2/output/workfile_mgr_test.source index ecf5438ca72..341ed2cbd7e 100644 --- a/src/test/isolation2/output/workfile_mgr_test.source +++ b/src/test/isolation2/output/workfile_mgr_test.source @@ -1,27 +1,27 @@ CREATE OR REPLACE FUNCTION gp_workfile_mgr_test_on_master(testname text, numfiles int) RETURNS setof bool LANGUAGE C VOLATILE EXECUTE ON COORDINATOR AS '@abs_builddir@/isolation2_regress@DLSUFFIX@', 'gp_workfile_mgr_test_harness'; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION gp_workfile_mgr_test_on_segments(testname text, numfiles int) RETURNS setof bool LANGUAGE C VOLATILE EXECUTE ON ALL SEGMENTS AS '@abs_builddir@/isolation2_regress@DLSUFFIX@', 'gp_workfile_mgr_test_harness'; -CREATE +CREATE FUNCTION CREATE FUNCTION gp_workfile_mgr_test(testname text, numfiles int) RETURNS SETOF BOOL AS $$ SELECT C.* FROM gp_workfile_mgr_test_on_master($1, $2) as C UNION ALL SELECT C.* FROM gp_workfile_mgr_test_on_segments($1, $2) as C $$ LANGUAGE SQL; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION gp_workfile_mgr_create_workset(worksetname text, interXact bool, holdPin bool, closeFile bool) RETURNS setof void LANGUAGE C VOLATILE EXECUTE ON ALL SEGMENTS AS '@abs_builddir@/isolation2_regress@DLSUFFIX@', 'gp_workfile_mgr_create_workset'; -CREATE +CREATE FUNCTION CREATE OR REPLACE FUNCTION gp_workfile_mgr_create_empty_workset(worksetname text) RETURNS setof void LANGUAGE C VOLATILE EXECUTE ON ALL SEGMENTS AS '@abs_builddir@/isolation2_regress@DLSUFFIX@', 'gp_workfile_mgr_create_workset'; -CREATE +CREATE 
FUNCTION CREATE FUNCTION gp_workfile_mgr_cache_entries() RETURNS TABLE(segid int4, prefix text, size int8, operation text, slice int4, sessionid int4, commandid int4, numfiles int4) AS '$libdir/gp_workfile_mgr', 'gp_workfile_mgr_cache_entries' LANGUAGE C VOLATILE EXECUTE ON ALL SEGMENTS; -CREATE +CREATE FUNCTION -- Wait for at the most 1 min for backends to remove transient -- workfile sets as part of exit processing and then report long lived -- workfile sets. create or replace function report_workfile_entries() returns table(segid int4, prefix text, size int8, operation text, slice int4, numfiles int4) as $$ declare iterations int; /* in func */ cnt int; /* in func */ begin iterations := 120; /* wait at the most 1 min */ select count(*) into cnt from gp_workfile_mgr_cache_entries() w where w.prefix not like 'long_live_workset%'; /* in func */ while (iterations > 0) and (cnt > 0) loop select count(*) into cnt from gp_workfile_mgr_cache_entries() w where w.prefix not like 'long_live_workset%'; /* in func */ perform pg_sleep(0.5); /* sleep for half a second */ iterations := iterations - 1; /* in func */ end loop; /* in func */ return query select w.segid, w.prefix, w.size, w.operation, w.slice, w.numfiles from gp_workfile_mgr_cache_entries() w; /* in func */ end; /* in func */ $$ language plpgsql volatile execute on all segments; -CREATE +CREATE FUNCTION -- start_ignore !\retcode gpconfig -c gp_workfile_max_entries -v 32 --skipvalidation; @@ -67,9 +67,9 @@ CREATE ! 
mkdir -p '@testtablespace@/workfile_mgr'; 1: DROP TABLESPACE IF EXISTS work_file_test_ts; -DROP +DROP TABLESPACE 1: CREATE TABLESPACE work_file_test_ts LOCATION '@testtablespace@/workfile_mgr'; -CREATE +CREATE TABLESPACE 1: select gp_workfile_mgr_test('atomic_test', 0); gp_workfile_mgr_test @@ -123,7 +123,7 @@ CONTEXT: SQL function "gp_workfile_mgr_test" statement 1 (4 rows) 1: DROP TABLESPACE work_file_test_ts; -DROP +DROP TABLESPACE -- start_ignore !\retcode gpconfig -r gp_workfile_max_entries --skipvalidation; @@ -232,7 +232,7 @@ BEGIN 2 | long_live_workset_1 | 0 | long_live_workset | 1 | 0 (12 rows) 4: end; -END +COMMIT 4: select segid, prefix, size, operation, slice, numfiles from gp_workfile_mgr_cache_entries() order by (segid, prefix); segid | prefix | size | operation | slice | numfiles -------+----------------------+------+--------------------+-------+---------- @@ -275,7 +275,7 @@ BEGIN 2 | long_live_workset_1 | 0 | long_live_workset | 1 | 0 (9 rows) 4: abort; -ABORT +ROLLBACK 4: select segid, prefix, size, operation, slice, numfiles from gp_workfile_mgr_cache_entries() order by (segid, prefix); segid | prefix | size | operation | slice | numfiles -------+----------------------+------+--------------------+-------+---------- diff --git a/src/test/isolation2/sql_isolation_testcase.py b/src/test/isolation2/sql_isolation_testcase.py index 59e8212adec..5b188743120 100644 --- a/src/test/isolation2/sql_isolation_testcase.py +++ b/src/test/isolation2/sql_isolation_testcase.py @@ -15,7 +15,6 @@ limitations under the License. """ -import pg import pty import os import subprocess @@ -28,7 +27,18 @@ from optparse import OptionParser import traceback import select -import shutil +import psycopg2 +import io + +## FIXME: When converting 'INTERVAL' typed value to Python Object, psycopg2 doesn't +## recognize the literal string '@ 0'. It's probably caused by the 'DateStyle' GUC +## setting. It would be good to fix it in future. 
+def cast_interval(value, cur): + if value is None: + return None + return str(value) +INTERVAL = psycopg2.extensions.new_type((1186,), "INTERVAL", cast_interval) +psycopg2.extensions.register_type(INTERVAL) def is_digit(n): try: @@ -37,27 +47,22 @@ def is_digit(n): except ValueError: return False -def null_notice_receiver(notice): - ''' - Tests ignore notice messages when analyzing results, - so silently drop notices from the pg.connection - ''' - return - - class ConnectionInfo(object): __instance = None def __init__(self): self.max_content_id = 0 + self._conn_map = [] if ConnectionInfo.__instance is not None: raise Exception("ConnectionInfo is a singleton.") - query = ("SELECT content, hostname, port, role FROM gp_segment_configuration") - - con = pg.connect(dbname="postgres") - self._conn_map = con.query(query).getresult() - con.close() + with psycopg2.connect(dbname="postgres") as conn: + # Don't start transaction automatically. + conn.set_session(autocommit=True) + with conn.cursor() as cur: + cur.execute("SELECT content, hostname, port, role FROM gp_segment_configuration") + for row in cur: + self._conn_map.append(row) ConnectionInfo.__instance = self for content, _, _, _ in ConnectionInfo.__instance._conn_map: @@ -401,19 +406,12 @@ def connectdb(self, given_dbname, given_host = None, given_port = None, given_op retry = 1000 while retry: try: - if (given_port is None): - con = pg.connect(host= given_host, - opt= given_opt, - dbname= given_dbname, - user = given_user, - passwd = given_passwd) - else: - con = pg.connect(host= given_host, - port= given_port, - opt= given_opt, - dbname= given_dbname, - user = given_user, - passwd = given_passwd) + con = psycopg2.connect(host=given_host, + port=given_port, + options=given_opt, + dbname=given_dbname, + user=given_user, + password=given_passwd) break except Exception as e: if self.mode == "retrieve" and ("auth token is invalid" in str(e) or "Authentication failure" in str(e) or "does not exist" in str(e)): @@ 
-428,7 +426,8 @@ def connectdb(self, given_dbname, given_host = None, given_port = None, given_op else: raise if con is not None: - con.set_notice_receiver(null_notice_receiver) + # Don't start transaction automatically. + con.set_session(autocommit=True) return con def get_hostname_port(self, contentid, role): @@ -436,38 +435,35 @@ def get_hostname_port(self, contentid, role): Gets the port number/hostname combination of the contentid and role """ - query = ("SELECT hostname, port FROM gp_segment_configuration WHERE" - " content = %s AND role = '%s'") % (contentid, role) con = self.connectdb(self.dbname, given_opt="-c gp_role=utility") - r = con.query(query).getresult() - con.close() - if len(r) == 0: - raise Exception("Invalid content %s" % contentid) - if r[0][0] == socket.gethostname(): - return (None, int(r[0][1])) - return (r[0][0], int(r[0][1])) - - def printout_result(self, r): + with con.cursor() as cur: + cur.execute("SELECT hostname, port FROM gp_segment_configuration WHERE" + " content = %s AND role = %s", (contentid, role)) + r = cur.fetchall() + con.close() + if len(r) == 0 or len(r[0]) != 2: + raise Exception("Invalid content %s" % contentid) + if r[0][0] == socket.gethostname(): + return (None, int(r[0][1])) + return (r[0][0], int(r[0][1])) + + def printout_result(self, description, rows): """ - Print out a pygresql result set (a Query object, after the query + Print out a psycopg2 result set (a Query object, after the query has been executed), in a format that imitates the default formatting of psql. This isn't a perfect imitation: we left-justify all the fields and headers, whereas psql centers the header, and right-justifies numeric fields. But this is close enough, to make - gpdiff.pl recognize the result sets as such. (We used to just call - str(r), and let PyGreSQL do the formatting. But even though - PyGreSQL's default formatting is close to psql's, it's not close - enough.) + gpdiff.pl recognize the result sets as such. 
""" widths = [] + result = "" # Figure out the widths of each column. - fields = r.listfields() - for f in fields: - widths.append(len(str(f))) + for f in description: + widths.append(len(f.name)) - rset = r.getresult() - for row in rset: + for row in rows: colno = 0 for col in row: if col is None: @@ -476,26 +472,25 @@ def printout_result(self, r): colno = colno + 1 # Start printing. Header first. - result = "" colno = 0 - for f in fields: + for f in description: if colno > 0: result += "|" - result += " " + f.ljust(widths[colno]) + " " + result += " " + f.name.ljust(widths[colno]) + " " colno = colno + 1 result += "\n" # Then the bar ("----+----") colno = 0 - for f in fields: + for f in description: if colno > 0: result += "+" result += "".ljust(widths[colno] + 2, "-") - colno = colno + 1 + colno += 1 result += "\n" # Then the result set itself - for row in rset: + for row in rows: colno = 0 for col in row: if colno > 0: @@ -514,10 +509,10 @@ def printout_result(self, r): result += "\n" # Finally, the row count - if len(rset) == 1: + if len(rows) == 1: result += "(1 row)\n" else: - result += "(" + str(len(rset)) + " rows)\n" + result += "(" + str(len(rows)) + " rows)\n" return result @@ -526,20 +521,26 @@ def execute_command(self, command): Executes a given command """ try: - r = self.con.query(command) - if r is not None: - if type(r) == str: - # INSERT, UPDATE, etc that returns row count but not result set - echo_content = command[:-1].partition(" ")[0].upper() - return "%s %s" % (echo_content, r) + with self.con.cursor() as cur: + ## FIXME: Currently, psycopg2's cursor doesn't support executing 'COPY TO' + ## commands. We use the copy_expert() API to work around. 
+ ## Issue: https://github.com/psycopg/psycopg2/issues/444 + if command.lower().startswith('copy'): + cur.copy_expert(command, io.StringIO()) else: - # SELECT or similar, print the result set without the command (type pg.Query) - return self.printout_result(r) - else: - # CREATE or other DDL without a result set or count - echo_content = command[:-1].partition(" ")[0].upper() - return echo_content - except Exception as e: + cur.execute(command) + if cur.description is not None: + return self.printout_result(cur.description, cur.fetchall()) + elif cur.statusmessage: + return str(cur.statusmessage) + return "" + except psycopg2.Error as e: + ## Normally, the exception is raised by the server and we can fetch the + ## error message via e.pgerror. However, there's some situation where the + ## exception is not raised by the server, e.g., ProgrammingError, we should + ## print out the error message directly to help debugging. + if e.pgerror: + return str(e.pgerror) return str(e) def do(self): @@ -601,11 +602,13 @@ def get_all_primary_contentids(self, dbname): if not dbname: dbname = self.dbname - con = pg.connect(dbname=dbname) - result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p' order by content").getresult() - if len(result) == 0: - raise Exception("Invalid gp_segment_configuration contents") - return [int(content[0]) for content in result] + with psycopg2.connect(dbname=dbname) as con: + with con.cursor() as cur: + cur.execute("SELECT content FROM gp_segment_configuration WHERE role = 'p' order by content") + result = cur.fetchall() + if len(result) == 0: + raise Exception("Invalid gp_segment_configuration contents") + return [int(content[0]) for content in result] def __preprocess_sql(self, name, pre_run_cmd, sql, global_sh_executor): if not pre_run_cmd: @@ -915,13 +918,6 @@ class SQLIsolationTestCase: 2U: Executes a utility command connected to port 40000. - One difference to SQLTestCase is the output of INSERT. 
- SQLTestCase would output "INSERT 0 1" if one tuple is inserted. - SQLIsolationTestCase would output "INSERT 1". As the - SQLIsolationTestCase needs to have a more fine-grained control - over the execution order than possible with PSQL, it uses - the pygresql python library instead. - Connecting to a specific database: 1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions. 2. If you want a specific session to be connected to a specific database , specify the sql as follows: diff --git a/src/test/singlenode_isolation2/sql/prevent_ao_wal.sql b/src/test/singlenode_isolation2/sql/prevent_ao_wal.sql index cfee9b0dc43..ea90eb81495 100644 --- a/src/test/singlenode_isolation2/sql/prevent_ao_wal.sql +++ b/src/test/singlenode_isolation2/sql/prevent_ao_wal.sql @@ -32,14 +32,18 @@ -- Create tables (AO, AOCO) -1U: CREATE TABLE ao_foo (n int) WITH (appendonly=true); +CREATE TABLE -1U: CREATE TABLE aoco_foo (n int, m int) WITH (appendonly=true, orientation=column); +CREATE TABLE -- Switch WAL file -1U: SELECT true FROM pg_switch_wal(); -- Insert data (AO) -1U: INSERT INTO ao_foo SELECT generate_series(1,10); +INSERT 0 10 -- Insert data (AOCO) -1U: INSERT INTO aoco_foo SELECT generate_series(1,10), generate_series(1,10); +INSERT 0 10 -- Delete data and run vacuum (AO) -1U: DELETE FROM ao_foo WHERE n > 5; -1U: VACUUM; @@ -62,8 +66,10 @@ -1U: SELECT true FROM pg_switch_wal(); -- Insert data (AO) -1U: INSERT INTO ao_foo SELECT generate_series(1,10); +INSERT 0 10 -- Insert data (AOCO) -1U: INSERT INTO aoco_foo SELECT generate_series(1,10), generate_series(1,10); +INSERT 0 10 -- Delete data and run vacuum (AO) -1U: DELETE FROM ao_foo WHERE n > 5; -1U: VACUUM; @@ -74,8 +80,10 @@ -- Validate wal records ! 
last_wal_file=$(psql -At -c "SELECT pg_walfile_name(pg_current_wal_lsn())" postgres) && pg_waldump ${last_wal_file} -p ${COORDINATOR_DATA_DIRECTORY}/pg_wal -r appendonly; --1U: DROP TABLE ao_foo; +-1U: DROP TABLE ao_foo; +DROP TABLE -1U: DROP TABLE aoco_foo; +DROP TABLE -- Reset wal_level !\retcode gpconfig -r wal_level --masteronly;