diff --git a/configure b/configure
index e612c658399..94a195b6d40 100755
--- a/configure
+++ b/configure
@@ -742,7 +742,6 @@ with_ldap
with_krb_srvnam
krb_srvtab
with_gssapi
-with_pythonsrc_ext
with_python
with_perl
with_tcl
@@ -909,7 +908,6 @@ with_tcl
with_tclconfig
with_perl
with_python
-with_pythonsrc_ext
with_gssapi
with_krb_srvnam
with_pam
@@ -1647,7 +1645,6 @@ Optional Packages:
--with-tclconfig=DIR tclConfig.sh is in DIR
--with-perl build Perl modules (PL/Perl)
--without-python build Python modules (PL/Python)
- --with-pythonsrc-ext build Python modules for gpMgmt
--with-gssapi build with GSSAPI support
--with-krb-srvnam=NAME default service principal name in Kerberos (GSSAPI)
[postgres]
@@ -9456,39 +9453,6 @@ fi
$as_echo "$with_python" >&6; }
-#
-# Optionally build Python modules for gpMgmt
-#
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build Python modules for gpMgmt" >&5
-$as_echo_n "checking whether to build Python modules for gpMgmt... " >&6; }
-
-
-
-# Check whether --with-pythonsrc-ext was given.
-if test "${with_pythonsrc_ext+set}" = set; then :
- withval=$with_pythonsrc_ext;
- case $withval in
- yes)
- :
- ;;
- no)
- :
- ;;
- *)
- as_fn_error $? "no argument expected for --with-pythonsrc-ext option" "$LINENO" 5
- ;;
- esac
-
-else
- with_pythonsrc_ext=no
-
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_pythonsrc_ext" >&5
-$as_echo "$with_pythonsrc_ext" >&6; }
-
-
#
# GSSAPI
diff --git a/configure.ac b/configure.ac
index a1e3d9709db..ca68dfa89e9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1021,14 +1021,6 @@ PGAC_ARG_BOOL(with, python, yes, [build Python modules (PL/Python)])
AC_MSG_RESULT([$with_python])
AC_SUBST(with_python)
-#
-# Optionally build Python modules for gpMgmt
-#
-AC_MSG_CHECKING([whether to build Python modules for gpMgmt])
-PGAC_ARG_BOOL(with, pythonsrc-ext, no, [build Python modules for gpMgmt])
-AC_MSG_RESULT([$with_pythonsrc_ext])
-AC_SUBST(with_pythonsrc_ext)
-
#
# GSSAPI
diff --git a/deploy/build/README.Rhel-Rocky.bash b/deploy/build/README.Rhel-Rocky.bash
index 8fdf57c781e..93aa1e68965 100755
--- a/deploy/build/README.Rhel-Rocky.bash
+++ b/deploy/build/README.Rhel-Rocky.bash
@@ -10,6 +10,7 @@ sudo yum --enablerepo=powertools install -y libyaml-devel
sudo yum install -y postgresql
sudo yum install -y postgresql-devel
+sudo yum install -y python3-psycopg2
pip3.9 install -r ../../python-dependencies.txt
diff --git a/deploy/build/README.Ubuntu.bash b/deploy/build/README.Ubuntu.bash
index ec9ca3ee752..7a0086ada44 100755
--- a/deploy/build/README.Ubuntu.bash
+++ b/deploy/build/README.Ubuntu.bash
@@ -36,7 +36,6 @@ sudo apt-get install -y \
python3-dev \
python3-pip \
python3-psutil \
- python3-pygresql \
python3-yaml \
zlib1g-dev
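
Both dependency changes above swap PyGreSQL for psycopg2 as the driver behind the management scripts. A minimal sketch of the connection pattern the rest of this patch adopts (host, port, and database below are placeholder values):

```python
# Minimal psycopg2 sketch of the pattern this patch adopts in place of
# pg.connect(); host/port/dbname are placeholders.
from contextlib import closing

import psycopg2

with closing(psycopg2.connect(host="localhost", port=5432, dbname="postgres")) as conn:
    conn.autocommit = True  # utility queries run outside a transaction block
    with conn.cursor() as curs:
        curs.execute("select version()")
        print(curs.fetchone()[0])
```
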
diff --git a/gpAux/Makefile b/gpAux/Makefile
index 6502cffd0c4..0a63fd58fa6 100644
--- a/gpAux/Makefile
+++ b/gpAux/Makefile
@@ -142,7 +142,7 @@ endif
ifneq (false, ${PG_LANG})
-CONFIGFLAGS+= --with-perl --with-python PYTHON=python3
+CONFIGFLAGS+= --with-perl --with-python PYTHON=python3.9
ifdef TCL_CFG
CONFIGFLAGS+= --with-tcl-config=${TCL_CFG}
endif
@@ -218,8 +218,8 @@ perl_archlibexp:=$(shell perl -MConfig -e 'print $$Config{archlibexp}')
# set default build steps
define BUILD_STEPS
@$(MAKE) -C $(BUILDDIR)/src/backend generated-headers
- cd $(BUILDDIR) && PYGRESQL_LDFLAGS=' -Wl,-rpath,\$$$$ORIGIN/.. -Wl,--enable-new-dtags ' $(MAKE) $(PARALLEL_MAKE_OPTS) install
- cd $(BUILDDIR)/src/pl/plpython && $(MAKE) clean && $(MAKE) $(PARALLEL_MAKE_OPTS) install && cd $(BUILDDIR)
+ cd $(BUILDDIR) && $(MAKE) $(PARALLEL_MAKE_OPTS) install
+ cd $(BUILDDIR)/src/pl/plpython && $(MAKE) clean && echo 'LDFLAGS += -Wl,-rpath,\$$$$ORIGIN/../../ext/python/lib/ -Wl,--enable-new-dtags' >> Makefile && $(MAKE) $(PARALLEL_MAKE_OPTS) install && cd $(BUILDDIR)
cd $(BUILDDIR)/src/pl/plperl && $(MAKE) clean && echo "LDFLAGS += -Wl,-rpath,$(perl_archlibexp)/CORE -Wl,--enable-new-dtags" >> GNUmakefile && echo "LDFLAGS_SL += -Wl,-rpath,$(perl_archlibexp)/CORE -Wl,--enable-new-dtags" >> GNUmakefile && $(MAKE) $(PARALLEL_MAKE_OPTS) install && cd $(BUILDDIR)
#@$(MAKE) greenplum_path INSTLOC=$(INSTLOC)
#@$(MAKE) mgmtcopy INSTLOC=$(INSTLOC)
@@ -254,7 +254,6 @@ define BUILD_STEPS
cp -p $(GPMGMT)/bin/gpload $(INSTLOC)/bin/gpload
cp -p $(GPMGMT)/bin/gpload.py $(INSTLOC)/bin/gpload.py
$(MAKE) copylibs INSTLOC=$(INSTLOC)
- cd $(GPMGMT)/bin && $(MAKE) pygresql INSTLOC=$(INSTLOC)
$(MAKE) clients INSTLOC=$(INSTLOC) CLIENTSINSTLOC=$(CLIENTSINSTLOC)
endef
endif
@@ -348,7 +347,6 @@ CLIENTS_HOME_DIR=$(BLD_HOME)
endif
CLIENTSINSTLOC=$(CLIENTS_HOME_DIR)/cloudberry-clients
CLIENTSINSTLOC_BIN=$(CLIENTSINSTLOC)/bin
-CLIENTSINSTLOC_BINEXT=$(CLIENTSINSTLOC)/bin/ext/
CLIENTSINSTLOC_EXT=$(CLIENTSINSTLOC)/ext
CLIENTSINSTLOC_LIB=$(CLIENTSINSTLOC)/lib
CLIENTSINSTLOC_LIB_PWARE=$(CLIENTSINSTLOC)/lib/pware
@@ -372,15 +370,6 @@ define tmpCLIENTS_FILESET_BIN
endef
CLIENTS_FILESET_BIN = $(strip $(tmpCLIENTS_FILESET_BIN))
-# pg.py, pgdb.py, _pg.so are from pygresql which does not install into a single module
-define tmpCLIENTS_FILESET_BINEXT
- pg.py
- pgdb.py
- _pg*.so
- yaml
-endef
-CLIENTS_FILESET_BINEXT = $(strip $(tmpCLIENTS_FILESET_BINEXT))
-
BLD_PYTHON_FILESET=.
BLD_OS:=$(shell uname -s)
@@ -418,8 +407,6 @@ else
# ---- copy GPDB fileset ----
mkdir -p $(CLIENTSINSTLOC_BIN)
(cd $(INSTLOC)/bin/ && $(TAR) cf - $(CLIENTS_FILESET_BIN)) | (cd $(CLIENTSINSTLOC_BIN)/ && $(TAR) xpf -)$(check_pipe_for_errors)
- mkdir -p $(CLIENTSINSTLOC_BINEXT)
- (cd $(GPMGMT)/bin/ext/ && $(TAR) cf - $(CLIENTS_FILESET_BINEXT)) | (cd $(CLIENTSINSTLOC_BINEXT)/ && $(TAR) xpf -)$(check_pipe_for_errors)
ifneq "$(PYTHONHOME)" ""
mkdir -p $(CLIENTSINSTLOC_EXT)/python
(cd $(PYTHONHOME) && $(TAR) cf - $(BLD_PYTHON_FILESET)) | (cd $(CLIENTSINSTLOC_EXT)/python/ && $(TAR) xpf -)$(check_pipe_for_errors)
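
The plpython LDFLAGS tweak above bakes an `$ORIGIN`-relative RUNPATH into plpython3.so instead of threading PYGRESQL_LDFLAGS through the whole install. A quick, hedged way to verify the result (the .so path below is illustrative):

```python
# Sketch: dump the RPATH/RUNPATH entries that the appended LDFLAGS bake into
# plpython3.so. The library path is illustrative.
import subprocess

out = subprocess.run(["readelf", "-d", "lib/postgresql/plpython3.so"],
                     capture_output=True, text=True, check=True).stdout
print([line for line in out.splitlines() if "PATH" in line])
```
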
diff --git a/gpAux/client/install/src/windows/CreatePackage.bat b/gpAux/client/install/src/windows/CreatePackage.bat
index b6a782bebda..6eb46cd4ebe 100644
--- a/gpAux/client/install/src/windows/CreatePackage.bat
+++ b/gpAux/client/install/src/windows/CreatePackage.bat
@@ -11,8 +11,6 @@ type nul > %GPDB_INSTALL_PATH%\bin\gppylib\__init__.py
copy ..\..\..\..\..\gpMgmt\bin\gppylib\gpversion.py %GPDB_INSTALL_PATH%\bin\gppylib\
perl -pi.bak -e "s,\$Revision\$,%VERSION%," %GPDB_INSTALL_PATH%\bin\gpload.py
copy ..\..\..\..\..\gpMgmt\bin\gpload.bat %GPDB_INSTALL_PATH%\bin
-for %%f in (..\..\..\..\..\gpMgmt\bin\pythonSrc\ext\PyYAML-*.tar.gz) do tar -xf %%f
-for /D %%d in (PyYAML-*) do copy %%d\lib\yaml\* %GPDB_INSTALL_PATH%\lib\python\yaml
perl -p -e "s,__VERSION_PLACEHOLDER__,%VERSION%," greenplum-clients.wxs > greenplum-clients-%VERSION%.wxs
candle.exe -nologo greenplum-clients-%VERSION%.wxs -out greenplum-clients-%VERSION%.wixobj -dSRCDIR=%GPDB_INSTALL_PATH% -dVERSION=%VERSION%
light.exe -nologo -sval greenplum-clients-%VERSION%.wixobj -out greenplum-clients-x86_64.msi
\ No newline at end of file
diff --git a/gpAux/client/install/src/windows/greenplum-clients.wxs b/gpAux/client/install/src/windows/greenplum-clients.wxs
index 0f17c299112..bdb679f555e 100755
--- a/gpAux/client/install/src/windows/greenplum-clients.wxs
+++ b/gpAux/client/install/src/windows/greenplum-clients.wxs
@@ -1139,35 +1139,6 @@ If you want to review or change any of your installation settings, click Back. C
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
@@ -1179,8 +1150,6 @@ If you want to review or change any of your installation settings, click Back. C
-
-
diff --git a/gpMgmt/Makefile b/gpMgmt/Makefile
index 372dae8ed46..499f21170db 100644
--- a/gpMgmt/Makefile
+++ b/gpMgmt/Makefile
@@ -18,17 +18,6 @@ install: generate_greenplum_path_file
if [ -e bin/ext/__init__.py ]; then \
cp -rp bin/ext/__init__.py $(DESTDIR)$(prefix)/lib/python ; \
fi
- if [ -e bin/ext/psutil ]; then \
- cp -rp bin/ext/psutil $(DESTDIR)$(prefix)/lib/python ; \
- fi
- if [ -e bin/ext/pgdb.py ]; then \
- cp -rp bin/ext/pgdb.py $(DESTDIR)$(prefix)/lib/python && \
- cp -rp bin/ext/pg.py $(DESTDIR)$(prefix)/lib/python && \
- cp -rp bin/ext/_pg*.so $(DESTDIR)$(prefix)/lib/python ; \
- fi
- if [ -e bin/ext/yaml ]; then \
- cp -rp bin/ext/yaml $(DESTDIR)$(prefix)/lib/python ; \
- fi
clean distclean:
$(MAKE) -C bin $@
diff --git a/gpMgmt/bin/Makefile b/gpMgmt/bin/Makefile
index 3d9ab50ad79..91d17426b89 100644
--- a/gpMgmt/bin/Makefile
+++ b/gpMgmt/bin/Makefile
@@ -70,54 +70,12 @@ PYLIB_DIR=$(SRC)/ext
core:
python3 gpconfig_modules/parse_guc_metadata.py $(DESTDIR)$(prefix)
-ifeq ($(with_pythonsrc_ext), yes)
-install: installdirs installprograms core psutil pygresql pyyaml
-else
install: installdirs installprograms core
-endif
#
# Python Libraries
#
-#
-# PyGreSQL
-#
-PYGRESQL_VERSION=5.2
-PYGRESQL_DIR=PyGreSQL-$(PYGRESQL_VERSION)
-pygresql:
- @echo "--- PyGreSQL"
- cd $(PYLIB_SRC_EXT)/ && $(TAR) xzf $(PYGRESQL_DIR).tar.gz
- cd $(PYLIB_SRC_EXT)/$(PYGRESQL_DIR)/ && PATH=$(DESTDIR)$(bindir):$$PATH LDFLAGS='$(LDFLAGS) $(PYGRESQL_LDFLAGS)' python3 setup.py build
- cp -r $(PYLIB_SRC_EXT)/$(PYGRESQL_DIR)/build/lib*-3*/* $(PYLIB_DIR)/
-
-
-#
-# PSUTIL
-#
-PSUTIL_VERSION=5.7.0
-PSUTIL_DIR=psutil-$(PSUTIL_VERSION)
-
-psutil:
- @echo "--- psutil"
-ifeq "$(findstring $(BLD_ARCH),aix7_ppc_64 )" ""
- cd $(PYLIB_SRC_EXT)/ && $(TAR) xzf $(PSUTIL_DIR).tar.gz
- cd $(PYLIB_SRC_EXT)/$(PSUTIL_DIR)/ && env -u CC python3 setup.py build
- cp -r $(PYLIB_SRC_EXT)/$(PSUTIL_DIR)/build/lib.*/psutil $(PYLIB_DIR)
-endif
-
-#
-# PYYAML
-#
-PYYAML_VERSION=5.3.1
-PYYAML_DIR=PyYAML-$(PYYAML_VERSION)
-
-pyyaml:
- @echo "--- pyyaml"
- cd $(PYLIB_SRC_EXT)/ && $(TAR) xzf $(PYYAML_DIR).tar.gz
- cd $(PYLIB_SRC_EXT)/$(PYYAML_DIR)/ && env -u CC python3 setup.py build
- cp -r $(PYLIB_SRC_EXT)/$(PYYAML_DIR)/build/lib*-3*/* $(PYLIB_DIR)/
-
#
# PYLINT
#
@@ -125,8 +83,6 @@ pyyaml:
PYLINT_VERSION=0.21.0
PYLINT_DIR=pylint-$(PYLINT_VERSION)
PYLINT_PYTHONPATH=$(PYLIB_DIR):$(PYLIB_SRC_EXT)/$(PYLINT_DIR)/build/lib/
-MOCK_VERSION=1.0.1
-MOCK_DIR=mock-$(MOCK_VERSION)
SETUP_TOOLS_VERSION=36.6.0
PARSE_VERSION=1.8.2
SETUP_TOOLS_DIR=setuptools-$(SETUP_TOOLS_VERSION)
@@ -135,8 +91,6 @@ PYTHONSRC_INSTALL=$(PYLIB_SRC_EXT)/install
PYTHON_VERSION=$(shell python3 -c "import sys; print ('%s.%s' % (sys.version_info[0:2]))")
PYTHONSRC_INSTALL_SITE=$(PYLIB_SRC_EXT)/install/lib/python$(PYTHON_VERSION)/site-packages
PYTHONSRC_INSTALL_PYTHON_PATH=$(PYTHONPATH):$(PYTHONSRC_INSTALL_SITE)
-# TODO: mock-1.0.1-py2.6.egg package should be updated.
-MOCK_BIN=$(PYTHONSRC_INSTALL)/lib/python$(PYTHON_VERSION)/site-packages/mock-1.0.1-py2.6.egg
UBUNTU_PLATFORM=$(shell if lsb_release -a 2>/dev/null | grep -q 'Ubuntu' ; then echo "Ubuntu"; fi)
pylint:
@@ -145,17 +99,6 @@ pylint:
@cd $(PYLIB_SRC_EXT)/$(PYLINT_DIR)/ && python3 setup.py build 1> /dev/null
@touch $(PYLIB_SRC_EXT)/$(PYLINT_DIR)/build/lib/__init__.py
-$(MOCK_BIN):
- @echo "--- mock for platform $(UBUNTU_PLATFORM)"
- @if [ "$(UBUNTU_PLATFORM)" = "Ubuntu" ]; then\
- pip3 install mock;\
- else\
- mkdir -p $(PYTHONSRC_INSTALL_SITE) && \
- cd $(PYLIB_SRC_EXT)/ && $(TAR) xzf $(MOCK_DIR).tar.gz && \
- cd $(PYLIB_SRC_EXT)/$(MOCK_DIR)/ && \
- PYTHONPATH=$(PYTHONSRC_INSTALL_PYTHON_PATH) python3 setup.py install --prefix $(PYTHONSRC_INSTALL) ; \
- fi;
-
PYTHON_FILES=`grep -l --exclude=Makefile --exclude=gplogfilter "/bin/env python3" *`\
`grep -l "/bin/env python3" $(SRC)/../sbin/*`\
`find ./gppylib -name "*.py"`\
@@ -167,7 +110,7 @@ checkcode: pylint
@echo -n "pylint_score=" > $(SRC)/../pylint_score.properties
@grep "Your code has been rated at" $(SRC)/../pylint.txt | sed -e "s|Your .* \(.*\)/.*|\1|" >> $(SRC)/../pylint_score.properties
-check: $(MOCK_BIN)
+check:
@echo "Running pure unit and also "unit" tests that require cluster to be up..."
@TMPDIR=/tmp PYTHONPATH=$(SERVER_SRC):$(SERVER_SBIN):$(PYTHONPATH):$(PYTHONSRC_INSTALL_PYTHON_PATH):$(SRC)/ext:$(SBIN_DIR):$(LIB_DIR):$(PYLIB_DIR)/mock-1.0.1 \
gppylib/gpunit discover --verbose -s $(SRC)/gppylib -p "test_unit*.py" 2> $(SRC)/../gpMgmt_testunit_results.log 1> $(SRC)/../gpMgmt_testunit_output.log
@@ -190,8 +133,6 @@ installcheck: installcheck-bash
clean distclean:
rm -rf $(PYLIB_SRC_EXT)/$(PYLINT_DIR)
- rm -rf $(PYLIB_SRC_EXT)/$(PYGRESQL_DIR)/build
- rm -rf $(PYLIB_SRC)/$(PYGRESQL_DIR)/build
rm -rf *.pyc
rm -f analyzedbc gpactivatestandbyc gpaddmirrorsc gpcheckcatc \
gpcheckperfc gpcheckresgroupimplc gpchecksubnetcfgc gpconfigc \
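
With the vendored mock-1.0.1 tarball and the `$(MOCK_BIN)` bootstrap gone, the unit tests can rely on the mock that ships in the Python 3 standard library (available since 3.3), presumably via an import along these lines:

```python
# unittest.mock has been part of the standard library since Python 3.3, so no
# vendored mock egg is needed.
from unittest import mock

m = mock.MagicMock(return_value=42)
assert m() == 42
m.assert_called_once()
```
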
diff --git a/gpMgmt/bin/analyzedb b/gpMgmt/bin/analyzedb
index cc51e265927..1cae7b6c55d 100755
--- a/gpMgmt/bin/analyzedb
+++ b/gpMgmt/bin/analyzedb
@@ -25,16 +25,15 @@ from contextlib import closing
import pipes # for shell-quoting, pipes.quote()
import fcntl
import itertools
-
+import psycopg2
try:
- import pg
-
from gppylib import gplog, pgconf, userinput
from gppylib.commands.base import Command, WorkerPool, Worker
from gppylib.operations import Operation
from gppylib.gpversion import GpVersion
from gppylib.db import dbconn
from gppylib.operations.unix import CheckDir, CheckFile, MakeDir
+ from gppylib.utils import escape_string
except ImportError as e:
sys.exit('Cannot import modules. Please check that you have sourced greenplum_path.sh. Detail: ' + str(e))
@@ -172,7 +171,7 @@ def validate_schema_exists(pg_port, dbname, schema):
try:
dburl = dbconn.DbURL(port=pg_port, dbname=dbname)
conn = dbconn.connect(dburl)
- count = dbconn.querySingleton(conn, "select count(*) from pg_namespace where nspname='%s';" % pg.escape_string(schema))
+ count = dbconn.querySingleton(conn, "select count(*) from pg_namespace where nspname='%s';" % escape_string(schema))
if count == 0:
raise ExceptionNoStackTraceNeeded("Schema %s does not exist in database %s." % (schema, dbname))
finally:
@@ -219,7 +218,7 @@ def get_partition_state_tuples(pg_port, dbname, catalog_schema, partition_info):
try:
modcount_sql = "select to_char(coalesce(sum(modcount::bigint), 0), '999999999999999999999') from gp_dist_random('%s.%s')" % (catalog_schema, tupletable)
modcount = dbconn.querySingleton(conn, modcount_sql)
- except pg.DatabaseError as e:
+ except psycopg2.DatabaseError as e:
if "does not exist" in str(e):
logger.info("Table %s.%s (%s) no longer exists and will not be analyzed", schemaname, partition_name, tupletable)
else:
@@ -982,7 +981,7 @@ def get_oid_str(table_list):
def regclass_schema_tbl(schema, tbl):
schema_tbl = "%s.%s" % (escape_identifier(schema), escape_identifier(tbl))
- return "to_regclass('%s')" % (pg.escape_string(schema_tbl))
+ return "to_regclass('%s')" % (escape_string(schema_tbl))
# Escape double-quotes in a string, so that the resulting string is suitable for
@@ -1250,7 +1249,7 @@ def validate_tables(conn, tablenames):
while curr_batch < nbatches:
batch = tablenames[curr_batch * batch_size:(curr_batch + 1) * batch_size]
- oid_str = ','.join(map((lambda x: "('%s')" % pg.escape_string(x)), batch))
+ oid_str = ','.join(map((lambda x: "('%s')" % escape_string(x)), batch))
if not oid_str:
break
@@ -1266,7 +1265,7 @@ def get_include_cols_from_exclude(conn, schema, table, exclude_cols):
"""
Given a list of excluded columns of a table, get the list of included columns
"""
- quoted_exclude_cols = ','.join(["'%s'" % pg.escape_string(x) for x in exclude_cols])
+ quoted_exclude_cols = ','.join(["'%s'" % escape_string(x) for x in exclude_cols])
oid_str = regclass_schema_tbl(schema, table)
cols = run_sql(conn, GET_INCLUDED_COLUMNS_FROM_EXCLUDE_SQL % (oid_str, quoted_exclude_cols))
@@ -1282,7 +1281,7 @@ def validate_columns(conn, schema, table, column_list):
return
sql = VALIDATE_COLUMN_NAMES_SQL % (regclass_schema_tbl(schema, table),
- ','.join(["'%s'" % pg.escape_string(x) for x in column_list]))
+ ','.join(["'%s'" % escape_string(x) for x in column_list]))
valid_col_count = dbconn.querySingleton(conn, sql)
if int(valid_col_count) != len(column_list):
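
analyzedb now takes escape_string from gppylib.utils instead of PyGreSQL. Its implementation is not shown in this diff; a minimal hypothetical stand-in that matches how the call sites use it (doubling single quotes before splicing a value into a '...'-quoted SQL literal) would look like:

```python
# Hypothetical stand-in for gppylib.utils.escape_string (not shown in this
# diff): the call sites only need single quotes doubled before the value is
# embedded in a single-quoted SQL literal.
def escape_string(value):
    return value.replace("'", "''")

assert escape_string("public") == "public"
assert escape_string("bob's_schema") == "bob''s_schema"
```
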
diff --git a/gpMgmt/bin/gpactivatestandby b/gpMgmt/bin/gpactivatestandby
index 4c2069f0970..914d20120d0 100755
--- a/gpMgmt/bin/gpactivatestandby
+++ b/gpMgmt/bin/gpactivatestandby
@@ -21,10 +21,9 @@ import time
import shutil
import tempfile
from datetime import datetime, timedelta
-
+import psycopg2
# import GPDB modules
try:
- import pg as pygresql
from gppylib.commands import unix, gp, pg
from gppylib.db import dbconn
from gppylib.gpparseopts import OptParser, OptChecker, OptionGroup, SUPPRESS_HELP
@@ -341,7 +340,7 @@ def promote_standby(coordinator_data_dir):
dbconn.execSQL(conn, 'CHECKPOINT')
conn.close()
return True
- except pygresql.InternalError as e:
+ except (psycopg2.InternalError, psycopg2.OperationalError) as e:
pass
time.sleep(1)
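
The widened except clause matters because psycopg2 reports a refused or dropped connection as OperationalError, not InternalError, so the promotion poll would otherwise abort while the standby restarts. A sketch of the loop under that assumption (DSN and timeout are placeholders):

```python
# Sketch of the promotion polling pattern above: swallow both error types and
# retry until the promoted coordinator accepts connections.
import time
from contextlib import closing

import psycopg2

def wait_until_promoted(dsn, timeout=60):
    for _ in range(timeout):
        try:
            with closing(psycopg2.connect(dsn)) as conn:
                conn.autocommit = True
                with conn.cursor() as curs:
                    curs.execute("CHECKPOINT")
            return True
        except (psycopg2.InternalError, psycopg2.OperationalError):
            time.sleep(1)  # server still promoting; retry
    return False
```
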
diff --git a/gpMgmt/bin/gpcheckcat b/gpMgmt/bin/gpcheckcat
index 00c2e4b21f3..26c0e7e30d5 100755
--- a/gpMgmt/bin/gpcheckcat
+++ b/gpMgmt/bin/gpcheckcat
@@ -28,23 +28,20 @@ import re
import sys
import time
from functools import reduce
-
+import psycopg2
+from psycopg2 import extras
+from contextlib import closing
try:
from gppylib import gplog
- from gppylib.db import dbconn
from gppylib.gpcatalog import *
from gppylib.commands.unix import *
from gppylib.commands.gp import conflict_with_gpexpand
from gppylib.system.info import *
- from pgdb import DatabaseError
from gpcheckcat_modules.unique_index_violation_check import UniqueIndexViolationCheck
from gpcheckcat_modules.leaked_schema_dropper import LeakedSchemaDropper
from gpcheckcat_modules.repair import Repair
from gpcheckcat_modules.foreign_key_check import ForeignKeyCheck
from gpcheckcat_modules.orphaned_toast_tables_check import OrphanedToastTablesCheck
-
- import pg
-
except ImportError as e:
sys.exit('Error: unable to import module: ' + str(e))
@@ -138,7 +135,7 @@ class Global():
self.dbname = None
self.firstdb = None
self.alldb = []
- self.db = {}
+ self.conn = {}
self.tmpdir = None
self.reset_stmt_queues()
@@ -207,30 +204,27 @@ def usage(exitarg=None):
###############################
def getversion():
- db = connect()
- curs = db.query('''
- select regexp_replace(version(),
- E'.*PostgreSQL [^ ]+ .Apache Cloudberry ([1-9]+.[0-9]+|main).*',
- E'\\\\1') as ver;''')
-
- row = curs.getresult()[0]
- version = row[0]
-
- logger.debug('got version %s' % version)
- return version
-
+ with closing(connect()) as conn:
+ with conn.cursor() as curs:
+ curs.execute('''
+ select regexp_replace(version(),
+                    E'.*PostgreSQL [^ ]+ .Apache Cloudberry ([1-9]+.[0-9]+|main).*',
+ E'\\\\1') as ver;''')
+ row = curs.fetchone()
+ version = row[0]
+ logger.debug('got version %s' % version)
+ return version
###############################
def getalldbs():
"""
get all connectable databases
"""
- db = connect()
- curs = db.query('''
- select datname from pg_database where datallowconn order by datname ''')
- row = curs.getresult()
- return row
-
+ with closing(connect()) as conn:
+ with conn.cursor() as curs:
+ curs.execute('''select datname from pg_database where datallowconn order by datname''')
+ row = curs.fetchall()
+ return row
###############################
def parseCommandLine():
@@ -343,19 +337,21 @@ def connect(user=None, password=None, host=None, port=None,
try:
logger.debug('connecting to %s:%s %s' % (host, port, database))
- db = pg.connect(host=host, port=port, user=user,
- passwd=password, dbname=database, opt=options)
-
- except pg.InternalError as ex:
+ conn = psycopg2.connect(host=host, port=port, user=user,
+ password=password, dbname=database, options=options)
+    # Use autocommit so queries are not run inside a transaction block.
+ conn.set_session(autocommit=True)
+ except (psycopg2.InternalError, psycopg2.OperationalError) as ex:
logger.fatal('could not connect to %s: "%s"' %
(database, str(ex).strip()))
exit(1)
logger.debug('connected with %s:%s %s' % (host, port, database))
- return db
+ return conn
-#############
+# NOTE: connect2() cannot be used with a context manager, since the connections
+# it returns are cached in GV.conn and closed later by closeDbs().
def connect2(cfgrec, user=None, password=None, database=None, utilityMode=True):
host = cfgrec['address']
port = cfgrec['port']
@@ -367,22 +363,22 @@ def connect2(cfgrec, user=None, password=None, database=None, utilityMode=True):
key = "%s.%s.%s.%s.%s.%s.%s" % (host, port, datadir, user, password, database,
str(utilityMode))
- conns = GV.db.get(key)
+ conns = GV.conn.get(key)
if conns:
return conns[0]
conn = connect(host=host, port=port, user=user, password=password,
database=database, utilityMode=utilityMode)
if conn:
- GV.db[key] = [conn, cfgrec]
+ GV.conn[key] = [conn, cfgrec]
return conn
class execThread(Thread):
- def __init__(self, cfg, db, qry):
+ def __init__(self, cfg, conn, qry):
self.cfg = cfg
- self.db = db
+ self.conn = conn
self.qry = qry
self.curs = None
self.error = None
@@ -390,11 +386,11 @@ class execThread(Thread):
def run(self):
try:
- self.curs = self.db.query(self.qry)
+ self.curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
+ self.curs.execute(self.qry)
except BaseException as e:
self.error = e
-
def processThread(threads):
batch = []
for th in threads:
@@ -423,9 +419,9 @@ def connect2run(qry, col=None):
# parallelise queries
for dbid in GV.cfg:
c = GV.cfg[dbid]
- db = connect2(c)
+ conn = connect2(c)
- thread = execThread(c, db, qry)
+ thread = execThread(c, conn, qry)
thread.start()
logger.debug('launching query thread %s for dbid %i' %
(thread.name, dbid))
@@ -445,8 +441,8 @@ def connect2run(qry, col=None):
err = []
for [cfg, curs] in batch:
if col is None:
- col = curs.listfields()
- for row in curs.dictresult():
+ col = [desc[0] for desc in curs.description]
+ for row in curs.fetchall():
err.append([cfg, col, row])
return err
@@ -464,7 +460,6 @@ def formatErr(c, col, row):
#############
def getGPConfiguration():
cfg = {}
- db = connect()
# note that in 4.0, sql commands cannot be run against the segment mirrors directly
# so we filter out non-primary segment databases in the query
qry = '''
@@ -474,13 +469,14 @@ def getGPConfiguration():
FROM gp_segment_configuration
WHERE (role = 'p' or content < 0 )
'''
- curs = db.query(qry)
- for row in curs.dictresult():
- if row['content'] == -1 and not row['isprimary']:
- continue # skip standby coordinator
- cfg[row['dbid']] = row
- db.close()
- return cfg
+ with closing(connect()) as conn:
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ curs.execute(qry)
+ for row in curs.fetchall():
+ if row['content'] == -1 and not row['isprimary']:
+ continue # skip standby coordinator
+ cfg[row['dbid']] = row
+ return cfg
def checkDistribPolicy():
logger.info('-----------------------------------')
@@ -494,27 +490,27 @@ def checkDistribPolicy():
where pk.contype in('p', 'u') and d.policytype = 'p' and d.distkey = ''
'''
- db = connect2(GV.cfg[GV.coordinator_dbid])
try:
- curs = db.query(qry)
- err = []
- for row in curs.dictresult():
- err.append([GV.cfg[GV.coordinator_dbid], ('nspname', 'relname', 'constraint'), row])
-
- if not err:
- logger.info('[OK] randomly distributed tables')
- else:
- GV.checkStatus = False
- setError(ERROR_REMOVE)
- logger.info('[FAIL] randomly distributed tables')
- logger.error('pg_constraint has %d issue(s)' % len(err))
- logger.error(qry)
- for e in err:
- logger.error(formatErr(e[0], e[1], e[2]))
- for e in err:
- cons = e[2]
- removeIndexConstraint(cons['nspname'], cons['relname'],
- cons['constraint'])
+ conn = connect2(GV.cfg[GV.coordinator_dbid])
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ curs.execute(qry)
+ err = []
+ for row in curs.fetchall():
+ err.append([GV.cfg[GV.coordinator_dbid], ('nspname', 'relname', 'constraint'), row])
+
+ if not err:
+ logger.info('[OK] randomly distributed tables')
+ else:
+ GV.checkStatus = False
+ setError(ERROR_REMOVE)
+ logger.info('[FAIL] randomly distributed tables')
+ logger.error('pg_constraint has %d issue(s)' % len(err))
+ logger.error(qry)
+ for e in err:
+ logger.error(formatErr(e[0], e[1], e[2]))
+ cons = e[2]
+ removeIndexConstraint(cons['nspname'], cons['relname'],
+ cons['constraint'])
except Exception as e:
setError(ERROR_NOREPAIR)
myprint('[ERROR] executing test: checkDistribPolicy')
@@ -536,22 +532,23 @@ def checkDistribPolicy():
and not d.distkey::int2[] operator(pg_catalog.<@) pk.conkey
'''
try:
- curs = db.query(qry)
-
- err = []
- for row in curs.dictresult():
- err.append([GV.cfg[GV.coordinator_dbid], ('nspname', 'relname', 'constraint'), row])
-
- if not err:
- logger.info('[OK] unique constraints')
- else:
- GV.checkStatus = False
- setError(ERROR_REMOVE)
- logger.info('[FAIL] unique constraints')
- logger.error('pg_constraint has %d issue(s)' % len(err))
- logger.error(qry)
- for e in err: logger.error(formatErr(e[0], e[1], e[2]))
+ conn = connect2(GV.cfg[GV.coordinator_dbid])
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ err = []
+ curs.execute(qry)
+ for row in curs.fetchall():
+ err.append([GV.cfg[GV.coordinator_dbid], ('nspname', 'relname', 'constraint'), row])
+
+ if not err:
+ logger.info('[OK] unique constraints')
+ else:
+ GV.checkStatus = False
+ setError(ERROR_REMOVE)
+ logger.info('[FAIL] unique constraints')
+ logger.error('pg_constraint has %d issue(s)' % len(err))
+ logger.error(qry)
for e in err:
+ logger.error(formatErr(e[0], e[1], e[2]))
cons = e[2]
removeIndexConstraint(cons['nspname'], cons['relname'],
cons['constraint'])
@@ -567,7 +564,6 @@ def checkPartitionIntegrity():
logger.info('-----------------------------------')
logger.info('Checking pg_partition ...')
err = []
- db = connect()
# Check for the numsegments value of parent and child partition from the gp_distribution_policy table
qry = '''
@@ -579,53 +575,55 @@ def checkPartitionIntegrity():
and not (inhrelid in (select ftrelid from pg_catalog.pg_foreign_table) and child.numsegments = NULL);
'''
try:
- curs = db.query(qry)
- cols = ('inhparent', 'inhrelid', 'numsegments_parent', 'numsegments_child')
- col_names = {
- 'inhparent': 'table',
- 'inhrelid': 'affected child',
- 'numsegments_parent': 'parent numsegments value',
- 'numsegments_child': 'child numsegments value',
- }
-
- err = []
- for row in curs.dictresult():
- err.append([GV.cfg[GV.coordinator_dbid], cols, row])
-
- if not err:
- logger.info('[OK] partition numsegments check')
- else:
- err_count = len(err)
- GV.checkStatus = False
- setError(ERROR_REMOVE)
- logger.info('[FAIL] partition numsegments check')
- logger.error('partition numsegments check found %d issue(s)' % err_count)
- if err_count > 100:
- logger.error(qry)
+ with closing(connect()) as conn:
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ curs.execute(qry)
+ cols = ('inhparent', 'inhrelid', 'numsegments_parent', 'numsegments_child')
+ col_names = {
+ 'inhparent': 'table',
+ 'inhrelid': 'affected child',
+ 'numsegments_parent': 'parent numsegments value',
+ 'numsegments_child': 'child numsegments value',
+ }
+
+ err = []
+ for row in curs.fetchall():
+ err.append([GV.cfg[GV.coordinator_dbid], cols, row])
+
+ if not err:
+ logger.info('[OK] partition numsegments check')
+ else:
+ err_count = len(err)
+ GV.checkStatus = False
+ setError(ERROR_REMOVE)
+ logger.info('[FAIL] partition numsegments check')
+ logger.error('partition numsegments check found %d issue(s)' % err_count)
+ if err_count > 100:
+ logger.error(qry)
- myprint(
- '[ERROR]: child partition(s) have different numsegments value '
- 'from the root partition. Check the gpcheckcat log for details.'
- )
- logger.error('The following tables have different numsegments value (showing at most 100 rows):')
+ myprint(
+ '[ERROR]: child partition(s) have different numsegments value '
+ 'from the root partition. Check the gpcheckcat log for details.'
+ )
+ logger.error('The following tables have different numsegments value (showing at most 100 rows):')
- # report at most 100 rows, for brevity
- err = err[:100]
+ # report at most 100 rows, for brevity
+ err = err[:100]
- for index, e in enumerate(err):
- cfg = e[0]
- col = e[1]
- row = e[2]
+ for index, e in enumerate(err):
+ cfg = e[0]
+ col = e[1]
+ row = e[2]
- if index == 0:
- logger.error("--------")
- logger.error(" " + " | ".join(map(col_names.get, col)))
- logger.error(" " + "-+-".join(['-' * len(col_names[x]) for x in col]))
+ if index == 0:
+ logger.error("--------")
+ logger.error(" " + " | ".join(map(col_names.get, col)))
+ logger.error(" " + "-+-".join(['-' * len(col_names[x]) for x in col]))
- logger.error(" " + " | ".join([str(row[x]) for x in col]))
+ logger.error(" " + " | ".join([str(row[x]) for x in col]))
- if err_count > 100:
- logger.error(" ...")
+ if err_count > 100:
+ logger.error(" ...")
except Exception as e:
setError(ERROR_NOREPAIR)
@@ -648,74 +646,74 @@ def checkPartitionIntegrity():
and (select isleaf from pg_partition_tree(inhparent) where relid = inhrelid));
'''
try:
- curs = db.query(qry)
- cols = ('inhparent', 'inhrelid', 'dby_parent', 'dby_child')
- col_names = {
- 'inhparent': 'table',
- 'inhrelid': 'affected child',
- 'dby_parent': 'table distribution key',
- 'dby_child': 'child distribution key',
- }
-
- err = []
- for row in curs.dictresult():
- err.append([GV.cfg[GV.coordinator_dbid], cols, row])
-
- if not err:
- logger.info('[OK] partition distribution policy check')
- else:
- GV.checkStatus = False
- setError(ERROR_REMOVE)
- logger.info('[FAIL] partition distribution policy check')
- logger.error('partition distribution policy check found %d issue(s)' % len(err))
- if len(err) > 100:
- logger.error(qry)
-
- myprint(
- '[ERROR]: child partition(s) are distributed differently from '
- 'the root partition, and must be manually redistributed, for '
- 'some tables. Check the gpcheckcat log for details.'
- )
- logger.error('The following tables must be manually redistributed:')
-
- count = 0
- for e in err:
- cfg = e[0]
- col = e[1]
- row = e[2]
-
- # TODO: generate a repair script for this row. This is
- # difficult, since we can't redistribute child partitions
- # directly.
-
- # report at most 100 rows, for brevity
- if count == 100:
- logger.error("...")
- count += 1
- if count > 100:
- continue
-
- if count == 0:
- logger.error("--------")
- logger.error(" " + " | ".join(map(col_names.get, col)))
- logger.error(" " + "-+-".join(['-' * len(col_names[x]) for x in col]))
-
- logger.error(" " + " | ".join([str(row[x]) for x in col]))
- count += 1
-
- logger.error(
- 'Execute an ALTER TABLE ... SET DISTRIBUTED BY statement, with '
- 'the desired distribution key, on the partition root for each '
- 'affected table.'
- )
+ with closing(connect()) as conn:
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ curs.execute(qry)
+ cols = ('inhparent', 'inhrelid', 'dby_parent', 'dby_child')
+ col_names = {
+ 'inhparent': 'table',
+ 'inhrelid': 'affected child',
+ 'dby_parent': 'table distribution key',
+ 'dby_child': 'child distribution key',
+ }
+
+ err = []
+ for row in curs.fetchall():
+ err.append([GV.cfg[GV.coordinator_dbid], cols, row])
+
+ if not err:
+ logger.info('[OK] partition distribution policy check')
+ else:
+ GV.checkStatus = False
+ setError(ERROR_REMOVE)
+ logger.info('[FAIL] partition distribution policy check')
+ logger.error('partition distribution policy check found %d issue(s)' % len(err))
+ if len(err) > 100:
+ logger.error(qry)
+
+ myprint(
+ '[ERROR]: child partition(s) are distributed differently from '
+ 'the root partition, and must be manually redistributed, for '
+ 'some tables. Check the gpcheckcat log for details.'
+ )
+ logger.error('The following tables must be manually redistributed:')
+
+ count = 0
+ for e in err:
+ cfg = e[0]
+ col = e[1]
+ row = e[2]
+
+ # TODO: generate a repair script for this row. This is
+ # difficult, since we can't redistribute child partitions
+ # directly.
+
+ # report at most 100 rows, for brevity
+                        if count == 100:
+                            logger.error("...")
+                            count += 1
+                        if count > 100:
+                            continue
+
+ if count == 0:
+ logger.error("--------")
+ logger.error(" " + " | ".join(map(col_names.get, col)))
+ logger.error(" " + "-+-".join(['-' * len(col_names[x]) for x in col]))
+
+ logger.error(" " + " | ".join([str(row[x]) for x in col]))
+ count += 1
+
+ logger.error(
+ 'Execute an ALTER TABLE ... SET DISTRIBUTED BY statement, with '
+ 'the desired distribution key, on the partition root for each '
+ 'affected table.'
+ )
except Exception as e:
setError(ERROR_NOREPAIR)
myprint('[ERROR] executing test: checkPartitionIntegrity')
myprint(' Execution error: ' + str(e))
- db.close()
-
checkPoliciesRepair()
#############
@@ -782,7 +780,7 @@ Produce repair scripts to remove dangling entries of gp_fastsequence:
'''
-def removeFastSequence(db):
+def removeFastSequence(conn):
'''
MPP-14758: gp_fastsequence does not get cleanup after a failed transaction (AO/CO)
Note: this is slightly different from the normal foreign key check
@@ -808,14 +806,15 @@ def removeFastSequence(db):
ON r.gp_segment_id = cfg.content
WHERE cfg.role = 'p';
"""
- curs = db.query(qry)
- for row in curs.dictresult():
- seg = row['dbid'] # dbid of targeted segment
- name = 'gp_fastsequence tuple' # for comment purposes
- table = 'gp_fastsequence' # table name
- cols = {'objid': row['objid']} # column name and value
- objname = 'gp_fastsequence' # for comment purposes
- buildRemove(seg, name, table, cols, objname)
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ curs.execute(qry)
+ for row in curs.fetchall():
+ seg = row['dbid'] # dbid of targeted segment
+ name = 'gp_fastsequence tuple' # for comment purposes
+ table = 'gp_fastsequence' # table name
+ cols = {'objid': row['objid']} # column name and value
+ objname = 'gp_fastsequence' # for comment purposes
+ buildRemove(seg, name, table, cols, objname)
except Exception as e:
logger.error('removeFastSequence: ' + str(e))
@@ -877,42 +876,41 @@ def drop_leaked_schemas(leaked_schema_dropper, dbname):
logger.info('-----------------------------------')
logger.info('Checking for leaked temporary schemas')
- db_connection = connect(database=dbname)
try:
- dropped_schemas = leaked_schema_dropper.drop_leaked_schemas(db_connection)
- if not dropped_schemas:
- logger.info('[OK] temporary schemas')
- else:
- logger.info('[FAIL] temporary schemas')
- myprint("Found and dropped %d unbound temporary schemas" % len(dropped_schemas))
- logger.error('Dropped leaked schemas \'%s\' in the database \'%s\'' % (dropped_schemas, dbname))
+ with closing(connect(database=dbname)) as conn:
+ dropped_schemas = leaked_schema_dropper.drop_leaked_schemas(conn)
+ if not dropped_schemas:
+ logger.info('[OK] temporary schemas')
+ else:
+ logger.info('[FAIL] temporary schemas')
+ myprint("Found and dropped %d unbound temporary schemas" % len(dropped_schemas))
+ logger.error('Dropped leaked schemas \'%s\' in the database \'%s\'' % (dropped_schemas, dbname))
except Exception as e:
setError(ERROR_NOREPAIR)
myprint(' Execution error: ' + str(e))
- finally:
- db_connection.close()
def checkDepend():
# Check for dependencies on non-existent objects
logger.info('-----------------------------------')
logger.info('Checking Object Dependencies')
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ with conn.cursor() as curs:
- # Catalogs that link up to pg_depend/pg_shdepend
- qry = """
- select relname from pg_class c
- where relkind='r'
- and relnamespace=%d
- and exists (select 1 from pg_attribute a where attname = 'oid' and a.attrelid = c.oid)
- """ % PG_CATALOG_OID
- curs = db.query(qry)
- catalogs = []
- for row in curs.getresult():
- catalogs.append(row[0])
-
- checkDependJoinCatalog(catalogs)
- checkCatalogJoinDepend(catalogs)
+ # Catalogs that link up to pg_depend/pg_shdepend
+ qry = """
+ select relname from pg_class c
+ where relkind='r'
+ and relnamespace=%d
+ and exists (select 1 from pg_attribute a where attname = 'oid' and a.attrelid = c.oid)
+ """ % PG_CATALOG_OID
+ curs.execute(qry)
+ catalogs = []
+ for row in curs.fetchall():
+ catalogs.append(row[0])
+
+ checkDependJoinCatalog(catalogs)
+ checkCatalogJoinDepend(catalogs)
def checkDependJoinCatalog(catalogs):
# Construct subquery that will verify that all (classid, objid)
@@ -1069,7 +1067,6 @@ def checkOwners():
#
# - Between 3.3 and 4.0 the ao segment columns migrated from pg_class
# to pg_appendonly.
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
qry = '''
select distinct n.nspname, coalesce(o.relname, c.relname) as relname,
a.rolname, m.rolname as coordinator_rolname
@@ -1086,20 +1083,21 @@ def checkOwners():
where c.relowner <> r.relowner
'''
try:
- curs = db.query(qry)
-
- rows = []
- for row in curs.dictresult():
- rows.append(row)
-
- if len(rows) == 0:
- logger.info('[OK] table ownership')
- else:
- GV.checkStatus = False
- setError(ERROR_REMOVE)
- logger.info('[FAIL] table ownership')
- logger.error('found %d table ownership issue(s)' % len(rows))
- logger.error('%s' % qry)
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ curs.execute(qry)
+ rows = []
+ for row in curs.fetchall():
+ rows.append(row)
+
+ if len(rows) == 0:
+ logger.info('[OK] table ownership')
+ else:
+ GV.checkStatus = False
+ setError(ERROR_REMOVE)
+ logger.info('[FAIL] table ownership')
+ logger.error('found %d table ownership issue(s)' % len(rows))
+ logger.error('%s' % qry)
for row in rows[0:100]:
logger.error(' %s.%s relowner %s != %s'
% (row['nspname'], row['relname'], row['rolname'],
@@ -1123,7 +1121,6 @@ def checkOwners():
# - Ignore implementation types of pg_class entries - they should be
# in the check above since ALTER TABLE is required to fix them, not
# ALTER TYPE.
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
qry = '''
select distinct n.nspname, t.typname, a.rolname, m.rolname as coordinator_rolname
from gp_dist_random('pg_type') r
@@ -1134,27 +1131,28 @@ def checkOwners():
where r.typowner <> t.typowner
'''
try:
- curs = db.query(qry)
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ curs.execute(qry)
- rows = []
- for row in curs.dictresult():
- rows.append(row)
+ rows = []
+ for row in curs.fetchall():
+ rows.append(row)
- if len(rows) == 0:
- logger.info('[OK] type ownership')
- else:
- GV.checkStatus = False
- setError(ERROR_NOREPAIR)
- logger.info('[FAIL] type ownership')
- logger.error('found %d type ownership issue(s)' % len(rows))
- logger.error('%s' % qry)
+ if len(rows) == 0:
+ logger.info('[OK] type ownership')
+ else:
+ GV.checkStatus = False
+ setError(ERROR_NOREPAIR)
+ logger.info('[FAIL] type ownership')
+ logger.error('found %d type ownership issue(s)' % len(rows))
+ logger.error('%s' % qry)
for row in rows[0:100]:
logger.error(' %s.%s typeowner %s != %s'
% (row['nspname'], row['typname'], row['rolname'],
row['coordinator_rolname']))
if len(rows) > 100:
logger.error("...")
-
except Exception as e:
setError(ERROR_NOREPAIR)
myprint("[ERROR] executing test: check type ownership")
@@ -1178,15 +1176,14 @@ def checkOwners():
def closeDbs():
- for key, conns in GV.db.items():
- db = conns[0]
- db.close()
- GV.db = {} # remove everything
+ for key, conns in GV.conn.items():
+ conn = conns[0]
+ conn.close()
+ GV.conn = {} # remove everything
# -------------------------------------------------------------------------------
def getCatObj(namestr):
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
try:
cat = GV.catalog.getCatalogTable(namestr)
except Exception as e:
@@ -1256,25 +1253,25 @@ def checkTableACL(cat):
# Execute the query
try:
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
- curs = db.query(qry)
- nrows = curs.ntuples()
-
- if nrows == 0:
- logger.info('[OK] Cross consistency acl check for ' + catname)
- else:
- GV.checkStatus = False
- setError(ERROR_NOREPAIR)
- GV.aclStatus = False
- logger.info('[FAIL] Cross consistency acl check for ' + catname)
- logger.error(' %s acl check has %d issue(s)' % (catname, nrows))
-
- fields = curs.listfields()
- gplog.log_literal(logger, logging.ERROR, " " + " | ".join(fields))
- for row in curs.getresult():
- gplog.log_literal(logger, logging.ERROR, " " + " | ".join(map(str, row)))
- processACLResult(catname, fields, curs.getresult())
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ with conn.cursor() as curs:
+ curs.execute(qry)
+ nrows = curs.rowcount
+ if nrows == 0:
+ logger.info('[OK] Cross consistency acl check for ' + catname)
+ else:
+ GV.checkStatus = False
+ setError(ERROR_NOREPAIR)
+ GV.aclStatus = False
+ logger.info('[FAIL] Cross consistency acl check for ' + catname)
+ logger.error(' %s acl check has %d issue(s)' % (catname, nrows))
+
+ fields = [desc[0] for desc in curs.description]
+ gplog.log_literal(logger, logging.ERROR, " " + " | ".join(fields))
+                results = curs.fetchall()
+                for row in results:
+                    gplog.log_literal(logger, logging.ERROR, "    " + " | ".join(map(str, row)))
+                processACLResult(catname, fields, results)
except Exception as e:
setError(ERROR_NOREPAIR)
GV.aclStatus = False
@@ -1295,9 +1292,9 @@ def checkForeignKey(cat_tables=None):
if not cat_tables:
cat_tables = GV.catalog.getCatalogTables()
- db_connection = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
try:
- foreign_key_check = ForeignKeyCheck(db_connection, logger, GV.opt['-S'], autoCast)
+ foreign_key_check = ForeignKeyCheck(conn, logger, GV.opt['-S'], autoCast)
foreign_key_issues = foreign_key_check.runCheck(cat_tables)
if foreign_key_issues:
GV.checkStatus = False
@@ -1307,13 +1304,14 @@ def checkForeignKey(cat_tables=None):
processForeignKeyResult(catname, pkcatname, fields, results)
if catname == 'gp_fastsequence' and pkcatname == 'pg_class':
setError(ERROR_REMOVE)
- removeFastSequence(db_connection)
+ removeFastSequence(conn)
else:
setError(ERROR_NOREPAIR)
except Exception as ex:
setError(ERROR_NOREPAIR)
GV.foreignKeyStatus = False
myprint(' Execution error: ' + str(ex))
+
# -------------------------------------------------------------------------------
@@ -1391,40 +1389,39 @@ def checkTableMissingEntry(cat):
# Execute the query
try:
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
- curs = db.query(qry)
- nrows = curs.ntuples()
- results = curs.getresult()
- fields = curs.listfields()
-
- if nrows != 0:
- results = filterSpuriousFailures(catname, fields, results)
- nrows = len(results)
-
- if nrows == 0:
- logger.info('[OK] Checking for missing or extraneous entries for ' + catname)
- else:
- if catname in ['pg_constraint']:
- logger_with_level = logger.warning
- log_level = logging.WARNING
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ with conn.cursor() as curs:
+ curs.execute(qry)
+ nrows = curs.rowcount
+ results = curs.fetchall()
+ fields = [desc[0] for desc in curs.description]
+
+ if nrows != 0:
+ results = filterSpuriousFailures(catname, fields, results)
+ nrows = len(results)
+
+ if nrows == 0:
+ logger.info('[OK] Checking for missing or extraneous entries for ' + catname)
else:
- GV.checkStatus = False
- GV.missingEntryStatus = False
- logger_with_level = logger.error
- log_level = logging.ERROR
-
- logger.info(('[%s] Checking for missing or extraneous entries for ' + catname) %
- ('WARNING' if log_level == logging.WARNING else 'FAIL'))
- logger_with_level(' %s has %d issue(s)' % (catname, nrows))
- gplog.log_literal(logger, log_level, " " + " | ".join(fields))
+ if catname in ['pg_constraint']:
+ logger_with_level = logger.warning
+ log_level = logging.WARNING
+ else:
+ GV.checkStatus = False
+ GV.missingEntryStatus = False
+ logger_with_level = logger.error
+ log_level = logging.ERROR
+
+ logger.info(('[%s] Checking for missing or extraneous entries for ' + catname) %
+ ('WARNING' if log_level == logging.WARNING else 'FAIL'))
+ logger_with_level(' %s has %d issue(s)' % (catname, nrows))
+ gplog.log_literal(logger, log_level, " " + " | ".join(fields))
for row in results:
gplog.log_literal(logger, log_level, " " + " | ".join(map(str, row)))
processMissingDuplicateEntryResult(catname, fields, results, "missing")
if catname == 'pg_type':
generateVerifyFile(catname, fields, results, 'missing_extraneous')
-
return results
-
except Exception as e:
setError(ERROR_NOREPAIR)
GV.missingEntryStatus = False
@@ -1434,8 +1431,8 @@ def checkTableMissingEntry(cat):
class checkAOSegVpinfoThread(execThread):
- def __init__(self, cfg, db):
- execThread.__init__(self, cfg, db, None)
+ def __init__(self, cfg, conn):
+ execThread.__init__(self, cfg, conn, None)
def run(self):
aoseg_query = """
@@ -1447,9 +1444,10 @@ class checkAOSegVpinfoThread(execThread):
try:
# Read the list of aoseg tables from the database
- curs = self.db.query(aoseg_query)
+ curs = self.conn.cursor()
+ curs.execute(aoseg_query)
- for relname, relid, segrelid, segrelname, attr_count in curs.getresult():
+ for relname, relid, segrelid, segrelname, attr_count in curs.fetchall():
# We check vpinfo consistency only for segs that are in state
# AOSEG_STATE_DEFAULT and which are not RESERVED_SEGNO.
# RESERVED_SEGNO can have a different number of attributes than
@@ -1459,8 +1457,8 @@ class checkAOSegVpinfoThread(execThread):
# The vpinfo for RESERVED_SEGNO will have more columns than
# relnatts in that case.
qry = "SELECT distinct(length(vpinfo)) FROM pg_aoseg.%s where state = 1 and segno <> 0;" % (segrelname)
- vpinfo_curs = self.db.query(qry)
- nrows = vpinfo_curs.ntuples()
+ curs.execute(qry)
+ nrows = curs.rowcount
if nrows == 0:
continue
elif nrows > 1:
@@ -1475,7 +1473,7 @@ class checkAOSegVpinfoThread(execThread):
logger.error(qry)
continue
- vpinfo_length = vpinfo_curs.getresult()[0][0]
+ vpinfo_length = curs.fetchone()[0]
# vpinfo is bytea type, the length of the first 3 fields is 12 bytes, and the size of AOCSVPInfoEntry is 16
# typedef struct AOCSVPInfo
@@ -1508,8 +1506,85 @@ def checkAOSegVpinfo():
# parallelise check
for dbid in GV.cfg:
cfg = GV.cfg[dbid]
- db_connection = connect2(cfg)
- thread = checkAOSegVpinfoThread(cfg, db_connection)
+ conn = connect2(cfg)
+ thread = checkAOSegVpinfoThread(cfg, conn)
+ thread.start()
+ logger.debug('launching check thread %s for dbid %i' %
+ (thread.name, dbid))
+ threads.append(thread)
+
+ if (i % GV.opt['-B']) == 0:
+ processThread(threads)
+ threads = []
+
+ i += 1
+
+ processThread(threads)
+
+class checkAOLastrownumThread(execThread):
+ def __init__(self, cfg, conn):
+ execThread.__init__(self, cfg, conn, None)
+
+    # pg_attribute_encoding.lastrownums[segno], if it exists, should have a corresponding entry in
+    # gp_fastsequence with an objid matching the segno, and the value of pg_attribute_encoding.lastrownums[segno]
+    # should fall in the range [0, {last_sequence}], where {last_sequence} is the current gp_fastsequence
+    # value with the corresponding objid. Note that objmod starts from 0 but the array index starts from 1.
+ def run(self):
+ aolastrownum_query = """
+ SELECT
+ c.relname,
+ ao.relid,
+ ae.attnum,
+ ae.lastrownums,
+ f.objmod,
+ f.last_sequence,
+ ae.lastrownums[f.objmod + 1] AS lastrownum
+ FROM
+ pg_attribute_encoding ae
+ JOIN pg_appendonly ao ON ae.attrelid = ao.relid
+ LEFT JOIN gp_fastsequence f ON ao.segrelid = f.objid
+ JOIN pg_class c ON ao.relid = c.oid
+ WHERE
+ f.last_sequence IS NULL
+ OR f.last_sequence < ae.lastrownums[f.objmod + 1]
+ OR ae.lastrownums[f.objmod + 1] < 0;
+ """
+
+ try:
+ # Execute the query
+ curs = self.conn.cursor()
+ curs.execute(aolastrownum_query)
+ nrows = curs.rowcount
+
+ if nrows == 0:
+ logger.info('[OK] AO lastrownums check for pg_attribute_encoding')
+ else:
+ GV.checkStatus = False
+ # we could not fix this issue automatically
+ setError(ERROR_NOREPAIR)
+ logger.info('[FAIL] AO lastrownums check for pg_attribute_encoding')
+ for relname, relid, attnum, lastrownums, objmod, last_sequence, last_rownum in curs.fetchall():
+ logger.error(" found inconsistent last_rownum {rownum} with last_sequence {seqnum} of aoseg {segno} for table '{relname}' attribute {attnum} on segment {content}"
+ .format(rownum = last_rownum,
+ seqnum = last_sequence,
+ segno = objmod,
+ relname = relname,
+ attnum = attnum,
+ content = self.cfg['content']))
+
+ except Exception as e:
+ GV.checkStatus = False
+ self.error = e
+
+# for test "ao_lastrownums"
+def checkAOLastrownums():
+ threads = []
+ i = 1
+ # parallelise check
+ for dbid in GV.cfg:
+ cfg = GV.cfg[dbid]
+ conn = connect2(cfg)
+ thread = checkAOLastrownumThread(cfg, conn)
thread.start()
logger.debug('launching check thread %s for dbid %i' %
(thread.name, dbid))
@@ -1663,9 +1738,10 @@ def checkTableInconsistentEntry(cat):
# Execute the query
try:
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
- curs = db.query(qry)
- nrows = curs.ntuples()
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ curs = conn.cursor()
+ curs.execute(qry)
+ nrows = curs.rowcount
if nrows == 0:
logger.info('[OK] Checking for inconsistent entries for ' + catname)
@@ -1676,16 +1752,14 @@ def checkTableInconsistentEntry(cat):
logger.info('[FAIL] Checking for inconsistent entries for ' + catname)
logger.error(' %s has %d issue(s)' % (catname, nrows))
- fields = curs.listfields()
+ fields = [desc[0] for desc in curs.description]
gplog.log_literal(logger, logging.ERROR, " " + " | ".join(fields))
- for row in curs.getresult():
+ results = curs.fetchall()
+ for row in results:
gplog.log_literal(logger, logging.ERROR, " " + " | ".join(map(str, row)))
- results = curs.getresult()
processInconsistentEntryResult(catname, pkey, fields, results)
if catname == 'pg_type':
generateVerifyFile(catname, fields, results, 'duplicate')
-
-
except Exception as e:
setError(ERROR_NOREPAIR)
GV.inconsistentEntryStatus = False
@@ -1799,9 +1873,10 @@ def checkTableDuplicateEntry(cat):
# Execute the query
try:
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
- curs = db.query(qry)
- nrows = curs.ntuples()
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ curs = conn.cursor()
+ curs.execute(qry)
+ nrows = curs.rowcount
if nrows == 0:
logger.info('[OK] Checking for duplicate entries for ' + catname)
@@ -1813,7 +1888,7 @@ def checkTableDuplicateEntry(cat):
-            fields = curs.listfields()
+            fields = [desc[0] for desc in curs.description]
gplog.log_literal(logger, logging.ERROR, " " + " | ".join(fields))
- results = curs.getresult()
+ results = curs.fetchall()
for row in results:
gplog.log_literal(logger, logging.ERROR, " " + " | ".join(map(str, row)))
processMissingDuplicateEntryResult(catname, fields, results, "duplicate")
@@ -1862,9 +1937,9 @@ def duplicateEntryQuery(catname, pkey):
def checkUniqueIndexViolation():
logger.info('-----------------------------------')
logger.info('Performing check: checking for violated unique indexes')
- db_connection = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
- violations = UniqueIndexViolationCheck().runCheck(db_connection)
+ violations = UniqueIndexViolationCheck().runCheck(conn)
checkname = 'unique index violation(s)'
if violations:
@@ -1901,9 +1976,9 @@ def checkOrphanedToastTables():
logger.info('-----------------------------------')
logger.info('Performing check: checking for orphaned TOAST tables')
- db_connection = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
checker = OrphanedToastTablesCheck()
- check_passed = checker.runCheck(db_connection)
+ check_passed = checker.runCheck(conn)
checkname = 'orphaned toast table(s)'
if check_passed:
@@ -2424,12 +2499,14 @@ def getOidFromPK(catname, pkeys):
pkeystr=pkeystr)
try:
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
- curs = db.query(qry)
- if (len(curs.dictresult()) == 0):
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ curs = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
+ curs.execute(qry)
+ results = curs.fetchall()
+ if (len(results) == 0):
raise QueryException("No such entry '%s' in %s" % (pkeystr, catname))
- return curs.dictresult().pop()['oid']
+ return results.pop()['oid']
except Exception as e:
setError(ERROR_NOREPAIR)
@@ -2441,10 +2518,11 @@ def getOidFromPK(catname, pkeys):
def getClassOidForRelfilenode(relfilenode):
qry = "SELECT oid FROM pg_class WHERE relfilenode = %d;" % (relfilenode)
try:
- dburl = dbconn.DbURL(hostname=GV.opt['-h'], port=GV.opt['-p'], dbname=GV.dbname)
- conn = dbconn.connect(dburl)
- oid = dbconn.queryRow(conn, qry)[0]
- return oid
+ with closing(connect()) as conn:
+ with conn.cursor() as curs:
+ curs.execute(qry)
+ oid = curs.fetchone()[0]
+ return oid
except Exception as e:
setError(ERROR_NOREPAIR)
myprint(' Execution error: ' + str(e))
@@ -2464,10 +2542,12 @@ def getResourceTypeOid(oid):
""" % (oid, oid)
try:
- db = connect()
- curs = db.query(qry)
- if len(curs.dictresult()) == 0: return 0
- return curs.dictresult().pop()['oid']
+ with closing(connect()) as conn:
+ with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
+ curs.execute(qry)
+ results = curs.fetchall()
+ if len(results) == 0: return 0
+ return results.pop()['oid']
except Exception as e:
setError(ERROR_NOREPAIR)
myprint(' Execution error: ' + str(e))
@@ -3062,7 +3142,7 @@ class GPObject:
# Collect all tables with missing issues for later reporting
if len(self.missingIssues):
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
oid_query = "select (select nspname from pg_namespace where oid=relnamespace) || '.' || relname from pg_class where oid=%d"
type_query = "select (select nspname from pg_namespace where oid=relnamespace) || '.' || relname from pg_class where reltype=%d"
for issues in self.missingIssues.values() :
@@ -3070,19 +3150,23 @@ class GPObject:
# Get schemaname.tablename corresponding to oid
for key in issue.pkeys:
if 'relid' in key or key in ['ev_class', 'reloid']:
- table_list = db.query(oid_query % issue.pkeys[key]).getresult()
+ curs = conn.cursor()
+ curs.execute(oid_query % issue.pkeys[key])
+ table_list = curs.fetchone()
if table_list:
if issue.type == 'missing':
- GV.missing_attr_tables.append( (table_list[0][0], issue.segids) )
+ GV.missing_attr_tables.append( (table_list[0], issue.segids) )
else:
- GV.extra_attr_tables.append( (table_list[0][0], issue.segids) )
+ GV.extra_attr_tables.append( (table_list[0], issue.segids) )
elif key == 'oid':
- table_list = db.query(type_query % issue.pkeys[key]).getresult()
+ curs = conn.cursor()
+ curs.execute(type_query % issue.pkeys[key])
+ table_list = curs.fetchone()
if table_list:
if issue.type == 'missing':
- GV.missing_attr_tables.append( (table_list[0][0], issue.segids) )
+ GV.missing_attr_tables.append( (table_list[0], issue.segids) )
else:
- GV.extra_attr_tables.append( (table_list[0][0], issue.segids) )
+ GV.extra_attr_tables.append( (table_list[0], issue.segids) )
def __cmp__(self, other):
@@ -3231,9 +3315,11 @@ def getRelInfo(objects):
""".format(oids=','.join(map(str, oids)))
try:
- db = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
- curs = db.query(qry)
- for row in curs.getresult():
+ conn = connect2(GV.cfg[GV.coordinator_dbid], utilityMode=False)
+ curs = conn.cursor()
+ curs.execute(qry)
+ results = curs.fetchall()
+ for row in results:
(oid, relname, nspname, relkind, paroid) = row
objects[oid, 'pg_class'].setRelInfo(relname, nspname, relkind, paroid)
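
The gpcheckcat changes above repeat one mechanical mapping from the PyGreSQL API to psycopg2; a condensed sketch of that mapping (connection parameters and the query are illustrative):

```python
# Condensed pg -> psycopg2 mapping used throughout gpcheckcat:
#   db.query(q).dictresult()  ->  RealDictCursor + fetchall()
#   curs.listfields()         ->  [desc[0] for desc in curs.description]
#   curs.ntuples()            ->  curs.rowcount
from contextlib import closing

import psycopg2
import psycopg2.extras

with closing(psycopg2.connect(dbname="postgres")) as conn:
    with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as curs:
        curs.execute("select datname from pg_database where datallowconn")
        for row in curs.fetchall():
            print(row["datname"])  # rows index by column name, as dictresult() did
```
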
diff --git a/gpMgmt/bin/gpcheckcat_modules/foreign_key_check.py b/gpMgmt/bin/gpcheckcat_modules/foreign_key_check.py
index ad4c1543fae..5ac49972110 100644
--- a/gpMgmt/bin/gpcheckcat_modules/foreign_key_check.py
+++ b/gpMgmt/bin/gpcheckcat_modules/foreign_key_check.py
@@ -2,6 +2,7 @@
from gppylib.gplog import *
from gppylib.gpcatalog import *
+from contextlib import closing
import re
class ForeignKeyCheck:
@@ -105,25 +106,25 @@ def checkTableForeignKey(self, cat):
def _validate_relation(self, catname, fkeystr, pkcatname, pkeystr, qry):
issue_list = []
try:
- curs = self.db_connection.query(qry)
- nrows = curs.ntuples()
-
- if nrows == 0:
- self.logger.info('[OK] Foreign key check for %s(%s) referencing %s(%s)' %
- (catname, fkeystr, pkcatname, pkeystr))
- else:
- self.logger.info('[FAIL] Foreign key check for %s(%s) referencing %s(%s)' %
- (catname, fkeystr, pkcatname, pkeystr))
- self.logger.error(' %s has %d issue(s): entry has NULL reference of %s(%s)' %
- (catname, nrows, pkcatname, pkeystr))
-
- fields = curs.listfields()
- log_literal(self.logger, logging.ERROR, " " + " | ".join(fields))
- for row in curs.getresult():
- log_literal(self.logger, logging.ERROR, " " + " | ".join(map(str, row)))
- results = curs.getresult()
- issue_list.append((pkcatname, fields, results))
-
+ with closing(self.db_connection.cursor()) as curs:
+ curs.execute(qry)
+ nrows = curs.rowcount
+
+ if nrows == 0:
+ self.logger.info('[OK] Foreign key check for %s(%s) referencing %s(%s)' %
+ (catname, fkeystr, pkcatname, pkeystr))
+ else:
+ self.logger.info('[FAIL] Foreign key check for %s(%s) referencing %s(%s)' %
+ (catname, fkeystr, pkcatname, pkeystr))
+ self.logger.error(' %s has %d issue(s): entry has NULL reference of %s(%s)' %
+ (catname, nrows, pkcatname, pkeystr))
+
+ fields = [desc[0] for desc in curs.description]
+ log_literal(self.logger, logging.ERROR, " " + " | ".join(fields))
+ results = curs.fetchall()
+ for row in results:
+ log_literal(self.logger, logging.ERROR, " " + " | ".join(map(str, row)))
+ issue_list.append((pkcatname, fields, results))
except Exception as e:
err_msg = '[ERROR] executing: Foreign key check for catalog table {0}. Query : \n {1}\n'.format(catname, qry)
err_msg += str(e)
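
A note on the closing() wrapper introduced above: it guarantees the cursor is released even when
execute() or the logging raises. A sketch of the pattern, with qry standing for any SELECT:

    from contextlib import closing

    with closing(conn.cursor()) as curs:
        curs.execute(qry)
        fields = [desc[0] for desc in curs.description]  # column names, like listfields()
        rows = curs.fetchall()
    # cursor is closed here, on success and on error alike

psycopg2 cursors are context managers themselves, so "with conn.cursor() as curs:" behaves the
same; closing() simply makes the cleanup explicit.
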
diff --git a/gpMgmt/bin/gpcheckcat_modules/leaked_schema_dropper.py b/gpMgmt/bin/gpcheckcat_modules/leaked_schema_dropper.py
index 87e55a5cf7b..dc7cfacb32f 100644
--- a/gpMgmt/bin/gpcheckcat_modules/leaked_schema_dropper.py
+++ b/gpMgmt/bin/gpcheckcat_modules/leaked_schema_dropper.py
@@ -35,16 +35,19 @@ class LeakedSchemaDropper:
"""
def __get_leaked_schemas(self, db_connection):
- leaked_schemas = db_connection.query(self.leaked_schema_query)
+ with db_connection.cursor() as curs:
+ curs.execute(self.leaked_schema_query)
+ leaked_schemas = curs.fetchall()
- if not leaked_schemas:
- return []
+ if not leaked_schemas:
+ return []
- return [row[0] for row in leaked_schemas.getresult() if row[0]]
+ return [row[0] for row in leaked_schemas if row[0]]
def drop_leaked_schemas(self, db_connection):
leaked_schemas = self.__get_leaked_schemas(db_connection)
for leaked_schema in leaked_schemas:
escaped_schema_name = escapeDoubleQuoteInSQLString(leaked_schema)
- db_connection.query('DROP SCHEMA IF EXISTS %s CASCADE;' % (escaped_schema_name))
+ with db_connection.cursor() as curs:
+ curs.execute('DROP SCHEMA IF EXISTS %s CASCADE;' % (escaped_schema_name))
return leaked_schemas
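
The DROP statement above interpolates the escaped schema name because %s placeholders bind values
only, never identifiers. If psycopg2 >= 2.7 can be assumed, the psycopg2.sql module offers a
composable alternative to manual double-quote escaping; a hypothetical variant:

    from psycopg2 import sql

    with db_connection.cursor() as curs:
        curs.execute(sql.SQL("DROP SCHEMA IF EXISTS {} CASCADE")
                     .format(sql.Identifier(leaked_schema)))
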
diff --git a/gpMgmt/bin/gpcheckcat_modules/orphaned_toast_tables_check.py b/gpMgmt/bin/gpcheckcat_modules/orphaned_toast_tables_check.py
index 21ec8d18047..a76ef560867 100644
--- a/gpMgmt/bin/gpcheckcat_modules/orphaned_toast_tables_check.py
+++ b/gpMgmt/bin/gpcheckcat_modules/orphaned_toast_tables_check.py
@@ -4,6 +4,8 @@
from collections import namedtuple
from gpcheckcat_modules.orphan_toast_table_issues import OrphanToastTableIssue, DoubleOrphanToastTableIssue, ReferenceOrphanToastTableIssue, DependencyOrphanToastTableIssue, MismatchOrphanToastTableIssue
+import psycopg2
+from psycopg2 import extras
OrphanedTable = namedtuple('OrphanedTable', 'oid catname')
@@ -117,8 +119,10 @@ def __init__(self):
"""
def runCheck(self, db_connection):
- orphaned_toast_tables = db_connection.query(self.orphaned_toast_tables_query).dictresult()
- if len(orphaned_toast_tables) == 0:
+ curs = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+ curs.execute(self.orphaned_toast_tables_query)
+ orphaned_toast_tables = curs.fetchall()
+ if curs.rowcount == 0:
return True
for row in orphaned_toast_tables:
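
DictCursor preserves the old dictresult() access style: each row is indexable by column name as
well as by position. A minimal sketch, assuming conn is an open psycopg2 connection:

    import psycopg2.extras

    with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as curs:
        curs.execute("SELECT oid, relname FROM pg_class LIMIT 1")
        row = curs.fetchone()
        print(row['relname'])   # by name, as dictresult() allowed
        print(row[1])           # positional access still works
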
diff --git a/gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py b/gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py
index 47999f5c59c..6778401f31a 100644
--- a/gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py
+++ b/gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py
@@ -34,22 +34,24 @@ def __init__(self):
) as violations
"""
- def runCheck(self, db_connection):
- unique_indexes = db_connection.query(self.unique_indexes_query).getresult()
- violations = []
+ def runCheck(self, conn):
+ with conn.cursor() as cur:
+ cur.execute(self.unique_indexes_query)
+ unique_indexes = cur.fetchall()
+ violations = []
- for (table_oid, index_name, table_name, column_names) in unique_indexes:
- column_names = ",".join(column_names)
- sql = self.get_violated_segments_query(table_name, column_names)
- violated_segments = db_connection.query(sql).getresult()
- if violated_segments:
- violations.append(dict(table_oid=table_oid,
- table_name=table_name,
- index_name=index_name,
- column_names=column_names,
- violated_segments=[row[0] for row in violated_segments]))
-
- return violations
+ for (table_oid, index_name, table_name, column_names) in unique_indexes:
+ column_names = ",".join(column_names)
+ sql = self.get_violated_segments_query(table_name, column_names)
+ cur.execute(sql)
+ violated_segments = cur.fetchall()
+ if violated_segments:
+ violations.append(dict(table_oid=table_oid,
+ table_name=table_name,
+ index_name=index_name,
+ column_names=column_names,
+ violated_segments=[row[0] for row in violated_segments]))
+ return violations
def get_violated_segments_query(self, table_name, column_names):
return self.violated_segments_query % (
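
The rewrite above reuses one cursor for all the per-index queries; that is safe because fetchall()
materializes the previous result before the next execute() replaces it. A sketch of the shape
(query text illustrative):

    with conn.cursor() as cur:
        cur.execute("SELECT indexrelid FROM pg_index WHERE indisunique LIMIT 5")
        for (index_oid,) in cur.fetchall():   # already a plain list, detached from cur
            cur.execute("SELECT %s::regclass::text", (index_oid,))
            print(cur.fetchone()[0])
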
diff --git a/gpMgmt/bin/gpconfig b/gpMgmt/bin/gpconfig
index 7bd3023ea85..464e9d79440 100755
--- a/gpMgmt/bin/gpconfig
+++ b/gpMgmt/bin/gpconfig
@@ -15,7 +15,7 @@
import os
import sys
import re
-
+from psycopg2 import DatabaseError
try:
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.gparray import GpArray
@@ -25,7 +25,6 @@ try:
from gppylib.commands.gp import *
from gppylib.db import dbconn
from gppylib.userinput import *
- from pg import DatabaseError
from gpconfig_modules.segment_guc import SegmentGuc
from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc
from gpconfig_modules.file_segment_guc import FileSegmentGuc
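
psycopg2 exposes the DB-API exception hierarchy at the package top level, so the old
"from pg import DatabaseError" maps directly onto "from psycopg2 import DatabaseError". A minimal
sketch (connection parameters illustrative):

    import psycopg2
    from psycopg2 import DatabaseError

    try:
        conn = psycopg2.connect(dbname='postgres')
        with conn.cursor() as cur:
            cur.execute('SELECT 1')
    except DatabaseError as e:
        print('query failed:', e)
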
diff --git a/gpMgmt/bin/gpexpand b/gpMgmt/bin/gpexpand
index 562fad58213..57cd6618773 100755
--- a/gpMgmt/bin/gpexpand
+++ b/gpMgmt/bin/gpexpand
@@ -17,10 +17,10 @@ import signal
import traceback
from collections import defaultdict
from time import strftime, sleep
-
+import psycopg2
+from psycopg2 import DatabaseError, OperationalError
+from psycopg2 import extras
try:
- import pg, pgdb
-
from gppylib.commands.unix import *
from gppylib.commands.gp import *
from gppylib.gparray import GpArray, MODE_NOT_SYNC, STATUS_DOWN
@@ -32,9 +32,7 @@ try:
from gppylib.operations.startSegments import MIRROR_MODE_MIRRORLESS
from gppylib.system import configurationInterface, configurationImplGpdb
from gppylib.system.environment import GpCoordinatorEnvironment
- from pgdb import DatabaseError
- from gppylib.gpcatalog import COORDINATOR_ONLY_TABLES_MAPPED
- from gppylib.gpcatalog import COORDINATOR_ONLY_TABLES_NON_MAPPED
+ from gppylib.gpcatalog import COORDINATOR_ONLY_TABLES
from gppylib.operations.package import SyncPackages
from gppylib.operations.utils import ParallelOperation
from gppylib.parseutils import line_reader, check_values, canonicalize_address
@@ -1864,7 +1862,7 @@ WHERE
expansionStopped)
dbconn.execSQL(self.conn, sql)
self.conn.close()
- except pgdb.OperationalError:
+ except OperationalError:
pass
except Exception:
# schema doesn't exist. Cancel or error during setup
@@ -1916,7 +1914,7 @@ WHERE
def connect_database(self, dbname):
test_url = copy.deepcopy(self.dburl)
test_url.pgdb = dbname
- c = dbconn.connect(test_url, encoding='UTF8', allowSystemTableMods=True)
+ c = dbconn.connect(test_url, encoding='UTF8', allowSystemTableMods=True, cursorFactory=psycopg2.extras.NamedTupleCursor)
return c
def sync_packages(self):
@@ -2157,7 +2155,7 @@ class ExpandCommand(SQLCommand):
try:
status_conn = dbconn.connect(self.status_url, encoding='UTF8')
- table_conn = dbconn.connect(self.table_url, encoding='UTF8')
+ table_conn = dbconn.connect(self.table_url, encoding='UTF8', cursorFactory=psycopg2.extras.NamedTupleCursor)
except DatabaseError as ex:
if self.options.verbose:
logger.exception(ex)
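
NamedTupleCursor is what lets the gpexpand code keep reading result columns by name. A sketch of
the behavior, on the assumption that dbconn.connect() forwards its cursorFactory argument to
psycopg2.connect():

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect(dbname='postgres',
                            cursor_factory=psycopg2.extras.NamedTupleCursor)
    with conn.cursor() as cur:
        cur.execute("SELECT 1 AS status, 'ok' AS detail")
        row = cur.fetchone()
        print(row.status, row.detail)   # attribute access instead of row[0], row[1]
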
diff --git a/gpMgmt/bin/gpload.py b/gpMgmt/bin/gpload.py
index a1696947bbc..19963c7c80d 100755
--- a/gpMgmt/bin/gpload.py
+++ b/gpMgmt/bin/gpload.py
@@ -35,22 +35,8 @@
sys.exit(2)
import platform
-
-try:
- import pg
-except ImportError:
- try:
- from pygresql import pg
- except Exception as e:
- pass
-except Exception as e:
- print(repr(e))
- errorMsg = "gpload was unable to import The PyGreSQL Python module (pg.py) - %s\n" % str(e)
- sys.stderr.write(str(errorMsg))
- errorMsg = "Please check if you have the correct Visual Studio redistributable package installed.\n"
- sys.stderr.write(str(errorMsg))
- sys.exit(2)
-
+import psycopg2
+from psycopg2 import extras
import hashlib
import datetime,getpass,os,signal,socket,threading,time,traceback,re
import subprocess
@@ -562,6 +548,16 @@ def is_keyword(tab):
else:
return False
+# Escape single quotes and backslashes in the string, following SQL string-constant syntax.
+# E.g.,
+# >>> escape_string(r"O'Reilly")
+# "O''Reilly"
+def escape_string(string):
+ adapted = psycopg2.extensions.QuotedString(string)
+    # getquoted() returns a 'latin-1'-encoded byte string by default, so set
+    # the encoding explicitly before decoding.
+ adapted.encoding = 'utf-8'
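+    # getquoted() also wraps the value in single quotes (e.g. b"'O''Reilly'"),
+    # so the surrounding quotes are stripped before returning.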
+ return adapted.getquoted().decode()[1:-1]
def caseInsensitiveDictLookup(key, dictionary):
"""
@@ -724,25 +720,6 @@ def match_notice_obj(notice):
else:
return 0
-
-def notice_processor_Notice(notice):
- # process the notice in main branch
- # notice is a class which is different in 6X, we need a new function to process
- global NUM_WARN_ROWS
- if windowsPlatform == True:
- # We don't have a pygresql with our notice fix, so skip for windows.
- # This means we will not get any warnings on windows (MPP10989).
- return
- theNotices = notice.message
- messageNumber = 0
- if isinstance(theNotices, list):
- while messageNumber < len(theNotices) and NUM_WARN_ROWS==0:
- NUM_WARN_ROWS = match_notice_obj(theNotices[messageNumber])
- messageNumber+=1
- else:
- NUM_WARN_ROWS = match_notice_obj(theNotices)
-
-
def notice_processor(notice):
global NUM_WARN_ROWS
if windowsPlatform == True:
@@ -1757,9 +1734,9 @@ def setup_connection(self, recurse = 0):
"""
Connect to the backend
"""
- if self.db != None:
- self.db.close()
- self.db = None
+ if self.conn != None:
+ self.conn.close()
+ self.conn = None
if self.options.W:
if self.options.password==None:
self.options.password = getpass.getpass()
@@ -1778,19 +1755,20 @@ def setup_connection(self, recurse = 0):
" host=" + str(self.options.h) +
" port=" + str(self.options.p) +
" database=" + str(self.options.d))
- self.db = pg.DB( dbname=self.options.d
- , host=self.options.h
- , port=self.options.p
- , user=self.options.U
- , passwd=self.options.password
- )
+ self.conn = psycopg2.connect(dbname=self.options.d,
+ host=self.options.h,
+ port=self.options.p,
+ user=self.options.U,
+ password=self.options.password)
+ self.conn.set_session(autocommit=True)
self.log(self.DEBUG, "Successfully connected to database")
if noGpVersion == False:
# Get GPDB version
- curs = self.db.query("SELECT version()")
- self.gpdb_version = GpVersion(curs.getresult()[0][0])
- self.log(self.DEBUG, "GPDB version is: %s" % self.gpdb_version)
+ with self.conn.cursor() as cur:
+ cur.execute("SELECT version()")
+ self.gpdb_version = GpVersion(cur.fetchall()[0][0])
+ self.log(self.DEBUG, "GPDB version is: %s" % self.gpdb_version)
except Exception as e:
errorMessage = str(e)
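
Enabling autocommit preserves the old driver's semantics: pg.DB effectively committed each query()
on its own, while a stock psycopg2 connection opens a transaction at the first execute() and holds
it until commit(). A sketch (parameters and table name illustrative):

    conn = psycopg2.connect(dbname='postgres')
    conn.set_session(autocommit=True)   # equivalent: conn.autocommit = True
    with conn.cursor() as cur:
        cur.execute('TRUNCATE my_staging_table')   # effective immediately, no commit() needed
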
@@ -1884,13 +1862,15 @@ def read_table_metadata(self):
WHERE c.relname = '%s'
AND pg_catalog.pg_table_is_visible(c.oid);""" % quote_unident(self.table)
- resultList = self.db.query(queryString).getresult()
+ with self.conn.cursor() as cur:
+ cur.execute(queryString)
+ resultList = cur.fetchall()
- if len(resultList) > 0:
- self.schema = (resultList[0])[0]
- self.log(self.INFO, "setting schema '%s' for table '%s'" % (self.schema, quote_unident(self.table)))
- else:
- self.log(self.ERROR, "table %s not found in any database schema" % self.table)
+ if len(resultList) > 0:
+ self.schema = (resultList[0])[0]
+ self.log(self.INFO, "setting schema '%s' for table '%s'" % (self.schema, quote_unident(self.table)))
+ else:
+ self.log(self.ERROR, "table %s not found in any database schema" % self.table)
queryString = """select nt.nspname as table_schema,
@@ -1913,41 +1893,45 @@ def read_table_metadata(self):
count = 0
self.into_columns = []
self.into_columns_dict = dict()
- resultList = self.db.query(queryString).dictresult()
- while count < len(resultList):
- row = resultList[count]
- count += 1
- ct = str(row['data_type'])
- if ct == 'bigserial':
- ct = 'bigint'
- elif ct == 'serial':
- ct = 'int4'
- name = row['column_name']
- name = quote_ident(name)
- has_seq = row['has_sequence']
- if has_seq == str('f') or has_seq==False:
- has_seq_bool = False
- if has_seq == str('t') or has_seq==True:
- has_sql_bool = True
- i = [name,ct,None, has_seq_bool]
- # i: [column name, column data type, mapping target, has_sequence]
- self.into_columns.append(i)
- self.into_columns_dict[name] = i
- self.log(self.DEBUG, "found input column: " + str(i))
- if count == 0:
- # see if it's a permissions issue or it actually doesn't exist
- tableName = quote_unident(self.table)
- tableSchema = quote_unident(self.schema)
- sql = """select 1 from pg_class c, pg_namespace n
- where c.relname = '%s' and
- n.nspname = '%s' and
- n.oid = c.relnamespace""" % (tableName, tableSchema)
- resultList = self.db.query(sql).getresult()
- if len(resultList) > 0:
- self.log(self.ERROR, "permission denied for table %s.%s" % \
- (tableSchema, tableName))
- else:
- self.log(self.ERROR, 'table %s.%s does not exist in database %s'% (tableSchema, tableName, self.options.d))
+ with self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
+ cur.execute(queryString)
+ resultList = cur.fetchall()
+ while count < len(resultList):
+ row = resultList[count]
+ count += 1
+ ct = str(row['data_type'])
+ if ct == 'bigserial':
+ ct = 'bigint'
+ elif ct == 'serial':
+ ct = 'int4'
+ name = row['column_name']
+ name = quote_ident(name)
+ has_seq = row['has_sequence']
+ if has_seq == str('f') or has_seq==False:
+ has_seq_bool = False
+ if has_seq == str('t') or has_seq==True:
+                    has_seq_bool = True
+ i = [name,ct,None, has_seq_bool]
+ # i: [column name, column data type, mapping target, has_sequence]
+ self.into_columns.append(i)
+ self.into_columns_dict[name] = i
+ self.log(self.DEBUG, "found input column: " + str(i))
+ if count == 0:
+ # see if it's a permissions issue or it actually doesn't exist
+ tableName = quote_unident(self.table)
+ tableSchema = quote_unident(self.schema)
+ sql = """select 1 from pg_class c, pg_namespace n
+ where c.relname = '%s' and
+ n.nspname = '%s' and
+ n.oid = c.relnamespace""" % (tableName, tableSchema)
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ resultList = cur.fetchall()
+ if len(resultList) > 0:
+ self.log(self.ERROR, "permission denied for table %s.%s" % \
+ (tableSchema, tableName))
+ else:
+ self.log(self.ERROR, 'table %s.%s does not exist in database %s'% (tableSchema, tableName, self.options.d))
def read_mapping(self):
'''
@@ -2190,8 +2174,10 @@ def get_table_oid(self, tableName):
if tableName:
sql = "select %s::regclass::oid" % quote(quote_unident(tableName))
try:
- resultList = self.db.query(sql).getresult()
- return resultList[0][0]
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ resultList = cur.fetchall()
+ return resultList[0][0]
except Exception as e:
pass
return None
@@ -2322,15 +2308,19 @@ def create_external_table(self):
encodingCode = None
encodingStr = self.getconfig('gpload:input:encoding', str, None)
if encodingStr is None:
- result = self.db.query("SHOW SERVER_ENCODING").getresult()
- if len(result) > 0:
- encodingStr = result[0][0]
+ with self.conn.cursor() as cur:
+ cur.execute("SHOW SERVER_ENCODING")
+ result = cur.fetchall()
+ if len(result) > 0:
+ encodingStr = result[0][0]
if encodingStr:
sql = "SELECT pg_char_to_encoding('%s')" % encodingStr
- result = self.db.query(sql).getresult()
- if len(result) > 0:
- encodingCode = result[0][0]
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchall()
+ if len(result) > 0:
+ encodingCode = result[0][0]
limitStr = self.getconfig('gpload:input:error_limit',int, None)
if self.log_errors and not limitStr:
@@ -2370,11 +2360,13 @@ def create_external_table(self):
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'"""
- result = self.db.query(sql).getresult()
- if len(result) > 0:
- self.extSchemaTable = self.get_schematable(quote_unident(self.extSchemaName), self.extTableName)
- self.log(self.INFO, "reusing external staging table %s" % self.extSchemaTable)
- return
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchall()
+ if len(result) > 0:
+ self.extSchemaTable = self.get_schematable(self.extSchemaName, self.extTableName)
+ self.log(self.INFO, "reusing external staging table %s" % self.extSchemaTable)
+ return
# staging table is not specified, we need to find it manually
else:
# process the single quotes in order to successfully find an existing external table to reuse.
@@ -2386,17 +2378,19 @@ def create_external_table(self):
sql = self.get_reuse_exttable_query(formatType, self.formatOpts,
limitStr, from_cols, self.extSchemaName, self.log_errors, encodingCode)
- resultList = self.db.query(sql).getresult()
- if len(resultList) > 0:
- # found an external table to reuse. no need to create one. we're done here.
- self.extTableName = (resultList[0])[0]
- # fast match result is only table name, so we need add schema info
- if self.fast_match:
- self.extSchemaTable = self.get_schematable(quote_unident(self.extSchemaName), self.extTableName)
- else:
- self.extSchemaTable = self.extTableName
- self.log(self.INFO, "reusing external table %s" % self.extSchemaTable)
- return
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ resultList = cur.fetchall()
+ if len(resultList) > 0:
+ # found an external table to reuse. no need to create one. we're done here.
+ self.extTableName = (resultList[0])[0]
+                    # fast match result is only table name, so we need to add schema info
+ if self.fast_match:
+ self.extSchemaTable = self.get_schematable(self.extSchemaName, self.extTableName)
+ else:
+ self.extSchemaTable = self.extTableName
+ self.log(self.INFO, "reusing external table %s" % self.extSchemaTable)
+ return
# didn't find an existing external table suitable for reuse. Format a reusable
# name and issue a CREATE EXTERNAL TABLE on it. Hopefully we can use it next time
@@ -2428,7 +2422,8 @@ def create_external_table(self):
sql += "segment reject limit %s "%limitStr
try:
- self.db.query(sql.encode('utf-8'))
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
except Exception as e:
self.log(self.ERROR, 'could not run SQL "%s": %s' % (sql, str(e)))
@@ -2446,7 +2441,9 @@ def get_distribution_key(self):
sql = '''select * from pg_get_table_distributedby('%s.%s'::regclass::oid)'''% (self.schema, self.table)
try:
- dk_text = self.db.query(sql.encode('utf-8')).getresult()
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ dk_text = cur.fetchall()
except Exception as e:
self.log(self.ERROR, 'could not run SQL "%s": %s ' % (sql, str(e)))
@@ -2470,11 +2467,13 @@ def create_staging_table(self):
distcols = self.get_distribution_key()
sql = "SELECT * FROM pg_class WHERE relname LIKE 'temp_gpload_reusable_%%';"
- resultList = self.db.query(sql).getresult()
- if len(resultList) > 0:
- self.log(self.WARN, """Old style, reusable tables named "temp_gpload_reusable_*" from a previous versions were found.
- Cloudberry recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""")
-
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ resultList = cur.fetchall()
+ if len(resultList) > 0:
+                self.log(self.WARN, """Old style, reusable tables named "temp_gpload_reusable_*" from a previous version were found.
+                Cloudberry recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""")
+
# If the 'reuse tables' option was specified we now try to find an
# already existing staging table in the catalog which will match
# the one that we need to use. It must meet the reuse conditions
@@ -2491,20 +2490,19 @@ def create_staging_table(self):
# create a string from all reuse conditions for staging tables and ancode it
conditions_str = self.get_staging_conditions_string(target_table_name, target_columns, distcols).encode()
encoding_conditions = hashlib.md5(conditions_str).hexdigest()
+ table_name = 'staging_gpload_reusable_%s'% (encoding_conditions)
+ sql = self.get_reuse_staging_table_query(table_name)
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ resultList = cur.fetchall()
- sql = self.get_reuse_staging_table_query(encoding_conditions)
- resultList = self.db.query(sql).getresult()
-
- if len(resultList) > 0:
-
- # found a temp table to reuse. no need to create one. we're done here.
- self.staging_table_name = (resultList[0])[0]
- self.log(self.INFO, "reusing staging table %s" % self.staging_table_name)
-
- # truncate it so we don't use old data
- self.do_truncate(self.staging_table_name)
-
- return
+ if len(resultList) > 0:
+ # found a temp table to reuse. no need to create one. we're done here.
+ self.staging_table_name = self.get_schematable(self.extSchemaName, table_name)
+ self.log(self.INFO, "reusing staging table %s" % self.staging_table_name)
+ # truncate it so we don't use old data
+ self.do_truncate(self.staging_table_name)
+ return
# didn't find an existing staging table suitable for reuse. Format a reusable
# name and issue a CREATE TABLE on it (without TEMP!). Hopefully we can use it
@@ -2541,27 +2539,31 @@ def create_staging_table(self):
self.log(self.LOG, sql)
if not self.options.D:
- self.db.query(sql)
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
if not self.reuse_tables:
self.cleanupSql.append('DROP TABLE IF EXISTS %s' % self.staging_table_name)
def count_errors(self):
- # callback function is setted before insert
- # notice processor will be called automaticly
+ notice_processor(self.conn.notices)
if self.log_errors and not self.options.D:
# make sure we only get errors for our own instance
if not self.reuse_tables:
- queryStr = "select count(*) from gp_read_error_log('%s')" % pg.escape_string(self.extSchemaTable)
- results = self.db.query(queryStr).getresult()
- return (results[0])[0]
+ queryStr = "select count(*) from gp_read_error_log('%s')" % escape_string(self.extSchemaTable)
+ with self.conn.cursor() as cur:
+ cur.execute(queryStr)
+ results = cur.fetchall()
+ return (results[0])[0]
else: # reuse_tables
- queryStr = "select count(*) from gp_read_error_log('%s') where cmdtime > to_timestamp(%s)" % (pg.escape_string(self.extSchemaTable), self.startTimestamp)
- results = self.db.query(queryStr).getresult()
- global NUM_WARN_ROWS
- NUM_WARN_ROWS = (results[0])[0]
- return (results[0])[0]
+ queryStr = "select count(*) from gp_read_error_log('%s') where cmdtime > to_timestamp(%s)" % (escape_string(self.extSchemaTable), self.startTimestamp)
+ with self.conn.cursor() as cur:
+ cur.execute(queryStr)
+ results = cur.fetchall()
+ global NUM_WARN_ROWS
+ NUM_WARN_ROWS = (results[0])[0]
+                    return (results[0])[0]
return 0
def report_errors(self):
@@ -2575,7 +2577,7 @@ def report_errors(self):
# if reuse_table is set, error message is not deleted.
if errors and self.log_errors and self.reuse_tables:
self.log(self.WARN, "Please use following query to access the detailed error")
- self.log(self.WARN, "select * from gp_read_error_log('{0}') where cmdtime > to_timestamp('{1}')".format(pg.escape_string(self.extSchemaTable), self.startTimestamp))
+ self.log(self.WARN, "select * from gp_read_error_log('{0}') where cmdtime > to_timestamp('{1}')".format(escape_string(self.extSchemaTable), self.startTimestamp))
self.exitValue = 1 if errors else 0
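
Rather than registering a notice receiver as PyGreSQL did, psycopg2 appends server messages to the
conn.notices list, which count_errors() above now hands to notice_processor(). A sketch of where
those strings come from (the NOTICE text is illustrative):

    conn = psycopg2.connect(dbname='postgres')
    conn.autocommit = True
    with conn.cursor() as cur:
        cur.execute("DO $$ BEGIN RAISE NOTICE 'rows rejected: 3'; END $$")
    print(conn.notices)   # e.g. ["NOTICE:  rows rejected: 3\n"]
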
@@ -2603,9 +2605,9 @@ def do_insert(self, dest):
self.log(self.LOG, sql)
if not self.options.D:
try:
- # we need to set the notice receiver function before do insert
- self.db.set_notice_receiver(notice_processor_Notice)
- self.rowsInserted = self.db.query(sql.encode('utf-8'))
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ self.rowsInserted = cur.rowcount
except Exception as e:
# We need to be a bit careful about the error since it may contain non-unicode characters
strE = e.__str__().encode().decode('unicode-escape')
@@ -2699,7 +2701,9 @@ def do_update(self,fromname,index):
self.log(self.LOG, sql)
if not self.options.D:
try:
- self.rowsUpdated = self.db.query(sql.encode('utf-8'))
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ self.rowsUpdated = cur.rowcount
except Exception as e:
# We need to be a bit careful about the error since it may contain non-unicode characters
strE = str(str(e), errors = 'ignore')
@@ -2725,14 +2729,15 @@ def get_table_dist_key(self):
"c.relnamespace = n.oid and " + \
"n.nspname = '%s' and c.relname = '%s'; " % (quote_unident(self.schema), quote_unident(self.table))
- resultList = self.db.query(sql).getresult()
- attrs = []
- count = 0
- while count < len(resultList):
- attrs.append((resultList[count])[0])
- count = count + 1
-
- return attrs
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ resultList = cur.fetchall()
+ attrs = []
+ count = 0
+ while count < len(resultList):
+ attrs.append((resultList[count])[0])
+ count = count + 1
+ return attrs
def table_supports_update(self):
""" Check wether columns being updated are distribution key."""
@@ -2789,7 +2794,8 @@ def do_method_merge(self):
self.log(self.LOG, sql)
if not self.options.D:
try:
- self.db.query(sql.encode('utf-8'))
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
except Exception as e:
strE = str(str(e), errors = 'ignore')
strF = str(str(sql), errors = 'ignore')
@@ -2810,7 +2816,9 @@ def do_method_merge(self):
self.log(self.LOG, sql)
if not self.options.D:
try:
- self.rowsInserted = self.db.query(sql.encode('utf-8'))
+ with self.conn.cursor() as cur:
+ cur.execute(sql)
+ self.rowsInserted = cur.rowcount
except Exception as e:
# We need to be a bit careful about the error since it may contain non-unicode characters
strE = str(str(e), errors = 'ignore')
@@ -2822,7 +2830,8 @@ def do_truncate(self, tblname):
if not self.options.D:
try:
truncateSQLtext = "truncate %s" % tblname
- self.db.query(truncateSQLtext.encode('utf-8'))
+ with self.conn.cursor() as cur:
+ cur.execute(truncateSQLtext)
except Exception as e:
self.log(self.ERROR, 'could not execute truncate target %s: %s' % (tblname, str(e)))
@@ -2843,8 +2852,9 @@ def do_method(self):
truncate = False
self.reuse_tables = False
- if not self.options.no_auto_trans:
- self.db.query("BEGIN")
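+        # With autocommit enabled on the connection, plain INSERT loads run
+        # statement-by-statement; only the other methods are wrapped in an
+        # explicit BEGIN/COMMIT pair (see the matching COMMIT below).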
+        if not self.options.no_auto_trans and method != 'insert':
+ with self.conn.cursor() as cur:
+ cur.execute("BEGIN")
self.extSchemaName = self.getconfig('gpload:external:schema', str, None)
if self.extSchemaName == '%':
@@ -2880,7 +2890,8 @@ def do_method(self):
self.log(self.LOG, "Pre-SQL from user: %s" % before)
if not self.options.D:
try:
- self.db.query(before.encode('utf-8'))
+ with self.conn.cursor() as cur:
+ cur.execute(before)
except Exception as e:
self.log(self.ERROR, 'could not execute SQL in sql:before "%s": %s' %
(before, str(e)))
@@ -2903,15 +2914,15 @@ def do_method(self):
self.log(self.LOG, "Post-SQL from user: %s" % after)
if not self.options.D:
try:
- self.db.query(after.encode('utf-8'))
+ with self.conn.cursor() as cur:
+ cur.execute(after)
except Exception as e:
self.log(self.ERROR, 'could not execute SQL in sql:after "%s": %s' %
(after, str(e)))
- if not self.options.no_auto_trans:
- self.db.query("COMMIT")
-
-
+        if not self.options.no_auto_trans and method != 'insert':
+ with self.conn.cursor() as cur:
+ cur.execute("COMMIT")
def stop_gpfdists(self):
if self.subprocesses:
@@ -2948,7 +2959,7 @@ def run2(self):
self.log(self.INFO, 'running time: %.2f seconds'%(time.time()-start))
def run(self):
- self.db = None
+ self.conn = None
self.rowsInserted = 0
self.rowsUpdated = 0
signal.signal(signal.SIGINT, handle_kill)
@@ -2979,14 +2990,15 @@ def run(self):
for a in self.cleanupSql:
try:
self.log(self.DEBUG, a)
- self.db.query(a)
+ with self.conn.cursor() as cur:
+ cur.execute(a)
except (Exception, SystemExit):
traceback.print_exc(file=self.logfile)
self.logfile.flush()
traceback.print_exc()
- if self.db != None:
- self.db.close()
+ if self.conn != None:
+ self.conn.close()
self.log(self.INFO, 'rows Inserted = ' + str(self.rowsInserted))
self.log(self.INFO, 'rows Updated = ' + str(self.rowsUpdated))
diff --git a/gpMgmt/bin/gpload_test/gpload2/TEST_REMOTE.py b/gpMgmt/bin/gpload_test/gpload2/TEST_REMOTE.py
index ae1618f78cf..eb59c129dae 100755
--- a/gpMgmt/bin/gpload_test/gpload2/TEST_REMOTE.py
+++ b/gpMgmt/bin/gpload_test/gpload2/TEST_REMOTE.py
@@ -11,7 +11,7 @@
import re
import subprocess
from shutil import copyfile
-import pg
+import psycopg2
"""
Global Values
@@ -275,44 +275,34 @@ def copy_data(source='',target=''):
copyfile(os.path.join('data', source), target)
def get_table_name():
- try:
- db = pg.DB(dbname='reuse_gptest'
- ,host='localhost'
- ,port=int(PGPORT)
- )
- except Exception as e:
- errorMessage = str(e)
- print(('could not connect to database: ' + errorMessage))
- queryString = """SELECT relname
- from pg_class
- WHERE relname
- like 'ext_gpload_reusable%'
- OR relname
- like 'staging_gpload_reusable%';"""
- resultList = db.query(queryString.encode('utf-8')).getresult()
- return resultList
+ with psycopg2.connect(dbname='reuse_gptest',
+ host='localhost',
+ port=int(PGPORT)) as conn:
+ with conn.cursor() as cur:
+ queryString = """SELECT relname
+ from pg_class
+ WHERE relname
+ like 'ext_gpload_reusable%'
+ OR relname
+ like 'staging_gpload_reusable%';"""
+ cur.execute(queryString)
+ resultList = cur.fetchall()
+ return resultList
def drop_tables():
- try:
- db = pg.DB(dbname='reuse_gptest'
- ,host='localhost'
- ,port=int(PGPORT)
- )
- except Exception as e:
- errorMessage = str(e)
- print(('could not connect to database: ' + errorMessage))
-
- list = get_table_name()
- for i in list:
- name = i[0]
- match = re.search('ext_gpload',name)
- if match:
- queryString = "DROP EXTERNAL TABLE %s" % name
- db.query(queryString.encode('utf-8'))
-
- else:
- queryString = "DROP TABLE %s" % name
- db.query(queryString.encode('utf-8'))
+ table_list = get_table_name()
+ with psycopg2.connect(dbname='reuse_gptest',
+ host='localhost',
+ port=int(PGPORT)) as conn:
+ with conn.cursor() as cur:
+ for i in table_list:
+ name = i[0]
+ match = re.search('ext_gpload',name)
+ if match:
+ queryString = "DROP EXTERNAL TABLE %s" % name
+ else:
+ queryString = "DROP TABLE %s" % name
+ cur.execute(queryString)
class PSQLError(Exception):
'''
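
One subtlety in the rewritten test helpers above and below: "with psycopg2.connect(...) as conn:"
scopes a transaction (commit on success, rollback on error) but does not close the connection on
exit. That is harmless for these short-lived helpers; the strict form would be:

    conn = psycopg2.connect(dbname='reuse_gptest', host='localhost', port=int(PGPORT))
    try:
        with conn:                       # transaction scope
            with conn.cursor() as cur:   # cursor closed on exit
                cur.execute('SELECT 1')
    finally:
        conn.close()                     # the with-block above does not do this
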
diff --git a/gpMgmt/bin/gpload_test/gpload2/TEST_local_base.py b/gpMgmt/bin/gpload_test/gpload2/TEST_local_base.py
index 5c47e724ff3..6aa60f9d8bc 100755
--- a/gpMgmt/bin/gpload_test/gpload2/TEST_local_base.py
+++ b/gpMgmt/bin/gpload_test/gpload2/TEST_local_base.py
@@ -12,6 +12,7 @@
import re
#import yaml
import pytest
+import psycopg2
from gppylib.commands.gp import get_coordinatordatadir
@@ -19,20 +20,6 @@
import subprocess32 as subprocess
except:
import subprocess
-try:
- import pg
-except ImportError:
- try:
- from pygresql import pg
- except Exception as e:
- pass
-except Exception as e:
- print(repr(e))
- errorMsg = "gpload was unable to import The PyGreSQL Python module (pg.py) - %s\n" % str(e)
- sys.stderr.write(str(errorMsg))
- errorMsg = "Please check if you have the correct Visual Studio redistributable package installed.\n"
- sys.stderr.write(str(errorMsg))
- sys.exit(2)
def get_port_from_conf():
file = get_coordinatordatadir()+'/postgresql.conf'
@@ -414,49 +401,38 @@ def copy_data(source='',target=''):
def get_table_name():
- try:
- db = pg.DB(dbname='reuse_gptest'
- ,host='localhost'
- ,port=int(PGPORT)
- )
- except Exception as e:
- errorMessage = str(e)
- print ('could not connect to database: ' + errorMessage)
- queryString = """SELECT sch.table_schema, cls.relname
- FROM pg_class AS cls, information_schema.tables AS sch
- WHERE
- (cls.relname LIKE 'ext_gpload_reusable%'
- OR
- relname LIKE 'staging_gpload_reusable%')
- AND cls.relname=sch.table_name;"""
- resultList = db.query(queryString.encode('utf-8')).getresult()
- print(resultList)
- return resultList
+ with psycopg2.connect(dbname='reuse_gptest',
+ host='localhost',
+ port=int(PGPORT)) as conn:
+ with conn.cursor() as cur:
+ queryString = """SELECT sch.table_schema, cls.relname
+ FROM pg_class AS cls, information_schema.tables AS sch
+ WHERE
+ (cls.relname LIKE 'ext_gpload_reusable%'
+ OR
+ relname LIKE 'staging_gpload_reusable%')
+ AND cls.relname=sch.table_name;"""
+ cur.execute(queryString)
+ resultList = cur.fetchall()
+ return resultList
def drop_tables():
'''drop external and staging tables'''
- try:
- db = pg.DB(dbname='reuse_gptest'
- ,host='localhost'
- ,port=int(PGPORT)
- )
- except Exception as e:
- errorMessage = str(e)
- print ('could not connect to database: ' + errorMessage)
-
tableList = get_table_name()
- for i in tableList:
- schema = i[0]
- name = i[1]
- match = re.search('ext_gpload',name)
- if match:
- queryString = 'DROP EXTERNAL TABLE "%s"."%s";'%(schema, name)
- db.query(queryString.encode('utf-8'))
-
- else:
- queryString = 'DROP TABLE "%s"."%s";'%(schema, name)
- db.query(queryString.encode('utf-8'))
+ with psycopg2.connect(dbname='reuse_gptest',
+ host='localhost',
+ port=int(PGPORT)) as conn:
+ with conn.cursor() as cur:
+ for i in tableList:
+ schema = i[0]
+ name = i[1]
+ match = re.search('ext_gpload',name)
+ if match:
+ queryString = 'DROP EXTERNAL TABLE "%s"."%s";'%(schema, name)
+ else:
+ queryString = 'DROP TABLE "%s"."%s";'%(schema, name)
+ cur.execute(queryString)
class PSQLError(Exception):
'''
diff --git a/gpMgmt/bin/gpload_test/gpload2/TEST_local_schema_and_mode.py b/gpMgmt/bin/gpload_test/gpload2/TEST_local_schema_and_mode.py
index 79f05d1c74c..8ad5e45deeb 100644
--- a/gpMgmt/bin/gpload_test/gpload2/TEST_local_schema_and_mode.py
+++ b/gpMgmt/bin/gpload_test/gpload2/TEST_local_schema_and_mode.py
@@ -880,3 +880,14 @@ def test_547_gpload_insert_staging_without_DK():
f = open(TestBase.mkpath('query547.sql'), 'a')
f.write("\\! psql -d reuse_gptest -c '\\d staging_gpload_reusable_*'")
f.close()
+
+@TestBase.prepare_before_test(num=548, times=1)
+def test_548_gpload_exttable_with_special_schema_name():
+ "548 gpload reuse external table with special schema name"
+ setup_file = TestBase.mkpath('setup.sql')
+ TestBase.runfile(setup_file)
+ with open(TestBase.mkpath('query548.sql'), 'wt') as f:
+ f.write("\\! gpload -f " + TestBase.mkpath('config/config_file') + "\n")
+ f.write("\\! psql -d reuse_gptest -c 'select count(*) from csvtable;'\n")
+ TestBase.copy_data('external_file_13.csv','data_file.csv')
+ TestBase.write_config_file(reuse_tables=True, format='csv', file='data_file.csv', table='csvtable', delimiter="','",log_errors=True,error_limit=10, staging_table='staging_table',externalSchema='spiegelungssätze')
diff --git a/gpMgmt/bin/gpload_test/gpload2/query205.ans b/gpMgmt/bin/gpload_test/gpload2/query205.ans
index e47d90653e6..542c9597b12 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query205.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query205.ans
@@ -2,7 +2,7 @@
2021-11-29 11:56:45|INFO|setting schema 'public' for table 'texttable2'
2021-11-29 11:56:45|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-11-29 11:56:45|INFO|did not find an external table to reuse. creating ext_gpload_reusable_5e8c16ac_50c8_11ec_a3c6_0050569e2380
-2021-11-29 11:56:45|ERROR|could not run SQL "create external table ext_gpload_reusable_5e8c16ac_50c8_11ec_a3c6_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '' null '\N' escape '\' ) encoding'UTF8' ": ERROR: COPY delimiter must be a single one-byte character, or 'off'
+2021-11-29 11:56:45|ERROR|could not run SQL "create external table ext_gpload_reusable_5e8c16ac_50c8_11ec_a3c6_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '' null '\N' escape '\' ) encoding'UTF8' ": COPY delimiter must be a single one-byte character, or 'off'
2021-11-29 11:56:45|INFO|rows Inserted = 0
2021-11-29 11:56:45|INFO|rows Updated = 0
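
The .ans updates in this and the following files all stem from the driver switch: str() of a
psycopg2 DatabaseError carries the primary message (plus any LINE/HINT context) without libpq's
"ERROR:" severity tag, so gpload's own |ERROR| log prefix is no longer doubled. A sketch of the
difference:

    import psycopg2

    conn = psycopg2.connect(dbname='postgres')
    try:
        with conn.cursor() as cur:
            cur.execute('SELECT no_such_column')
    except psycopg2.DatabaseError as e:
        print(str(e).splitlines()[0])      # column "no_such_column" does not exist
        print(e.pgerror.splitlines()[0])   # ERROR:  column "no_such_column" does not exist
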
diff --git a/gpMgmt/bin/gpload_test/gpload2/query207.ans b/gpMgmt/bin/gpload_test/gpload2/query207.ans
index bde4034f5af..332aff46d38 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query207.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query207.ans
@@ -2,7 +2,7 @@
2020-12-17 15:38:22|INFO|setting schema 'public' for table 'texttable2'
2020-12-17 15:38:22|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2020-12-17 15:38:22|INFO|reusing external table ext_gpload_reusable_6f2dfb50_4035_11eb_b7f6_00505698d059
-2020-12-17 15:38:22|ERROR|ERROR: missing data for column "s2" (seg0 slice1 10.152.8.113:7002 pid=6654)
+2020-12-17 15:38:22|ERROR|missing data for column "s2" (seg0 slice1 10.152.8.113:7002 pid=6654)
CONTEXT: External table ext_gpload_reusable_6f2dfb50_4035_11eb_b7f6_00505698d059, line 1 of gpfdist://*:pathto/data_file.txt: "123456789 abcd"
encountered while running INSERT INTO public."texttable2" ("s1","s2") SELECT "s1","s2" FROM ext_gpload_reusable_6f2dfb50_4035_11eb_b7f6_00505698d059
2020-12-17 15:38:22|INFO|rows Inserted = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query211.ans b/gpMgmt/bin/gpload_test/gpload2/query211.ans
index 086b23498e8..f9e732da4e6 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query211.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query211.ans
@@ -2,7 +2,7 @@
2020-12-18 16:59:08|INFO|setting schema 'public' for table 'texttable2'
2020-12-18 16:59:08|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2020-12-18 16:59:08|INFO|did not find an external table to reuse. creating ext_gpload_reusable_49ed34de_410f_11eb_bbac_00505698d059
-2020-12-18 16:59:08|ERROR|ERROR: extra data after last expected column (seg2 slice1 10.152.8.113:7004 pid=2301)
+2020-12-18 16:59:08|ERROR|extra data after last expected column (seg2 slice1 10.152.8.113:7004 pid=2301)
CONTEXT: External table ext_gpload_reusable_49ed34de_410f_11eb_bbac_00505698d059, line 1 of gpfdist://*:pathto/data_file.txt: "a|||b"
encountered while running INSERT INTO public."texttable2" ("s1","s2") SELECT "s1","s2" FROM ext_gpload_reusable_49ed34de_410f_11eb_bbac_00505698d059
2020-12-18 16:59:08|INFO|rows Inserted = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query220.ans b/gpMgmt/bin/gpload_test/gpload2/query220.ans
index 77139e22a90..af6249e4be0 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query220.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query220.ans
@@ -2,7 +2,7 @@
2020-12-21 10:55:38|INFO|setting schema 'public' for table 'texttable1'
2020-12-21 10:55:38|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2020-12-21 10:55:38|INFO|did not find an external table to reuse. creating ext_gpload_reusable_015f190a_4338_11eb_b807_00505698d059
-2020-12-21 10:55:38|ERROR|ERROR: missing data for column "n8" (seg0 slice1 10.152.8.113:7002 pid=3859)
+2020-12-21 10:55:38|ERROR|missing data for column "n8" (seg0 slice1 10.152.8.113:7002 pid=3859)
CONTEXT: External table ext_gpload_reusable_015f190a_4338_11eb_b807_00505698d059, line 1 of gpfdist://*:pathto/data_file.txt: "aaa|qwer|shjhjg|2012-06-01 15:30:30|1|111|834567|45.67|789.123|7.12345|123.456789"
encountered while running INSERT INTO public."texttable1" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ext_gpload_reusable_015f190a_4338_11eb_b807_00505698d059
2020-12-21 10:55:38|INFO|rows Inserted = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query233.ans b/gpMgmt/bin/gpload_test/gpload2/query233.ans
index 1a0ac829822..d2bf87c241d 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query233.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query233.ans
@@ -2,7 +2,7 @@
2021-11-29 11:56:55|INFO|setting schema 'public' for table 'texttable2'
2021-11-29 11:56:55|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-11-29 11:56:55|INFO|did not find an external table to reuse. creating ext_gpload_reusable_64b5f930_50c8_11ec_ad50_0050569e2380
-2021-11-29 11:56:55|ERROR|could not run SQL "create external table ext_gpload_reusable_64b5f930_50c8_11ec_ad50_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null 'E''\x08E'\'' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "\"
+2021-11-29 11:56:55|ERROR|could not run SQL "create external table ext_gpload_reusable_64b5f930_50c8_11ec_ad50_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null 'E''\x08E'\'' escape '\' ) encoding'UTF8' ": syntax error at or near "\"
LINE 1: ....txt') format'text' (delimiter '|' null 'E''\x08E'\'' escape...
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/query241.ans b/gpMgmt/bin/gpload_test/gpload2/query241.ans
index 89f88129cec..09f834db98a 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query241.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query241.ans
@@ -2,7 +2,7 @@
2021-01-04 16:30:12|INFO|setting schema 'public' for table 'texttable'
2021-01-04 16:30:12|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2021-01-04 16:30:12|INFO|did not find an external table to reuse. creating ext_gpload_reusable_0ff6e3e6_4e67_11eb_9d70_00505698d059
-2021-01-04 16:30:12|ERROR|ERROR: character with byte sequence 0xad 0xe5 in encoding "GBK" has no equivalent in encoding "UTF8" (seg1 slice1 10.152.8.113:7003 pid=20453)
+2021-01-04 16:30:12|ERROR|character with byte sequence 0xad 0xe5 in encoding "GBK" has no equivalent in encoding "UTF8" (seg1 slice1 10.152.8.113:7003 pid=20453)
CONTEXT: External table ext_gpload_reusable_0ff6e3e6_4e67_11eb_9d70_00505698d059, line 1 of file gpfdist://*:pathto/data_file.txt
encountered while running INSERT INTO public."texttable" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7" FROM ext_gpload_reusable_0ff6e3e6_4e67_11eb_9d70_00505698d059
2021-01-04 16:30:12|INFO|rows Inserted = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query244.ans b/gpMgmt/bin/gpload_test/gpload2/query244.ans
index 4349fd2fa78..316c5b6bb21 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query244.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query244.ans
@@ -2,7 +2,7 @@
2021-01-04 16:35:29|INFO|setting schema 'public' for table 'texttable2'
2021-01-04 16:35:29|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2021-01-04 16:35:29|INFO|did not find an external table to reuse. creating ext_gpload_reusable_ccfc0c6e_4e67_11eb_bac2_00505698d059
-2021-01-04 16:35:29|ERROR|ERROR: invalid byte sequence for encoding "UTF8": 0xd6 0xd0 (seg0 slice1 10.152.8.113:7002 pid=21163)
+2021-01-04 16:35:29|ERROR|invalid byte sequence for encoding "UTF8": 0xd6 0xd0 (seg0 slice1 10.152.8.113:7002 pid=21163)
CONTEXT: External table ext_gpload_reusable_ccfc0c6e_4e67_11eb_bac2_00505698d059, line 1 of file gpfdist://*:pathto/data_file.txt
encountered while running INSERT INTO public."texttable2" ("s1","s2") SELECT "s1","s2" FROM ext_gpload_reusable_ccfc0c6e_4e67_11eb_bac2_00505698d059
2021-01-04 16:35:29|INFO|rows Inserted = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query259.ans b/gpMgmt/bin/gpload_test/gpload2/query259.ans
index 44553764526..da5c847fec1 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query259.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query259.ans
@@ -2,7 +2,7 @@
2021-11-29 11:57:05|INFO|setting schema 'public' for table 'texttable2'
2021-11-29 11:57:05|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-11-29 11:57:05|INFO|did not find an external table to reuse. creating ext_gpload_reusable_6adbab20_50c8_11ec_94a0_0050569e2380
-2021-11-29 11:57:05|ERROR|could not run SQL "create external table ext_gpload_reusable_6adbab20_50c8_11ec_94a0_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' newline 'LFCR' ) encoding'UTF8' ": ERROR: invalid value for NEWLINE "LFCR"
+2021-11-29 11:57:05|ERROR|could not run SQL "create external table ext_gpload_reusable_6adbab20_50c8_11ec_94a0_0050569e2380("s1" text,"s2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' newline 'LFCR' ) encoding'UTF8' ": invalid value for NEWLINE "LFCR"
HINT: Valid options are: 'LF', 'CRLF' and 'CR'.
2021-11-29 11:57:05|INFO|rows Inserted = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query260.ans b/gpMgmt/bin/gpload_test/gpload2/query260.ans
index 0019311b50c..8256642f596 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query260.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query260.ans
@@ -2,7 +2,7 @@
2021-11-30 15:00:23|INFO|setting schema 'public' for table 'texttable2'
2021-11-30 15:00:23|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.csv" -t 30
2021-11-30 15:00:23|INFO|did not find an external table to reuse. creating ext_gpload_reusable_30a0dcd0_51ab_11ec_839b_0050569e2380
-2021-11-30 15:00:23|ERROR|could not run SQL "create external table ext_gpload_reusable_30a0dcd0_51ab_11ec_839b_0050569e2380("s1" text,"s2" text)location('gpfdist://*:pathto/data_file.csv') format'csv' (delimiter ',' null '' escape '"' quote '"' newline 'LFCR' ) encoding'UTF8' ": ERROR: invalid value for NEWLINE "LFCR"
+2021-11-30 15:00:23|ERROR|could not run SQL "create external table ext_gpload_reusable_30a0dcd0_51ab_11ec_839b_0050569e2380("s1" text,"s2" text)location('gpfdist://*:pathto/data_file.csv') format'csv' (delimiter ',' null '' escape '"' quote '"' newline 'LFCR' ) encoding'UTF8' ": invalid value for NEWLINE "LFCR"
HINT: Valid options are: 'LF', 'CRLF' and 'CR'.
2021-11-30 15:00:23|INFO|rows Inserted = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query31.ans b/gpMgmt/bin/gpload_test/gpload2/query31.ans
index f01dc093e85..e43ec31c74f 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query31.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query31.ans
@@ -3,7 +3,7 @@
2018-07-20 09:06:30|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2018-07-20 09:06:30|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_9faa546d615fa55cc3e9e2cee6f130b0
2018-07-20 09:06:30|INFO|reusing external table ext_gpload_reusable_30024be2_8bfc_11e8_83d4_0242ac110002
-2018-07-20 09:06:30|ERROR|ERROR: column "n8" does not exist
+2018-07-20 09:06:30|ERROR|column "n8" does not exist
LINE 1: ..."s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ...
^
encountered while running INSERT INTO staging_gpload_reusable_9faa546d615fa55cc3e9e2cee6f130b0 ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ext_gpload_reusable_30024be2_8bfc_11e8_83d4_0242ac110002
@@ -16,7 +16,7 @@ LINE 1: ..."s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ...
2018-07-20 09:06:30|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2018-07-20 09:06:30|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_9faa546d615fa55cc3e9e2cee6f130b0
2018-07-20 09:06:30|INFO|reusing external table ext_gpload_reusable_30024be2_8bfc_11e8_83d4_0242ac110002
-2018-07-20 09:06:30|ERROR|ERROR: column "n8" does not exist
+2018-07-20 09:06:30|ERROR|column "n8" does not exist
LINE 1: ..."s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ...
^
encountered while running INSERT INTO staging_gpload_reusable_9faa546d615fa55cc3e9e2cee6f130b0 ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ext_gpload_reusable_30024be2_8bfc_11e8_83d4_0242ac110002
diff --git a/gpMgmt/bin/gpload_test/gpload2/query312.ans b/gpMgmt/bin/gpload_test/gpload2/query312.ans
index 84bea6cc455..030f88c59de 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query312.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query312.ans
@@ -2,7 +2,7 @@
2020-12-17 15:57:28|INFO|setting schema 'public' for table 'texttable'
2020-12-17 15:57:28|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2020-12-17 15:57:28|INFO|did not find an external table to reuse. creating ext_gpload_reusable_823e0c46_403d_11eb_ba00_000c299afcc5
-2020-12-17 15:57:29|ERROR|ERROR: segment reject limit reached, aborting operation (seg0 slice1 127.0.0.1:6000 pid=3953)
+2020-12-17 15:57:29|ERROR|segment reject limit reached, aborting operation (seg0 slice1 127.0.0.1:6000 pid=3953)
DETAIL: Last error was: invalid input syntax for type smallint: "invalid string", column n1
CONTEXT: External table ext_gpload_reusable_823e0c46_403d_11eb_ba00_000c299afcc5, line 4 of gpfdist://*:pathto/data_file.txt, column n1
encountered while running INSERT INTO public."texttable" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7" FROM ext_gpload_reusable_823e0c46_403d_11eb_ba00_000c299afcc5
diff --git a/gpMgmt/bin/gpload_test/gpload2/query37.ans b/gpMgmt/bin/gpload_test/gpload2/query37.ans
index c51d7cdcd7d..b0050f24435 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query37.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query37.ans
@@ -5,7 +5,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-11-29 11:57:32|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-11-29 11:57:32|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_afbaac0da7ced19791c9ab9c537f41d3
2021-11-29 11:57:32|INFO|did not find an external table to reuse. creating ext_gpload_reusable_7ac93e9e_50c8_11ec_821e_0050569e2380
-2021-11-29 11:57:32|ERROR|could not run SQL "create external table ext_gpload_reusable_7ac93e9e_50c8_11ec_821e_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'xxxx' ": ERROR: xxxx is not a valid encoding name
+2021-11-29 11:57:32|ERROR|could not run SQL "create external table ext_gpload_reusable_7ac93e9e_50c8_11ec_821e_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'xxxx' ": xxxx is not a valid encoding name
2021-11-29 11:57:32|INFO|rows Inserted = 0
2021-11-29 11:57:32|INFO|rows Updated = 0
@@ -18,7 +18,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-11-29 11:57:32|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-11-29 11:57:32|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_afbaac0da7ced19791c9ab9c537f41d3
2021-11-29 11:57:32|INFO|did not find an external table to reuse. creating ext_gpload_reusable_7ae5d7e8_50c8_11ec_9979_0050569e2380
-2021-11-29 11:57:32|ERROR|could not run SQL "create external table ext_gpload_reusable_7ae5d7e8_50c8_11ec_9979_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'xxxx' ": ERROR: xxxx is not a valid encoding name
+2021-11-29 11:57:32|ERROR|could not run SQL "create external table ext_gpload_reusable_7ae5d7e8_50c8_11ec_9979_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'xxxx' ": xxxx is not a valid encoding name
2021-11-29 11:57:32|INFO|rows Inserted = 0
2021-11-29 11:57:32|INFO|rows Updated = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query402.ans b/gpMgmt/bin/gpload_test/gpload2/query402.ans
index b2b699b307e..9ec5a34c349 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query402.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query402.ans
@@ -2,7 +2,7 @@
2021-11-30 15:17:58|INFO|setting schema 'public' for table 'texttable'
2021-11-30 15:17:58|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2021-11-30 15:17:58|INFO|did not find an external table to reuse. creating non_ext_schema_test.ext_gpload_reusable_a53b654a_51ad_11ec_b1b3_0050569e2380
-2021-11-30 15:17:58|ERROR|could not run SQL "create external table non_ext_schema_test.ext_gpload_reusable_a53b654a_51ad_11ec_b1b3_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'UTF8' ": ERROR: schema "non_ext_schema_test" does not exist
+2021-11-30 15:17:58|ERROR|could not run SQL "create external table non_ext_schema_test.ext_gpload_reusable_a53b654a_51ad_11ec_b1b3_0050569e2380("s1" text,"s2" text,"s3" text,"dt" timestamp without time zone,"n1" smallint,"n2" integer,"n3" bigint,"n4" numeric,"n5" numeric,"n6" real,"n7" double precision)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter '|' null '\N' escape '\' ) encoding'UTF8' ": schema "non_ext_schema_test" does not exist
2021-11-30 15:17:58|INFO|rows Inserted = 0
2021-11-30 15:17:58|INFO|rows Updated = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query497.ans b/gpMgmt/bin/gpload_test/gpload2/query497.ans
index 8a9dd85417e..9d291185652 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query497.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query497.ans
@@ -5,7 +5,10 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-01-04 16:55:52|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30
2021-01-04 16:55:52|INFO|reusing staging table STAGING_GPLOAD_REUSABLE
2021-01-04 16:55:52|INFO|did not find an external table to reuse. creating ext_gpload_reusable_a651e6f8_4e6a_11eb_b8a4_7085c2381836
-2021-01-04 16:55:53|ERROR|unexpected error -- backtrace written to log file
+2021-01-04 16:55:52|ERROR|column "non_col" does not exist
+LINE 1: ...ble."s1" and into_table."s2"=from_table."s2" and non_col = ...
+ ^
+ encountered while running update public."texttable" into_table set "n2"=from_table."n2" from staging_gpload_reusable_4b4814f7db18b678f1605a0caec3c1fe from_table where into_table."n1"=from_table."n1" and into_table."s1"=from_table."s1" and into_table."s2"=from_table."s2" and non_col = 5
2021-01-04 16:55:53|INFO|rows Inserted = 0
2021-01-04 16:55:53|INFO|rows Updated = 0
2021-01-04 16:55:53|INFO|data formatting errors = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query522.ans b/gpMgmt/bin/gpload_test/gpload2/query522.ans
index 5114fb13fad..79634a5feaa 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query522.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query522.ans
@@ -7,7 +7,7 @@ CREATE TABLE
2021-01-07 16:25:52|INFO|setting schema 'public' for table 'mapping_test'
2021-01-07 16:25:52|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30
2021-01-07 16:25:52|INFO|did not find an external table to reuse. creating ext_gpload_reusable_f42535ca_50c1_11eb_a32e_7085c2381836
-2021-01-07 16:25:52|ERROR|ERROR: column "n3" does not exist
+2021-01-07 16:25:52|ERROR|column "n3" does not exist
LINE 1: ...blic."mapping_test" ("s1","s2","s3") SELECT c1,c2,n3 FROM ex...
^
HINT: Perhaps you meant to reference the column "ext_gpload_reusable_f42535ca_50c1_11eb_a32e_7085c2381836.c3" or the column "mapping_test.s3".
diff --git a/gpMgmt/bin/gpload_test/gpload2/query523.ans b/gpMgmt/bin/gpload_test/gpload2/query523.ans
index 2faf394b2a4..e9b6bff5bd8 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query523.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query523.ans
@@ -7,7 +7,7 @@ CREATE TABLE
2021-01-07 16:26:05|INFO|setting schema 'public' for table 'mapping_test'
2021-01-07 16:26:06|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30
2021-01-07 16:26:06|INFO|did not find an external table to reuse. creating ext_gpload_reusable_fc7440a4_50c1_11eb_a0a7_7085c2381836
-2021-01-07 16:26:06|ERROR|ERROR: column "s4" is of type integer but expression is of type text
+2021-01-07 16:26:06|ERROR|column "s4" is of type integer but expression is of type text
LINE 1: ...blic."mapping_test" ("s2","s3","s4") SELECT c2,c3,c1 FROM ex...
^
HINT: You will need to rewrite or cast the expression.
diff --git a/gpMgmt/bin/gpload_test/gpload2/query529.ans b/gpMgmt/bin/gpload_test/gpload2/query529.ans
index 092c9478e23..4f10bb12c7b 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query529.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query529.ans
@@ -7,7 +7,7 @@ CREATE TABLE
2021-01-07 16:37:32|INFO|setting schema 'public' for table 'mapping_test'
2021-01-07 16:37:32|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30
2021-01-07 16:37:32|INFO|did not find an external table to reuse. creating ext_gpload_reusable_959774bc_50c3_11eb_b301_7085c2381836
-2021-01-07 16:37:32|ERROR|ERROR: function rocket_bites(unknown) does not exist
+2021-01-07 16:37:32|ERROR|function rocket_bites(unknown) does not exist
LINE 1: INSERT INTO public."mapping_test" ("s1") SELECT rocket_bites...
^
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
diff --git a/gpMgmt/bin/gpload_test/gpload2/query532.ans b/gpMgmt/bin/gpload_test/gpload2/query532.ans
index 98d7ea1ee9a..244ad2f3be6 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query532.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query532.ans
@@ -12,7 +12,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-01-07 17:38:14|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30
2021-01-07 17:38:14|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_edcb757d70ae1c70cdd2f7d15496f54b
2021-01-07 17:38:14|INFO|did not find an external table to reuse. creating ext_gpload_reusable_102fae3a_50cc_11eb_b6c8_7085c2381836
-2021-01-07 17:38:14|ERROR|ERROR: column "s4" is of type integer but expression is of type text
+2021-01-07 17:38:14|ERROR|column "s4" is of type integer but expression is of type text
LINE 1: ...5c64da950cfbc41ff55 ("s1","s2","s4") SELECT c1,c3,c2 FROM ex...
^
HINT: You will need to rewrite or cast the expression.
diff --git a/gpMgmt/bin/gpload_test/gpload2/query533.ans b/gpMgmt/bin/gpload_test/gpload2/query533.ans
index 809573e625a..64d947128fa 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query533.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query533.ans
@@ -12,7 +12,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-01-07 17:38:26|INFO|started gpfdist -p 8081 -P 8082 -f "/home/cc/repo/gpdb/gpMgmt/bin/gpload_test/gpload2/data/column_mapping_01.txt" -t 30
2021-01-07 17:38:26|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_edcb757d70ae1c70cdd2f7d15496f54b
2021-01-07 17:38:26|INFO|did not find an external table to reuse. creating ext_gpload_reusable_1777c5e2_50cc_11eb_bb05_7085c2381836
-2021-01-07 17:38:26|ERROR|ERROR: column "s4" is of type integer but expression is of type text
+2021-01-07 17:38:26|ERROR|column "s4" is of type integer but expression is of type text
LINE 1: ...5c64da950cfbc41ff55 ("s1","s2","s4") SELECT c1,c3,c2 FROM ex...
^
HINT: You will need to rewrite or cast the expression.
diff --git a/gpMgmt/bin/gpload_test/gpload2/query548.ans b/gpMgmt/bin/gpload_test/gpload2/query548.ans
new file mode 100644
index 00000000000..0933a693444
--- /dev/null
+++ b/gpMgmt/bin/gpload_test/gpload2/query548.ans
@@ -0,0 +1,13 @@
+2023-09-11 11:05:29|INFO|gpload session started 2023-09-11 11:05:29
+2023-09-11 11:05:29|INFO|setting schema 'public' for table 'csvtable'
+2023-09-11 11:05:29|INFO|started gpfdist -p 8081 -P 8082 -f "/home/v/workspace/gpdb7/gpMgmt/bin/gpload_test/gpload2/data_file.csv" -t 30
+2023-09-11 11:05:29|INFO|running time: 0.09 seconds
+2023-09-11 11:05:29|INFO|rows Inserted = 2
+2023-09-11 11:05:29|INFO|rows Updated = 0
+2023-09-11 11:05:29|INFO|data formatting errors = 0
+2023-09-11 11:05:29|INFO|gpload succeeded
+ count
+-------
+ 2
+(1 row)
+
diff --git a/gpMgmt/bin/gpload_test/gpload2/query60.ans b/gpMgmt/bin/gpload_test/gpload2/query60.ans
index 2d1c9dc10bf..673ff09913b 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query60.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query60.ans
@@ -20,7 +20,7 @@
2020-12-07 09:57:17|INFO|setting schema 'public' for table 'texttable'
2020-12-07 09:57:17|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2020-12-07 09:57:17|INFO|did not find an external table to reuse. creating ext_gpload_reusable_89036be0_382f_11eb_95c3_00505698707d
-2020-12-07 09:57:18|ERROR|ERROR: connection with gpfdist failed for "gpfdist://*:pathto/data_file.txt", effective url: "http://*:pathto/data_file.txt": error code = 111 (Connection refused); (seg1 slice1 10.152.8.160:7003 pid=4267)
+2020-12-07 09:57:18|ERROR|connection with gpfdist failed for "gpfdist://*:pathto/data_file.txt", effective url: "http://*:pathto/data_file.txt": error code = 111 (Connection refused); (seg1 slice1 10.152.8.160:7003 pid=4267)
encountered while running INSERT INTO public."texttable" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7" FROM ext_gpload_reusable_89036be0_382f_11eb_95c3_00505698707d
2020-12-07 09:57:18|INFO|rows Inserted = 0
2020-12-07 09:57:18|INFO|rows Updated = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query604.ans b/gpMgmt/bin/gpload_test/gpload2/query604.ans
index a245c1a586b..a8fb8e49529 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query604.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query604.ans
@@ -1,8 +1,8 @@
2021-01-17 20:28:18|INFO|gpload session started 2021-01-17 20:28:18
2021-01-17 20:28:18|INFO|setting schema 'public' for table 'texttable'
2021-01-17 20:28:18|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/temp/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
-2021-01-17 20:28:18|INFO|reusing external staging table staging_test
-2021-01-17 20:28:18|ERROR|ERROR: column "n8" does not exist
+2021-01-17 20:28:18|INFO|reusing external staging table "STAGING_test"
+2021-01-17 20:28:18|ERROR|column "n8" does not exist
LINE 1: ..."s2","s3","dt","n1","n2","n3","n4","n5","n6","n7","n8" FROM ...
^
HINT: There is a column named "n8" in table "texttable", but it cannot be referenced from this part of the query.
diff --git a/gpMgmt/bin/gpload_test/gpload2/query65.ans b/gpMgmt/bin/gpload_test/gpload2/query65.ans
index 574bc77fb24..442d8ce5e12 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query65.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query65.ans
@@ -2,7 +2,7 @@
2021-01-08 16:05:19|INFO|setting schema 'public' for table 'texttable'
2021-01-08 16:05:19|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt pathto/data_file1.txt pathto/data_file99.txt" -t 30
2021-01-08 16:05:19|INFO|did not find an external table to reuse. creating ext_gpload_reusable_3fe4da80_5188_11eb_bc9e_00505698707d
-2021-01-08 16:05:20|ERROR|ERROR: http response code 404 from gpfdist (gpfdist://*:pathto/data_file.txt%pathto/data_file1.txt%pathto/data_file99.txt): HTTP/1.0 404 file not found (seg0 slice1 10.152.8.160:7002 pid=18998)
+2021-01-08 16:05:20|ERROR|http response code 404 from gpfdist (gpfdist://*:pathto/data_file.txt%pathto/data_file1.txt%pathto/data_file99.txt): HTTP/1.0 404 file not found (seg0 slice1 10.152.8.160:7002 pid=18998)
encountered while running INSERT INTO public."texttable" ("s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7") SELECT "s1","s2","s3","dt","n1","n2","n3","n4","n5","n6","n7" FROM ext_gpload_reusable_3fe4da80_5188_11eb_bc9e_00505698707d
2021-01-08 16:05:20|INFO|rows Inserted = 0
2021-01-08 16:05:20|INFO|rows Updated = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query652.ans b/gpMgmt/bin/gpload_test/gpload2/query652.ans
index 65314566d42..e0904221073 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query652.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query652.ans
@@ -1,7 +1,7 @@
2021-01-04 19:52:14|INFO|gpload session started 2021-01-04 19:52:14
2021-01-04 19:52:14|INFO|setting schema 'public' for table 'texttable_652'
2021-01-04 19:52:14|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30
-2021-01-04 19:52:14|ERROR|could not execute SQL in sql:before "INSERT INTO test_652 VALUES(1)": ERROR: relation "test_652" does not exist
+2021-01-04 19:52:14|ERROR|could not execute SQL in sql:before "INSERT INTO test_652 VALUES(1)": relation "test_652" does not exist
LINE 1: INSERT INTO test_652 VALUES(1)
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/query662.ans b/gpMgmt/bin/gpload_test/gpload2/query662.ans
index d88ee406f42..9eb0144afd1 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query662.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query662.ans
@@ -2,7 +2,7 @@
2021-01-11 18:03:26|INFO|setting schema 'public' for table 'texttable_662'
2021-01-11 18:03:26|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30
2021-01-11 18:03:26|INFO|did not find an external table to reuse. creating ext_gpload_reusable_3f87f84c_53f4_11eb_8b61_005056983e1a
-2021-01-11 18:03:26|ERROR|could not execute SQL in sql:after "INSERT INTO test_662 VALUES(1)": ERROR: relation "test_662" does not exist
+2021-01-11 18:03:26|ERROR|could not execute SQL in sql:after "INSERT INTO test_662 VALUES(1)": relation "test_662" does not exist
LINE 1: INSERT INTO test_662 VALUES(1)
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/query664.ans b/gpMgmt/bin/gpload_test/gpload2/query664.ans
index 794c289b470..bffb8338e58 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query664.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query664.ans
@@ -1,7 +1,7 @@
2021-01-11 18:56:05|INFO|gpload session started 2021-01-11 18:56:05
2021-01-11 18:56:05|INFO|setting schema 'public' for table 'texttable_664'
2021-01-11 18:56:05|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30
-2021-01-11 18:56:05|ERROR|could not execute SQL in sql:before "INSERT INTO test_664_before VALUES('a')": ERROR: invalid input syntax for type integer: "a"
+2021-01-11 18:56:05|ERROR|could not execute SQL in sql:before "INSERT INTO test_664_before VALUES('a')": invalid input syntax for type integer: "a"
LINE 1: INSERT INTO test_664_before VALUES('a')
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/query665.ans b/gpMgmt/bin/gpload_test/gpload2/query665.ans
index 8a005605cca..c634c40d808 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query665.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query665.ans
@@ -2,7 +2,7 @@
2021-01-11 18:56:59|INFO|setting schema 'public' for table 'texttable_665'
2021-01-11 18:56:59|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30
2021-01-11 18:56:59|INFO|did not find an external table to reuse. creating ext_gpload_reusable_ba362bb6_53fb_11eb_a8b6_005056983e1a
-2021-01-11 18:56:59|ERROR|could not execute SQL in sql:after "INSERT INTO test_665_after VALUES('a')": ERROR: invalid input syntax for type integer: "a"
+2021-01-11 18:56:59|ERROR|could not execute SQL in sql:after "INSERT INTO test_665_after VALUES('a')": invalid input syntax for type integer: "a"
LINE 1: INSERT INTO test_665_after VALUES('a')
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/query666.ans b/gpMgmt/bin/gpload_test/gpload2/query666.ans
index a2ebf47574a..85e34e6e217 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query666.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query666.ans
@@ -1,7 +1,7 @@
2021-01-11 19:18:24|INFO|gpload session started 2021-01-11 19:18:24
2021-01-11 19:18:24|INFO|setting schema 'public' for table 'texttable_666'
2021-01-11 19:18:24|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30
-2021-01-11 19:18:24|ERROR|could not execute SQL in sql:before "INSERT INTO test_666_before VALUES('a')": ERROR: invalid input syntax for type integer: "a"
+2021-01-11 19:18:24|ERROR|could not execute SQL in sql:before "INSERT INTO test_666_before VALUES('a')": invalid input syntax for type integer: "a"
LINE 1: INSERT INTO test_666_before VALUES('a')
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/query667.ans b/gpMgmt/bin/gpload_test/gpload2/query667.ans
index 89653613c34..205403e8ce2 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query667.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query667.ans
@@ -5,7 +5,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-01-11 19:20:03|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data/external_file_01.txt" -t 30
2021-01-11 19:20:03|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_08aff1d5e0be087569323178726e90f6
2021-01-11 19:20:03|INFO|did not find an external table to reuse. creating ext_gpload_reusable_f39a934e_53fe_11eb_898a_005056983e1a
-2021-01-11 19:20:04|ERROR|could not execute SQL in sql:after "INSERT INTO test_667_after VALUES('a')": ERROR: invalid input syntax for type integer: "a"
+2021-01-11 19:20:04|ERROR|could not execute SQL in sql:after "INSERT INTO test_667_after VALUES('a')": invalid input syntax for type integer: "a"
LINE 1: INSERT INTO test_667_after VALUES('a')
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/query68.ans b/gpMgmt/bin/gpload_test/gpload2/query68.ans
index 808eff402fd..fcd3dfde0ff 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query68.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query68.ans
@@ -5,7 +5,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-01-08 16:05:22|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file2.txt" -t 30
2021-01-08 16:05:22|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_a1101b5024707ea34f55e778f329e548
2021-01-08 16:05:22|INFO|did not find an external table to reuse. creating ext_gpload_reusable_418b1b42_5188_11eb_93db_00505698707d
-2021-01-08 16:05:22|ERROR|ERROR: column "Field1" is of type bigint but expression is of type text
+2021-01-08 16:05:22|ERROR|column "Field1" is of type bigint but expression is of type text
LINE 1: ...bb31496d7e9a13bd29b90 ("Field1","Field#2") SELECT "Field1","...
^
HINT: You will need to rewrite or cast the expression.
diff --git a/gpMgmt/bin/gpload_test/gpload2/query69.ans b/gpMgmt/bin/gpload_test/gpload2/query69.ans
index c157448556b..fce4bb8c984 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query69.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query69.ans
@@ -5,7 +5,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-01-08 16:28:20|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file2.txt" -t 30
2021-01-08 16:28:20|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_a1101b5024707ea34f55e778f329e548
2021-01-08 16:28:20|INFO|did not find an external table to reuse. creating ext_gpload_reusable_770f4452_518b_11eb_98a2_00505698707d
-2021-01-08 16:28:20|ERROR|ERROR: column "Field1" is of type bigint but expression is of type text
+2021-01-08 16:28:20|ERROR|column "Field1" is of type bigint but expression is of type text
LINE 1: ...bb31496d7e9a13bd29b90 ("Field1","Field#2") SELECT "Field1","...
^
HINT: You will need to rewrite or cast the expression.
diff --git a/gpMgmt/bin/gpload_test/gpload2/query75.ans b/gpMgmt/bin/gpload_test/gpload2/query75.ans
index dd5b01b3add..9bfe7c6edf0 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query75.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query75.ans
@@ -14,7 +14,11 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-08-10 14:56:46|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-08-10 14:56:46|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_5171458efa83aaf8c5bc7004bae85d5b
2021-08-10 14:56:46|INFO|reusing external table ext_gpload_reusable_2092c476_f9a8_11eb_b503_0050569e2380
-2021-08-10 14:56:46|ERROR|unexpected error -- backtrace written to log file
+2021-08-10 14:56:46|ERROR|column "列" does not exist
+LINE 1: ..." FROM (SELECT *, row_number() OVER (PARTITION BY 列#2) AS g...
+ ^
+HINT: Perhaps you meant to reference the column "staging_gpload_reusable_77874e55aae34d59751eb574ff0f5cf7.列1" or the column "chinese表.列1".
+ encountered while running INSERT INTO public."chinese表" ("列1","列#2","lie3") (SELECT from_table."列1",from_table."列#2",from_table."lie3" FROM (SELECT *, row_number() OVER (PARTITION BY 列#2) AS gpload_row_number FROM staging_gpload_reusable_77874e55aae34d59751eb574ff0f5cf7) AS from_table WHERE gpload_row_number=1)
2021-08-10 14:56:46|INFO|rows Inserted = 0
2021-08-10 14:56:46|INFO|rows Updated = 8
2021-08-10 14:56:46|INFO|data formatting errors = 0
@@ -33,7 +37,11 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
2021-08-10 14:56:46|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-08-10 14:56:46|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_5171458efa83aaf8c5bc7004bae85d5b
2021-08-10 14:56:46|INFO|reusing external table ext_gpload_reusable_2092c476_f9a8_11eb_b503_0050569e2380
-2021-08-10 14:56:46|ERROR|unexpected error -- backtrace written to log file
+2021-08-10 14:56:46|ERROR|column "列" does not exist
+LINE 1: ..." FROM (SELECT *, row_number() OVER (PARTITION BY 列#2) AS g...
+ ^
+HINT: Perhaps you meant to reference the column "staging_gpload_reusable_77874e55aae34d59751eb574ff0f5cf7.列1" or the column "chinese表.列1".
+ encountered while running INSERT INTO public."chinese表" ("列1","列#2","lie3") (SELECT from_table."列1",from_table."列#2",from_table."lie3" FROM (SELECT *, row_number() OVER (PARTITION BY 列#2) AS gpload_row_number FROM staging_gpload_reusable_77874e55aae34d59751eb574ff0f5cf7) AS from_table WHERE gpload_row_number=1)
2021-08-10 14:56:46|INFO|rows Inserted = 0
2021-08-10 14:56:46|INFO|rows Updated = 8
2021-08-10 14:56:46|INFO|data formatting errors = 0
diff --git a/gpMgmt/bin/gpload_test/gpload2/query76.ans b/gpMgmt/bin/gpload_test/gpload2/query76.ans
index cb958591c94..26872cf30e8 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query76.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query76.ans
@@ -3,18 +3,12 @@ LINE 13: and pgext.fmtopts like '%delimiter '';'' nu...
^
HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
WARNING: nonstandard use of escape in a string literal
-LINE 1: ...pathto/data_file.txt') format'text' (delimiter ';' null '\N' escap...
- ^
-HINT: Use the escape string syntax for escapes, e.g., E'\r\n'.
WARNING: nonstandard use of \' in a string literal
-LINE 1: ...xt') format'text' (delimiter ';' null '\N' escape '\' ) enco...
- ^
-HINT: Use '' to write quotes in strings, or use the escape string syntax (E'...').
2021-11-29 15:29:03|INFO|gpload session started 2021-11-29 15:29:03
2021-11-29 15:29:03|INFO|setting schema 'public' for table 'chinese表'
2021-11-29 15:29:03|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2021-11-29 15:29:03|INFO|did not find an external table to reuse. creating ext_gpload_reusable_075bc846_50e6_11ec_8cb7_0050569e2380
-2021-11-29 15:29:03|ERROR|could not run SQL "create external table ext_gpload_reusable_075bc846_50e6_11ec_8cb7_0050569e2380("列1" text,"列#2" int,"lie3" timestamp)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "UTF8"
+2021-11-29 15:29:03|ERROR|could not run SQL "create external table ext_gpload_reusable_075bc846_50e6_11ec_8cb7_0050569e2380("列1" text,"列#2" int,"lie3" timestamp)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": syntax error at or near "UTF8"
LINE 1: ...'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8'
^
@@ -41,7 +35,7 @@ HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
2021-11-29 15:29:04|INFO|started gpfdist -p 8081 -P 8082 -f "pathto/data_file.txt" -t 30
2021-11-29 15:29:04|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_5171458efa83aaf8c5bc7004bae85d5b
2021-11-29 15:29:04|INFO|did not find an external table to reuse. creating ext_gpload_reusable_07a3c70e_50e6_11ec_9873_0050569e2380
-2021-11-29 15:29:04|ERROR|could not run SQL "create external table ext_gpload_reusable_07a3c70e_50e6_11ec_9873_0050569e2380(列1 text,列#2 int,lie3 timestamp)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "#"
+2021-11-29 15:29:04|ERROR|could not run SQL "create external table ext_gpload_reusable_07a3c70e_50e6_11ec_9873_0050569e2380(列1 text,列#2 int,lie3 timestamp)location('gpfdist://*:pathto/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": syntax error at or near "#"
LINE 1: ...(列1 text,列#2 int,lie...
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/query77.ans b/gpMgmt/bin/gpload_test/gpload2/query77.ans
index cfeacc66960..02d530e7eaa 100644
--- a/gpMgmt/bin/gpload_test/gpload2/query77.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/query77.ans
@@ -3,18 +3,12 @@ LINE 13: and pgext.fmtopts like '%delimiter '';'' nu...
^
HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
WARNING: nonstandard use of escape in a string literal
-LINE 1: .../data_file.txt') format'text' (delimiter ';' null '\N' escap...
- ^
-HINT: Use the escape string syntax for escapes, e.g., E'\r\n'.
WARNING: nonstandard use of \' in a string literal
-LINE 1: ...xt') format'text' (delimiter ';' null '\N' escape '\' ) enco...
- ^
-HINT: Use '' to write quotes in strings, or use the escape string syntax (E'...').
2021-11-29 11:57:13|INFO|gpload session started 2021-11-29 11:57:13
2021-11-29 11:57:13|INFO|setting schema 'public' for table 'testspecialchar'
2021-11-29 11:57:13|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-11-29 11:57:13|INFO|did not find an external table to reuse. creating ext_gpload_reusable_6f40f210_50c8_11ec_89e8_0050569e2380
-2021-11-29 11:57:13|ERROR|could not run SQL "create external table ext_gpload_reusable_6f40f210_50c8_11ec_89e8_0050569e2380("Field1" text,"Field#2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "UTF8"
+2021-11-29 11:57:13|ERROR|could not run SQL "create external table ext_gpload_reusable_6f40f210_50c8_11ec_89e8_0050569e2380("Field1" text,"Field#2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": syntax error at or near "UTF8"
LINE 1: ...'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8'
^
@@ -22,26 +16,18 @@ LINE 1: ...'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8'
2021-11-29 11:57:13|INFO|rows Updated = 0
2021-11-29 11:57:13|INFO|data formatting errors = 0
2021-11-29 11:57:13|INFO|gpload failed
-NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'Field1' as the Apache Cloudberry data distribution key for this table.
-HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
WARNING: nonstandard use of \\ in a string literal
LINE 13: and pgext.fmtopts like '%delimiter '';'' nu...
^
HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
WARNING: nonstandard use of escape in a string literal
-LINE 1: .../data_file.txt') format'text' (delimiter ';' null '\N' escap...
- ^
-HINT: Use the escape string syntax for escapes, e.g., E'\r\n'.
WARNING: nonstandard use of \' in a string literal
-LINE 1: ...xt') format'text' (delimiter ';' null '\N' escape '\' ) enco...
- ^
-HINT: Use '' to write quotes in strings, or use the escape string syntax (E'...').
2021-11-29 11:57:13|INFO|gpload session started 2021-11-29 11:57:13
2021-11-29 11:57:13|INFO|setting schema 'public' for table 'testspecialchar'
2021-11-29 11:57:13|INFO|started gpfdist -p 8081 -P 8082 -f "/home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt" -t 30
2021-11-29 11:57:13|INFO|did not find a staging table to reuse. creating staging_gpload_reusable_40df9a45044f2d17b97f89bbbc58f24f
2021-11-29 11:57:13|INFO|did not find an external table to reuse. creating ext_gpload_reusable_6f5c6568_50c8_11ec_ae2f_0050569e2380
-2021-11-29 11:57:13|ERROR|could not run SQL "create external table ext_gpload_reusable_6f5c6568_50c8_11ec_ae2f_0050569e2380("Field1" text,"Field#2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": ERROR: syntax error at or near "UTF8"
+2021-11-29 11:57:13|ERROR|could not run SQL "create external table ext_gpload_reusable_6f5c6568_50c8_11ec_ae2f_0050569e2380("Field1" text,"Field#2" text)location('gpfdist://10.117.190.10:8081//home/gpadmin/workspace/gpdb/gpMgmt/bin/gpload_test/gpload2/data_file.txt') format'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8' ": syntax error at or near "UTF8"
LINE 1: ...'text' (delimiter ';' null '\N' escape '\' ) encoding'UTF8'
^
diff --git a/gpMgmt/bin/gpload_test/gpload2/setup.ans b/gpMgmt/bin/gpload_test/gpload2/setup.ans
index ea4eeaeb625..386cdd1e1f8 100644
--- a/gpMgmt/bin/gpload_test/gpload2/setup.ans
+++ b/gpMgmt/bin/gpload_test/gpload2/setup.ans
@@ -5,6 +5,10 @@ CREATE DATABASE
You are now connected to database "reuse_gptest" as user "gpadmin".
CREATE SCHEMA test;
CREATE SCHEMA
+CREATE SCHEMA "EXT_schema_test";
+CREATE SCHEMA
+CREATE SCHEMA "spiegelungssätze";
+CREATE SCHEMA
set client_min_messages='warning';
SET
DROP EXTERNAL TABLE IF EXISTS temp_gpload_staging_table;
diff --git a/gpMgmt/bin/gpload_test/gpload2/setup.sql b/gpMgmt/bin/gpload_test/gpload2/setup.sql
index 37bd494cafc..75b1d47fced 100644
--- a/gpMgmt/bin/gpload_test/gpload2/setup.sql
+++ b/gpMgmt/bin/gpload_test/gpload2/setup.sql
@@ -5,6 +5,8 @@ CREATE DATABASE reuse_gptest;
\c reuse_gptest
CREATE SCHEMA test;
+CREATE SCHEMA "EXT_schema_test";
+CREATE SCHEMA "spiegelungssätze";
set client_min_messages='warning';
DROP EXTERNAL TABLE IF EXISTS temp_gpload_staging_table;
diff --git a/gpMgmt/bin/gpmemwatcher b/gpMgmt/bin/gpmemwatcher
index 6569015bc09..ecca0eac168 100755
--- a/gpMgmt/bin/gpmemwatcher
+++ b/gpMgmt/bin/gpmemwatcher
@@ -136,7 +136,7 @@ def launchProcess(host, workdir):
 # Quick check that the python version on the host is >= 2.6
try:
- subprocess.check_call("ssh -T %s '%s python -c \"import sys; sys.exit(1) if sys.hexversion < 0x020600f0 else 0\"'" % (host, py_string), shell=True)
+ subprocess.check_call("ssh -T %s '%s python3 -c \"import sys; sys.exit(1) if sys.hexversion < 0x020600f0 else 0\"'" % (host, py_string), shell=True)
except subprocess.CalledProcessError as e:
#print >> sys.stderr, 'Python version on host %s is < 2.6.0. Aborting' % (host)
print('Python version on host %s is < 2.6.0. Aborting' % (host), file=sys.stderr)
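For reference (a sketch, not part of the patch): sys.hexversion packs the interpreter version as major<<24 | minor<<16 | micro<<8 plus a release-level byte, so the 0x020600f0 floor above is 2.6.0 final and any python3 interpreter passes it trivially. The 3.6 constant below is illustrative, not taken from this patch.

    # Decoding sys.hexversion; the 3.6 floor is an illustrative assumption.
    import sys

    major = sys.hexversion >> 24
    minor = (sys.hexversion >> 16) & 0xFF
    micro = (sys.hexversion >> 8) & 0xFF
    print("running %d.%d.%d" % (major, minor, micro))

    # A python3-era floor would compare against e.g. 0x030600f0 (3.6.0 final).
    assert sys.hexversion >= 0x030600f0, "Python >= 3.6 required"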
diff --git a/gpMgmt/bin/gpmovemirrors b/gpMgmt/bin/gpmovemirrors
index 7220a0f897e..cd78c3b2771 100755
--- a/gpMgmt/bin/gpmovemirrors
+++ b/gpMgmt/bin/gpmovemirrors
@@ -10,10 +10,8 @@ import os
import sys
import signal
import itertools
-
+from psycopg2 import DatabaseError
try:
- import pg
-
from gppylib.commands.unix import *
from gppylib.commands.gp import *
from gppylib.commands.pg import PgControlData
@@ -23,7 +21,6 @@ try:
from gppylib.db import dbconn
from gppylib.userinput import *
from gppylib.operations.startSegments import *
- from pgdb import DatabaseError
from gppylib import gparray, gplog, pgconf, userinput, utils
from gppylib.parseutils import line_reader, check_values, canonicalize_address
from gppylib.operations.segment_tablespace_locations import get_tablespace_locations
diff --git a/gpMgmt/bin/gppylib/commands/base.py b/gpMgmt/bin/gppylib/commands/base.py
index 98481425071..35f2bf4e4f1 100755
--- a/gpMgmt/bin/gppylib/commands/base.py
+++ b/gpMgmt/bin/gppylib/commands/base.py
@@ -29,7 +29,6 @@
from gppylib import gplog
from gppylib import gpsubprocess
-from pg import DB
logger = gplog.get_default_logger()
@@ -637,8 +636,7 @@ def cancel(self):
# if self.conn is not set we cannot cancel.
if self.cancel_conn:
- DB(self.cancel_conn).cancel()
-
+ self.cancel_conn.cancel()
class CommandNotFoundException(Exception):
def __init__(self, cmd, paths):
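With the PyGreSQL DB(...) wrapper gone, the cancel path above relies on psycopg2 connection objects exposing cancel() natively; a minimal sketch, with illustrative connection parameters:

    # Cancelling whatever command is in flight on a connection.
    import psycopg2

    conn = psycopg2.connect(dbname="postgres")  # illustrative parameters
    try:
        conn.cancel()  # interrupts an in-flight command; harmless if the connection is idle
    finally:
        conn.close()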
diff --git a/gpMgmt/bin/gppylib/commands/pg.py b/gpMgmt/bin/gppylib/commands/pg.py
index a2af133c28f..8ae45dbccfa 100644
--- a/gpMgmt/bin/gppylib/commands/pg.py
+++ b/gpMgmt/bin/gppylib/commands/pg.py
@@ -12,6 +12,7 @@
from .unix import *
from gppylib.commands.base import *
from gppylib.commands.gp import RECOVERY_REWIND_APPNAME
+from psycopg2 import DatabaseError
logger = get_default_logger()
diff --git a/gpMgmt/bin/gppylib/commands/test/unit/test_unit_pg_base_backup.py b/gpMgmt/bin/gppylib/commands/test/unit/test_unit_pg_base_backup.py
index 117f62b41ea..3ff231a4a92 100644
--- a/gpMgmt/bin/gppylib/commands/test/unit/test_unit_pg_base_backup.py
+++ b/gpMgmt/bin/gppylib/commands/test/unit/test_unit_pg_base_backup.py
@@ -1,6 +1,119 @@
import unittest
from gppylib.commands import pg
+from test.unit.gp_unittest import GpTestCase, run_tests
+from psycopg2 import DatabaseError
+from mock import Mock, patch, call
+from gppylib.commands.base import CommandResult
+
+class TestUnitPgReplicationSlot(GpTestCase):
+ def setUp(self):
+ mock_logger = Mock(spec=['log', 'warn', 'info', 'debug', 'error', 'warning', 'fatal'])
+ self.replication_slot_name = "internal_wal_replication_slot"
+ self.source_host = "bar"
+ self.source_port = 1234
+
+ self.pg_replication_slot = pg.PgReplicationSlot(
+ self.source_host,
+ self.source_port,
+ self.replication_slot_name,
+ )
+ self.apply_patches([
+ patch('gppylib.commands.pg.logger', return_value=mock_logger),
+ patch('gppylib.db.dbconn.DbURL', return_value=Mock())
+ ])
+
+ self.mock_logger = self.get_mock_from_apply_patch('logger')
+
+ @patch('gppylib.db.dbconn.connect', side_effect=Exception())
+ def test_slot_exist_conn_exception(self, mock1):
+
+ with self.assertRaises(Exception) as ex:
+ self.pg_replication_slot.slot_exists()
+
+ self.assertEqual(1, self.mock_logger.debug.call_count)
+ self.assertEqual([call('Checking if slot internal_wal_replication_slot exists for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+ self.assertTrue('Failed to query pg_replication_slots for' in str(ex.exception))
+
+ @patch('gppylib.db.dbconn.connect', autospec=True)
+ @patch('gppylib.db.dbconn.querySingleton', return_value=1)
+ def test_slot_exist_query_true(self, mock1, mock2):
+ self.assertTrue(self.pg_replication_slot.slot_exists())
+ self.assertEqual(1, self.mock_logger.debug.call_count)
+ self.assertEqual([call('Checking if slot internal_wal_replication_slot exists for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+
+ @patch('gppylib.db.dbconn.connect', autospec=True)
+ @patch('gppylib.db.dbconn.querySingleton', return_value=0)
+ def test_slot_exist_query_false(self, mock1, mock2):
+ self.assertFalse(self.pg_replication_slot.slot_exists())
+ self.assertEqual(2, self.mock_logger.debug.call_count)
+ self.assertEqual([call('Checking if slot internal_wal_replication_slot exists for host:bar, port:1234'),
+ call('Slot internal_wal_replication_slot does not exist for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+
+ @patch('gppylib.db.dbconn.connect', side_effect=Exception())
+ def test_drop_slot_conn_exception(self, mock1):
+ with self.assertRaises(Exception) as ex:
+ self.pg_replication_slot.drop_slot()
+
+ self.assertEqual(1, self.mock_logger.debug.call_count)
+ self.assertEqual([call('Dropping slot internal_wal_replication_slot for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+ self.assertTrue('Failed to drop replication slot for host:bar, port:1234' in str(ex.exception))
+
+ @patch('gppylib.db.dbconn.connect', autospec=True)
+ @patch('gppylib.db.dbconn.query', side_effect=DatabaseError("DatabaseError Exception"))
+ def test_drop_slot_db_error_exception(self, mock1, mock2):
+ self.pg_replication_slot.drop_slot()
+ self.assertEqual(1, self.mock_logger.debug.call_count)
+ self.assertEqual(1, self.mock_logger.exception.call_count)
+ self.assertEqual([call('Dropping slot internal_wal_replication_slot for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+ self.assertEqual([call('Failed to query pg_drop_replication_slot for host:bar, port:1234: DatabaseError Exception')],
+ self.mock_logger.exception.call_args_list)
+
+ @patch('gppylib.db.dbconn.connect', autospec=True)
+ @patch('gppylib.db.dbconn.query', autospec=True)
+ def test_drop_slot_success(self, mock1, mock2):
+ self.assertTrue(self.pg_replication_slot.drop_slot())
+ self.assertEqual(2, self.mock_logger.debug.call_count)
+ self.assertEqual([call('Dropping slot internal_wal_replication_slot for host:bar, port:1234'),
+ call('Successfully dropped replication slot internal_wal_replication_slot for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+
+ @patch('gppylib.db.dbconn.connect', side_effect=Exception())
+ def test_create_slot_conn_exception(self, mock1):
+ with self.assertRaises(Exception) as ex:
+ self.pg_replication_slot.create_slot()
+
+ self.assertEqual(1, self.mock_logger.debug.call_count)
+ self.assertEqual([call('Creating slot internal_wal_replication_slot for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+ self.assertTrue('Failed to create replication slot for host:bar, port:1234' in str(ex.exception))
+
+ @patch('gppylib.db.dbconn.connect', autospec=True)
+ @patch('gppylib.db.dbconn.query', side_effect=DatabaseError("DatabaseError Exception"))
+ def test_create_slot_db_error_exception(self, mock1, mock2):
+ self.pg_replication_slot.create_slot()
+ self.assertEqual(1, self.mock_logger.debug.call_count)
+ self.assertEqual(1, self.mock_logger.exception.call_count)
+ self.assertEqual([call('Creating slot internal_wal_replication_slot for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+ self.assertEqual(
+ [call('Failed to query pg_create_physical_replication_slot for host:bar, port:1234: DatabaseError Exception')],
+ self.mock_logger.exception.call_args_list)
+
+ @patch('gppylib.db.dbconn.connect', autospec=True)
+ @patch('gppylib.db.dbconn.query', autospec=True)
+ def test_create_slot_success(self, mock1, mock2):
+ self.assertTrue(self.pg_replication_slot.create_slot())
+ self.assertEqual(2, self.mock_logger.debug.call_count)
+ self.assertEqual([call('Creating slot internal_wal_replication_slot for host:bar, port:1234'),
+ call(
+ 'Successfully created replication slot internal_wal_replication_slot for host:bar, port:1234')],
+ self.mock_logger.debug.call_args_list)
+
+
class TestUnitPgBaseBackup(unittest.TestCase):
def test_replication_slot_not_passed_when_not_given_slot_name(self):
diff --git a/gpMgmt/bin/gppylib/db/catalog.py b/gpMgmt/bin/gppylib/db/catalog.py
index 6214b805c29..80ca66289eb 100644
--- a/gpMgmt/bin/gppylib/db/catalog.py
+++ b/gpMgmt/bin/gppylib/db/catalog.py
@@ -6,8 +6,8 @@
"""
import copy
-
-import pg
+import os
+from contextlib import closing
from gppylib import gplog
from gppylib.db import dbconn
diff --git a/gpMgmt/bin/gppylib/db/dbconn.py b/gpMgmt/bin/gppylib/db/dbconn.py
index b85f802d02b..1492b2511c5 100644
--- a/gpMgmt/bin/gppylib/db/dbconn.py
+++ b/gpMgmt/bin/gppylib/db/dbconn.py
@@ -9,9 +9,8 @@
import sys
import os
import stat
-
+import psycopg2
try:
- import pgdb
from gppylib.commands.unix import UserId
except ImportError as e:
@@ -159,67 +158,44 @@ def canonicalize(s):
# 1. pg notice is accessible to a user of connection returned by dbconn.connect(),
# lifted from the underlying driver connection
# 2. multiple calls to dbconn.close() should not return an error
-class Connection(pgdb.Connection):
+class Connection:
def __init__(self, connection):
- self._notices = collections.deque(maxlen=100)
- # we must do an attribute by attribute copy of the notices here
- # due to limitations in pg implementation. Wrap with with a
- # namedtuple for ease of use.
- def handle_notice(notice):
- received = {}
- for attr in dir(notice):
- if attr.startswith('__'):
- continue
- value = getattr(notice, attr)
- received[attr] = value
- Notice = collections.namedtuple('Notice', sorted(received))
- self._notices.append(Notice(**received))
-
-
- self._impl = connection
- self._impl._cnx.set_notice_receiver(handle_notice)
+ self._conn = connection
+ self._conn.notices = collections.deque(maxlen=100)
def __enter__(self):
- return self._impl.__enter__()
+ return self._conn.__enter__()
# __exit__() does not close the connection. This is in line with the
# python DB API v2 specification (pep-0249), where close() is done on
# __del__(), not __exit__().
def __exit__(self, *args):
- return self._impl.__exit__(*args)
+ return self._conn.__exit__(*args)
def __getattr__(self, name):
- return getattr(self._impl, name)
+ return getattr(self._conn, name)
def notices(self):
- notice_list = list(self._notices)
- self._notices.clear()
+ notice_list = list(self._conn.notices)
+ self._conn.notices.clear()
return notice_list
# don't return operational error if connection is already closed
def close(self):
- if not self._impl.closed:
- self._impl.close()
+ if not self._conn.closed:
+ self._conn.close()
def connect(dburl, utility=False, verbose=False,
- encoding=None, allowSystemTableMods=False, logConn=True, unsetSearchPath=True):
+ encoding=None, allowSystemTableMods=False, logConn=True, unsetSearchPath=True, cursorFactory=None):
conninfo = {
'user': dburl.pguser,
'password': dburl.pgpass,
'host': dburl.pghost,
'port': dburl.pgport,
- # dbname is very subtle, Package pgdb contains a bug it will only escape the string when
- # 1. a space in the dbname, and
- # 2. there are other keyword arguments of pgdb.connect method
- # See issue https://github.com/PyGreSQL/PyGreSQL/issues/77 for details
- # The code here is test if there is space, if so, we know pgdb will escape, let's not do here
- # if not, let's do escape here since pgdb forget to do.
- #
- # NB: we always provide port keyword argument to connect method of pgdb, thus
- # we will always enter the code path of pgdb.connect of the above escape logic.
- 'database': dburl.pgdb if ' ' in dburl.pgdb else dburl.pgdb.replace('\\', '\\\\').replace("'", "\\'"),
+ 'database': dburl.pgdb,
+ 'cursor_factory': cursorFactory
}
# building options
@@ -257,22 +233,23 @@ def connect(dburl, utility=False, verbose=False,
logFunc = logger.info if dburl.timeout is not None else logger.debug
logFunc("Connecting to db {} on host {}".format(dburl.pgdb, dburl.pghost))
- connection = None
+ conn = None
for i in range(retries):
try:
- connection = pgdb.connect(**conninfo)
+ conn = psycopg2.connect(**conninfo)
+ conn.set_session(autocommit=True)
break
- except pgdb.OperationalError as e:
+ except psycopg2.OperationalError as e:
if 'timeout expired' in str(e):
logger.warning('Timeout expired connecting to %s, attempt %d/%d' % (dburl.pgdb, i+1, retries))
continue
raise
- if connection is None:
+ if conn is None:
raise ConnectionError('Failed to connect to %s' % dburl.pgdb)
- return Connection(connection)
+ return Connection(conn)
def execSQL(conn, sql, autocommit=True):
"""
@@ -286,7 +263,6 @@ def execSQL(conn, sql, autocommit=True):
Using `with dbconn.connect() as conn` syntax will override autocommit and complete
queries in a transaction followed by a commit on context close
"""
- conn.autocommit = autocommit
with conn.cursor() as cursor:
cursor.execute(sql)
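Taken together, the rewrite keeps the old dbconn surface while delegating to psycopg2; a usage sketch under the module layout above (the DbURL arguments are illustrative):

    # Exercising the psycopg2-backed dbconn API from this diff.
    from contextlib import closing
    from gppylib.db import dbconn

    dburl = dbconn.DbURL(hostname="localhost", port=5432, dbname="postgres")
    with closing(dbconn.connect(dburl)) as conn:   # autocommit session via set_session above
        dbconn.execSQL(conn, "CREATE TEMP TABLE t(a int)")
        with conn.cursor() as cur:                 # plain psycopg2 cursor
            cur.execute("SELECT count(*) FROM t")
            print(cur.fetchone()[0])
        for notice in conn.notices():              # drained from the deque shim above
            print(notice)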
diff --git a/gpMgmt/bin/gppylib/db/test/unit/test_cluster_dbconn.py b/gpMgmt/bin/gppylib/db/test/unit/test_cluster_dbconn.py
index 086ccce9298..a03eb4db42b 100644
--- a/gpMgmt/bin/gppylib/db/test/unit/test_cluster_dbconn.py
+++ b/gpMgmt/bin/gppylib/db/test/unit/test_cluster_dbconn.py
@@ -46,7 +46,7 @@ def test_verbose_mode_allows_warnings_to_be_sent_to_the_client(self):
for notice in notices:
- if warning in notice.message:
+ if warning in notice:
return # found it!
self.fail("Didn't find expected notice '{}' in {!r}".format(
diff --git a/gpMgmt/bin/gppylib/gpcatalog.py b/gpMgmt/bin/gppylib/gpcatalog.py
index dc95de34e8f..f802e02da0e 100644
--- a/gpMgmt/bin/gppylib/gpcatalog.py
+++ b/gpMgmt/bin/gppylib/gpcatalog.py
@@ -168,7 +168,7 @@ def __init__(self, dbConnection):
curs = self._query(version_query)
except Exception as e:
raise GPCatalogException("Error reading database version: " + str(e))
- self._version = GpVersion(curs.getresult()[0][0])
+ self._version = GpVersion(curs.fetchone()[0])
# Read the list of catalog tables from the database
try:
@@ -178,7 +178,7 @@ def __init__(self, dbConnection):
# Construct our internal representation of the catalog
- for [oid, relname, relisshared] in curs.getresult():
+ for [oid, relname, relisshared] in curs.fetchall():
self._tables[relname] = GPCatalogTable(self, relname)
# Note: stupid API returns t/f for boolean value
self._tables[relname]._setShared(relisshared == 't')
@@ -217,7 +217,9 @@ def _query(self, qry):
"""
Simple wrapper around querying the database connection
"""
- return self._dbConnection.query(qry)
+ cur = self._dbConnection.cursor()
+ cur.execute(qry)
+ return cur
def _markCoordinatorOnlyTables(self):
"""
@@ -507,10 +509,10 @@ def __init__(self, parent, name, pkey=None):
# exist.
raise GPCatalogException("Catalog table %s does not exist" % name)
- if cur.ntuples() == 0:
+ if cur.rowcount == 0:
raise GPCatalogException("Catalog table %s does not exist" % name)
- for row in cur.getresult():
+ for row in cur.fetchall():
(attname, atttype, typname) = row
# Mark if the catalog has an oid column
@@ -546,7 +548,7 @@ def __init__(self, parent, name, pkey=None):
WHERE attrelid = 'pg_catalog.{catname}'::regclass
""".format(catname=name)
cur = parent._query(qry)
- self._pkey = [row[0] for row in cur.getresult()]
+ self._pkey = [row[0] for row in cur.fetchall()]
# Primary key must be in the column list
for k in self._pkey:
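The translation in this file is mechanical: a PyGreSQL query object becomes a live psycopg2 cursor, with getresult()/ntuples()/listfields() mapping to fetchall()/rowcount/description. One behavioral difference worth keeping in mind: psycopg2 returns real Python booleans rather than the 't'/'f' strings the relisshared comment above refers to. A condensed sketch of the mapping:

    # psycopg2 stand-in for PyGreSQL's conn.query(sql), as _query() does above.
    def run_query(conn, sql):
        cur = conn.cursor()
        cur.execute(sql)
        return cur

    # old: q.getresult()   ->  cur.fetchall()
    # old: q.ntuples()     ->  cur.rowcount
    # old: q.listfields()  ->  [col.name for col in cur.description]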
diff --git a/gpMgmt/bin/gppylib/operations/buildMirrorSegments.py b/gpMgmt/bin/gppylib/operations/buildMirrorSegments.py
index a071aa89e83..1eb8b6dc334 100644
--- a/gpMgmt/bin/gppylib/operations/buildMirrorSegments.py
+++ b/gpMgmt/bin/gppylib/operations/buildMirrorSegments.py
@@ -565,7 +565,7 @@ def _get_running_postgres_segments(self, segments):
return running_segments
def dereference_remote_symlink(self, datadir, host):
- cmdStr = """python -c 'import os; print(os.path.realpath("%s"))'""" % datadir
+ cmdStr = """python3 -c 'import os; print(os.path.realpath("%s"))'""" % datadir
cmd = base.Command('dereference a symlink on a remote host', cmdStr=cmdStr, ctxt=base.REMOTE, remoteHost=host)
cmd.run()
results = cmd.get_results()
diff --git a/gpMgmt/bin/gppylib/operations/segment_reconfigurer.py b/gpMgmt/bin/gppylib/operations/segment_reconfigurer.py
index 643489fd086..913847a5eb5 100644
--- a/gpMgmt/bin/gppylib/operations/segment_reconfigurer.py
+++ b/gpMgmt/bin/gppylib/operations/segment_reconfigurer.py
@@ -1,8 +1,7 @@
import time
-
from gppylib.commands import base
from gppylib.db import dbconn
-import pg
+from contextlib import closing
FTS_PROBE_QUERY = 'SELECT pg_catalog.gp_request_fts_probe_scan()'
@@ -13,15 +12,19 @@ def __init__(self, logger, worker_pool, timeout):
self.timeout = timeout
def _trigger_fts_probe(self, dburl):
- conn = pg.connect(dbname=dburl.pgdb,
- host=dburl.pghost,
- port=dburl.pgport,
- opt=None,
- user=dburl.pguser,
- passwd=dburl.pgpass,
- )
- conn.query(FTS_PROBE_QUERY)
- conn.close()
+ start_time = time.time()
+ while True:
+ try:
+ with closing(dbconn.connect(dburl)) as conn:
+ with conn.cursor() as cur:
+ cur.execute(FTS_PROBE_QUERY)
+ break
+ except Exception as e:
+ now = time.time()
+ if now < start_time + self.timeout:
+ continue
+ else:
+ raise RuntimeError("FTS probing did not complete in {} seconds.".format(self.timeout))
def reconfigure(self):
# issue a distributed query to make sure we pick up the fault
@@ -36,9 +39,12 @@ def reconfigure(self):
# Empty block of 'BEGIN' and 'END' won't start a distributed transaction,
# execute a DDL query to start a distributed transaction.
# so the primaries'd better be up
- conn = dbconn.connect(dburl)
- conn.cursor().execute('CREATE TEMP TABLE temp_test(a int)')
- conn.cursor().execute('COMMIT')
+ with closing(dbconn.connect(dburl)) as conn:
+ with conn.cursor() as cur:
+ cur.execute('BEGIN')
+ cur.execute('CREATE TEMP TABLE temp_test(a int)')
+ cur.execute('COMMIT')
+ break
except Exception as e:
# Should close conn here
# Otherwise, the postmaster will be blocked by abort transaction
@@ -48,6 +54,3 @@ def reconfigure(self):
continue
else:
raise RuntimeError("Mirror promotion did not complete in {0} seconds.".format(self.timeout))
- else:
- conn.close()
- break
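Both _trigger_fts_probe and reconfigure now share the same retry-until-deadline shape; distilled into a sketch (the helper name is hypothetical):

    # Keep attempting until `timeout` seconds have elapsed since the first try,
    # then surface a RuntimeError, as the two loops above do.
    import time

    def retry_until(timeout, attempt, what):
        start_time = time.time()
        while True:
            try:
                attempt()
                break
            except Exception:
                if time.time() < start_time + timeout:
                    continue
                raise RuntimeError("{} did not complete in {} seconds.".format(what, timeout))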
diff --git a/gpMgmt/bin/gppylib/operations/test/unit/test_unit_segment_reconfigurer.py b/gpMgmt/bin/gppylib/operations/test/unit/test_unit_segment_reconfigurer.py
index 9d86071bafd..d4c2a0c26e7 100644
--- a/gpMgmt/bin/gppylib/operations/test/unit/test_unit_segment_reconfigurer.py
+++ b/gpMgmt/bin/gppylib/operations/test/unit/test_unit_segment_reconfigurer.py
@@ -4,8 +4,7 @@
from gppylib.operations.segment_reconfigurer import SegmentReconfigurer, FTS_PROBE_QUERY
from gppylib.test.unit.gp_unittest import GpTestCase
-import pg
-import pgdb
+import psycopg2
import mock
from mock import Mock, patch, call, MagicMock
import contextlib
@@ -38,22 +37,22 @@ def setUp(self):
self.apply_patches([
patch('gppylib.db.dbconn.connect', new=self.connect),
patch('gppylib.db.dbconn.DbURL', return_value=self.db_url),
- patch('pg.connect'),
+ patch('psycopg2.connect'),
])
def test_it_triggers_fts_probe(self):
reconfigurer = SegmentReconfigurer(logger=self.logger,
worker_pool=self.worker_pool, timeout=self.timeout)
reconfigurer.reconfigure()
- pg.connect.assert_has_calls([
- call(dbname=self.db, host=self.host, port=self.port, opt=None, user=self.user, passwd=self.passwd),
+ psycopg2.connect.assert_has_calls([
+ call(dbname=self.db, host=self.host, port=self.port, options=None, user=self.user, password=self.passwd),
call().query(FTS_PROBE_QUERY),
call().close(),
]
)
def test_it_retries_the_connection(self):
- self.connect.configure_mock(side_effect=[pgdb.DatabaseError, pgdb.DatabaseError, self.conn])
+ self.connect.configure_mock(side_effect=[psycopg2.DatabaseError, psycopg2.DatabaseError, self.conn])
reconfigurer = SegmentReconfigurer(logger=self.logger,
worker_pool=self.worker_pool, timeout=self.timeout)
@@ -74,7 +73,7 @@ def fail_for_five_minutes():
# leap forward 300 seconds
new_time += self.timeout / 2
now_mock.configure_mock(return_value=new_time)
- yield pgdb.DatabaseError
+ yield psycopg2.DatabaseError
self.connect.configure_mock(side_effect=fail_for_five_minutes())
@@ -87,3 +86,27 @@ def fail_for_five_minutes():
self.connect.assert_has_calls([call(self.db_url), call(self.db_url), ])
self.conn.close.assert_has_calls([])
+
+ @patch('time.time')
+ def test_it_gives_up_after_600_seconds_2(self, now_mock):
+ start_datetime = datetime.datetime(2023, 7, 27, 16, 0, 0)
+ start_time = time.mktime(start_datetime.timetuple())
+ now_mock.configure_mock(return_value=start_time)
+
+ def fail_for_ten_minutes():
+ new_time = start_time
+ # leap forward 600 seconds
+ new_time += self.timeout
+ now_mock.configure_mock(return_value=new_time)
+ yield psycopg2.DatabaseError
+
+ self.connect.configure_mock(side_effect=fail_for_ten_minutes())
+
+ reconfigurer = SegmentReconfigurer(logger=self.logger,
+ worker_pool=self.worker_pool, timeout=self.timeout)
+ with self.assertRaises(RuntimeError) as context:
+ reconfigurer.reconfigure()
+ self.assertEqual("FTS probing did not complete in {} seconds.".format(self.timeout), context.exception.message)
+
+ self.connect.assert_has_calls([call(self.db_url)])
+ self.conn.close.assert_has_calls([])
diff --git a/gpMgmt/bin/gppylib/operations/test/unit/test_unit_utils.py b/gpMgmt/bin/gppylib/operations/test/unit/test_unit_utils.py
index 38fcd327dee..e62aa7a40b7 100755
--- a/gpMgmt/bin/gppylib/operations/test/unit/test_unit_utils.py
+++ b/gpMgmt/bin/gppylib/operations/test/unit/test_unit_utils.py
@@ -11,7 +11,7 @@
from gppylib.operations.test_utils_helper import TestOperation, RaiseOperation, RaiseOperation_Unpicklable, RaiseOperation_Safe, ExceptionWithArgs
from operations.unix import ListFiles
from test.unit.gp_unittest import GpTestCase, run_tests
-from pg import DatabaseError
+from psycopg2 import DatabaseError
class UtilsTestCase(GpTestCase):
"""
diff --git a/gpMgmt/bin/gppylib/operations/test_utils_helper.py b/gpMgmt/bin/gppylib/operations/test_utils_helper.py
index 109bc83bb87..74cfeda2bc0 100755
--- a/gpMgmt/bin/gppylib/operations/test_utils_helper.py
+++ b/gpMgmt/bin/gppylib/operations/test_utils_helper.py
@@ -1,4 +1,5 @@
from gppylib.operations import Operation
+import psycopg2
"""
These objects needed for gppylib.operations.test.test_utils are pulled out of said file for
@@ -37,5 +38,4 @@ def __init__(self, x, y):
class RaiseOperation_Unpicklable(Operation):
def execute(self):
- import pg
- raise pg.DatabaseError()
+ raise psycopg2.DatabaseError()
diff --git a/gpMgmt/bin/gppylib/programs/clsSystemState.py b/gpMgmt/bin/gppylib/programs/clsSystemState.py
index e234b633122..4bf55a783c4 100644
--- a/gpMgmt/bin/gppylib/programs/clsSystemState.py
+++ b/gpMgmt/bin/gppylib/programs/clsSystemState.py
@@ -10,7 +10,7 @@
import sys, os
import re
import collections
-import pgdb
+import psycopg2
from contextlib import closing
from gppylib import gparray, gplog
from gppylib.commands import base, gp
@@ -1026,7 +1026,7 @@ def _get_unsync_segs_add_wal_remaining_bytes(data, gpArray):
wal_sync_bytes_out = 'Unknown'
unsync_segs.append(s)
data.addValue(VALUE__REPL_SYNC_REMAINING_BYTES, wal_sync_bytes_out)
- except pgdb.InternalError:
+ except (psycopg2.InternalError, psycopg2.OperationalError):
logger.warning('could not query segment {} ({}:{})'.format(
s.dbid, s.hostname, s.port
))
@@ -1098,7 +1098,7 @@ def _add_replication_info(data, primary, mirror):
cursor.close()
- except pgdb.InternalError:
+ except (psycopg2.InternalError, psycopg2.OperationalError):
logger.warning('could not query segment {} ({}:{})'.format(
primary.dbid, primary.hostname, primary.port
))
diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_foreign_key_check.py b/gpMgmt/bin/gppylib/test/unit/test_unit_foreign_key_check.py
index 9888472bb43..b9e691a7e56 100755
--- a/gpMgmt/bin/gppylib/test/unit/test_unit_foreign_key_check.py
+++ b/gpMgmt/bin/gppylib/test/unit/test_unit_foreign_key_check.py
@@ -11,7 +11,7 @@
class GpCheckCatTestCase(GpTestCase):
def setUp(self):
self.logger = Mock(spec=['log', 'info', 'debug', 'error'])
- self.db_connection = Mock(spec=['close', 'query'])
+ self.db_connection = Mock(spec=['close', 'cursor'])
self.autoCast = {'regproc': '::oid',
'regprocedure': '::oid',
'regoper': '::oid',
@@ -25,9 +25,10 @@ def setUp(self):
self.full_join_cat_tables = set(['pg_attribute','gp_distribution_policy','pg_appendonly','pg_constraint','pg_index'])
self.foreign_key_check= Mock(spec=['runCheck'])
self.foreign_key_check.runCheck.return_value = []
- self.db_connection.query.return_value.ntuples.return_value = 2
- self.db_connection.query.return_value.listfields.return_value = ['pkey1', 'pkey2']
- self.db_connection.query.return_value.getresult.return_value = [('r1','r2'), ('r3','r4')]
+
+ self.db_connection.cursor.return_value.rowcount = 2
+ self.db_connection.cursor.return_value.description = [('pkey1',), ('pkey2',)]
+ self.db_connection.cursor.return_value.fetchall.return_value = [('r1','r2'), ('r3','r4')]
def test_get_fk_query_left_join_returns_the_correct_query(self):
@@ -127,7 +128,7 @@ def test_checkTableForeignKey__returns_correct_join_query(self, log_literal_mock
self.assertEqual(len(issue_list) , 2)
self.assertEqual(issue_list[0], ('pg_class', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]))
self.assertEqual(issue_list[1], ('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]))
- self.assertEqual(self.db_connection.query.call_count, 2)
+ self.assertEqual(self.db_connection.cursor.call_count, 2)
def __generate_pg_class_call(table, primary_key_cat_name, col_type, with_filter=True):
if with_filter:
@@ -168,7 +169,7 @@ def __generate_pg_class_call(table, primary_key_cat_name, col_type, with_filter=
self.assertEqual(fk_query_full_join_mock.call_count, 0)
fk_query_left_join_mock.assert_has_calls(foreign_key_mock_calls_left, any_order=False)
- self.db_connection.query.call_count = 0
+ self.db_connection.cursor.call_count = 0
fk_query_full_join_mock.call_count = 0
fk_query_left_join_mock.call_count = 0
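The gpcheckcat tests below repeat one mocking idiom for `with conn.cursor() as cur:`; a condensed helper sketch (the helper name is hypothetical), mirroring the pattern used in those hunks:

    # Make conn_mock.cursor() usable as a context manager yielding canned rows.
    from mock import Mock

    def mock_cursor(conn_mock, rows):
        cur = Mock(spec=['execute', 'fetchall'])
        cur.fetchall.return_value = rows
        conn_mock.cursor.return_value = Mock()
        conn_mock.cursor.return_value.__enter__ = Mock(return_value=cur)
        conn_mock.cursor.return_value.__exit__ = Mock(return_value=False)
        return cur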
diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py b/gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py
index ccdfb03a7ad..28aa0f1227c 100755
--- a/gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py
+++ b/gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py
@@ -18,11 +18,11 @@ def setUp(self):
self.subject = imp.load_source('gpcheckcat', gpcheckcat_file)
self.subject.check_gpexpand = lambda : (True, "")
- self.db_connection = Mock(spec=['close', 'query'])
+ self.db_connection = Mock(spec=['close', 'cursor', 'set_session'])
self.unique_index_violation_check = Mock(spec=['runCheck'])
self.foreign_key_check = Mock(spec=['runCheck', 'checkTableForeignKey'])
self.apply_patches([
- patch("gpcheckcat.pg.connect", return_value=self.db_connection),
+ patch("gpcheckcat.connect", return_value=self.db_connection),
patch("gpcheckcat.UniqueIndexViolationCheck", return_value=self.unique_index_violation_check),
patch("gpcheckcat.ForeignKeyCheck", return_value=self.foreign_key_check),
patch('os.environ', new={}),
@@ -129,23 +129,26 @@ def test_drop_leaked_schemas__when_leaked_schemas_exist__reports_which_schemas_a
self.assertIn(expected_message, log_messages)
def test_automatic_thread_count(self):
- self.db_connection.query.return_value.getresult.return_value = [[0]]
+ self.db_connection.cursor.return_value.fetchall.return_value = [[0]]
self._run_batch_size_experiment(100)
self._run_batch_size_experiment(101)
+ @patch('gpcheckcat.getversion', return_value='4.3')
@patch('gpcheckcat.GPCatalog', return_value=Mock())
@patch('sys.exit')
@patch('gppylib.gplog.log_literal')
- def test_truncate_batch_size(self, mock_log, mock_gpcheckcat, mock_sys_exit):
+ def test_truncate_batch_size(self, mock_log, mock_sys_exit, mock_gpcatalog, mock_version):
self.subject.GV.opt['-B'] = 300 # override the setting from available memory
# setup conditions for 50 primaries and plenty of RAM such that max threads > 50
primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
- self.db_connection.query.return_value.getresult.return_value = [['4.3']]
- self.db_connection.query.return_value.dictresult.return_value = primaries
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries
testargs = ['some_string','-port 1', '-R foo']
@@ -221,10 +224,11 @@ def test_checkForeignKey__no_arg(self, process_foreign_key_mock):
self.foreign_key_check.runCheck.assert_called_once_with(cat_tables)
# Test gpcheckcat -C option with checkForeignKey
+ @patch('gpcheckcat.getversion', return_value='4.3')
@patch('gpcheckcat.GPCatalog', return_value=Mock())
@patch('sys.exit')
@patch('gpcheckcat.checkTableMissingEntry')
- def test_runCheckCatname__for_checkForeignKey(self, mock1, mock2, mock3):
+ def test_runCheckCatname__for_checkForeignKey(self, mock1, mock2, mock3, mock4):
self.subject.checkForeignKey = Mock()
gpcat_class_mock = Mock(spec=['getCatalogTable'])
cat_obj_mock = Mock()
@@ -234,8 +238,12 @@ def test_runCheckCatname__for_checkForeignKey(self, mock1, mock2, mock3):
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
- self.db_connection.query.return_value.getresult.return_value = [['4.3']]
- self.db_connection.query.return_value.dictresult.return_value = primaries
+
+ # Mock the cursor's context-manager protocol (__enter__/__exit__).
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries
self.subject.GV.opt['-C'] = 'pg_class'
@@ -314,7 +322,13 @@ def test_skip_one_test(self, mock_ver, mock_run, mock1, mock2):
primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
- self.db_connection.query.return_value.dictresult.return_value = primaries
+
+ # Mock the cursor's context-manager protocol (__enter__/__exit__).
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries
+
self.subject.all_checks = {'test1': 'a', 'test2': 'b', 'test3': 'c'}
testargs = ['gpcheckcat', '-port 1', '-s test2']
@@ -330,7 +344,13 @@ def test_skip_multiple_test(self, mock_ver, mock_run, mock1, mock2):
primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
- self.db_connection.query.return_value.dictresult.return_value = primaries
+
+ # Mock the cursor's context-manager protocol (__enter__/__exit__).
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries
+
self.subject.all_checks = {'test1': 'a', 'test2': 'b', 'test3': 'c'}
testargs = ['gpcheckcat', '-port 1', '-s', "test1, test2"]
@@ -346,7 +366,11 @@ def test_skip_test_warning(self, mock_ver, mock_run, mock1, mock2):
primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
- self.db_connection.query.return_value.dictresult.return_value = primaries
+ # Mock the cursor's context-manager protocol (__enter__/__exit__).
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries
self.subject.all_checks = {'test1': 'a', 'test2': 'b', 'test3': 'c'}
testargs = ['gpcheckcat', '-port 1', '-s', "test_invalid, test2"]
@@ -365,7 +389,13 @@ def test_run_multiple_test(self, mock_ver, mock_run, mock1, mock2):
primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
- self.db_connection.query.return_value.dictresult.return_value = primaries
+
+ # Mock the cursor's context-manager protocol (__enter__/__exit__).
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = primaries
+
self.subject.all_checks = {'test1': 'a', 'test2': 'b', 'test3': 'c'}
testargs = ['gpcheckcat', '-port 1', '-R', "test1, test2"]
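The four-line mock setup that recurs in these tests exists because psycopg2 cursors are used as context managers (with conn.cursor() as cur:), and a plain Mock does not implement the protocol by itself. A minimal sketch of the pattern, with illustrative names:

    from unittest.mock import Mock

    conn = Mock(spec=['cursor', 'close'])
    cur = Mock(spec=['execute', 'fetchall'])
    conn.cursor.return_value = Mock()
    conn.cursor.return_value.__enter__ = Mock(return_value=cur)
    conn.cursor.return_value.__exit__ = Mock(return_value=False)  # False: do not swallow exceptions
    cur.fetchall.return_value = [('row',)]

    with conn.cursor() as c:  # what the code under test does
        assert c.fetchall() == [('row',)]

MagicMock supports the context-manager protocol out of the box, so the explicit __enter__/__exit__ wiring is only needed for the plain Mock objects these tests pin down with spec lists.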
diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_gpconfig.py b/gpMgmt/bin/gppylib/test/unit/test_unit_gpconfig.py
index 54fa8f3cf7c..42afb96763d 100644
--- a/gpMgmt/bin/gppylib/test/unit/test_unit_gpconfig.py
+++ b/gpMgmt/bin/gppylib/test/unit/test_unit_gpconfig.py
@@ -5,11 +5,10 @@
import shutil
import sys
import tempfile
-
+from psycopg2 import DatabaseError
from gppylib.gparray import Segment, GpArray, SegmentPair
from gpconfig_modules.parse_guc_metadata import ParseGuc
import errno
-from pg import DatabaseError
from .gp_unittest import *
from unittest.mock import *
diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_gpload.py b/gpMgmt/bin/gppylib/test/unit/test_unit_gpload.py
index ccc19515257..1d5dd2ae774 100755
--- a/gpMgmt/bin/gppylib/test/unit/test_unit_gpload.py
+++ b/gpMgmt/bin/gppylib/test/unit/test_unit_gpload.py
@@ -33,8 +33,11 @@ def help_test_with_config(self, gpload_param, expected_begin_value, expected_com
print(gpload_param)
gploader = gpload(gpload_param)
gploader.read_config()
- gploader.db = self
- gploader.db.query = Mock(side_effect=self.mockQuery)
+ gploader.conn = Mock()
+ gploader.conn.cursor.return_value = Mock()
+ gploader.conn.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ gploader.conn.cursor.return_value.__exit__ = Mock(return_value=False)
+ gploader.conn.cursor.return_value.__enter__.return_value.execute = Mock(side_effect=self.mockQuery)
gploader.do_method_merge = Mock(side_effect=self.mockDoNothing)
gploader.do_method_update = Mock(side_effect=self.mockDoNothing)
gploader.do_method_insert = Mock(side_effect=self.mockDoNothing)
diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_gpstate.py b/gpMgmt/bin/gppylib/test/unit/test_unit_gpstate.py
index e9c60760aeb..4858e80e65a 100644
--- a/gpMgmt/bin/gppylib/test/unit/test_unit_gpstate.py
+++ b/gpMgmt/bin/gppylib/test/unit/test_unit_gpstate.py
@@ -1,6 +1,6 @@
import unittest
import mock
-import pgdb
+import psycopg2
import tempfile
from gppylib import gparray
@@ -228,7 +228,7 @@ def test_add_replication_info_adds_unknowns_if_primary_is_down(self):
@mock.patch('gppylib.db.dbconn.connect', autospec=True)
def test_add_replication_info_adds_unknowns_if_connection_cannot_be_made(self, mock_connect):
# Simulate a connection failure in dbconn.connect().
- mock_connect.side_effect = pgdb.InternalError('connection failure forced by unit test')
+ mock_connect.side_effect = psycopg2.InternalError('connection failure forced by unit test')
GpSystemStateProgram._add_replication_info(self.data, self.primary, self.mirror)
self.assertEqual('Unknown', self.data.getStrValue(self.mirror, VALUE__REPL_SENT_LSN))
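psycopg2 keeps the DB-API 2.0 exception names that pgdb exposed, so the swap is mechanical: InternalError still derives from DatabaseError, which derives from Error. A sketch of the forced-failure pattern used above:

    from unittest import mock
    import psycopg2

    connect = mock.Mock(side_effect=psycopg2.InternalError('connection failure forced by unit test'))
    try:
        connect()
    except psycopg2.Error as e:  # InternalError -> DatabaseError -> Error
        print(type(e).__name__, e)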
diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py b/gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py
index 1eda2e72a25..1cd81684c23 100644
--- a/gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py
+++ b/gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py
@@ -6,45 +6,57 @@
class LeakedSchemaDropperTestCase(GpTestCase):
def setUp(self):
- self.db_connection = Mock(spec=['query'])
-
- two_leaked_schemas = Mock()
- two_leaked_schemas.getresult.return_value = [
+ self.db_connection = Mock(spec=['cursor'])
+ self.db_connection.cursor.return_value.fetchall.return_value = [
('fake_leak_1', 'something_else'),
('some"test"special_#;character--schema', 'something_else')
]
- self.db_connection.query.return_value = two_leaked_schemas
-
self.subject = LeakedSchemaDropper()
def test_drop_leaked_schemas__returns_a_list_of_leaked_schemas(self):
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [
+ ('fake_leak_1', 'something_else'),
+ ('some"test"special_#;character--schema', 'something_else')
+ ]
self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), ['fake_leak_1', 'some"test"special_#;character--schema'])
def test_drop_leaked_schemas__when_there_are_no_leaked_schemas__returns_an_empty_list(self):
- no_leaked_schemas = Mock()
- no_leaked_schemas.getresult.return_value = []
- self.db_connection.query.return_value = no_leaked_schemas
-
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = []
self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), [])
def test_drop_leaked_schemas__when_query_returns_null_schema__returns_an_empty_list(self):
- null_leaked_schema = Mock()
- null_leaked_schema.getresult.return_value = [(None, 'something_else')]
- self.db_connection.query.return_value = null_leaked_schema
-
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [(None, 'something_else')]
self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), [])
def test_drop_leaked_schemas__when_query_returns_null__returns_an_empty_list(self):
- self.db_connection.query.return_value = None
-
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = []
self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), [])
def test_drop_leaked_schemas__drops_orphaned_and_leaked_schemas(self):
+ self.db_connection.cursor.return_value = Mock()
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.return_value = [
+ ('fake_leak_1', 'something_else'),
+ ('some"test"special_#;character--schema', 'something_else')
+ ]
self.subject.drop_leaked_schemas(self.db_connection)
drop_query_expected_list = [call("DROP SCHEMA IF EXISTS \"fake_leak_1\" CASCADE;"),
call("DROP SCHEMA IF EXISTS \"some\"\"test\"\"special_#;character--schema\" CASCADE;")]
- self.db_connection.query.assert_has_calls(drop_query_expected_list)
+ self.db_connection.cursor.return_value.__enter__.return_value.execute.assert_has_calls(drop_query_expected_list)
if __name__ == '__main__':
diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_unique_index_violation_check.py b/gpMgmt/bin/gppylib/test/unit/test_unit_unique_index_violation_check.py
index c9306d69b97..19ddb773232 100644
--- a/gpMgmt/bin/gppylib/test/unit/test_unit_unique_index_violation_check.py
+++ b/gpMgmt/bin/gppylib/test/unit/test_unit_unique_index_violation_check.py
@@ -9,33 +9,37 @@ def setUp(self):
self.subject = UniqueIndexViolationCheck()
self.index_query_result = Mock()
- self.index_query_result.getresult.return_value = [
+ self.index_query_result.fetchall.return_value = [
(9001, 'index1', 'table1', ['index1_column1','index1_column2']),
(9001, 'index2', 'table1', ['index2_column1','index2_column2'])
]
- self.violated_segments_query_result = Mock()
-
- self.db_connection = Mock(spec=['query'])
- self.db_connection.query.side_effect = self.mock_query_return_value
-
- def mock_query_return_value(self, query_string):
- if query_string == UniqueIndexViolationCheck.unique_indexes_query:
- return self.index_query_result
- else:
- return self.violated_segments_query_result
+ self.db_connection = Mock(spec=['cursor'])
+ self.db_connection.cursor.return_value.__enter__ = Mock(return_value=Mock(spec=['fetchall', 'execute']))
+ self.db_connection.cursor.return_value.__exit__ = Mock(return_value=False)
def test_run_check__when_there_are_no_issues(self):
- self.violated_segments_query_result.getresult.return_value = []
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.side_effect = [
+ [
+ (9001, 'index1', 'table1', ['index1_column1','index1_column2']),
+ (9001, 'index2', 'table1', ['index2_column1','index2_column2'])
+ ],
+ [],
+ [],
+ ]
violations = self.subject.runCheck(self.db_connection)
self.assertEqual(len(violations), 0)
def test_run_check__when_index_is_violated(self):
- self.violated_segments_query_result.getresult.side_effect = [
+ self.db_connection.cursor.return_value.__enter__.return_value.fetchall.side_effect = [
+ [
+ (9001, 'index1', 'table1', ['index1_column1','index1_column2']),
+ (9001, 'index2', 'table1', ['index2_column1','index2_column2'])
+ ],
[(-1,), (0,), (1,)],
- [(-1,)]
+ [(-1,)],
]
violations = self.subject.runCheck(self.db_connection)
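The old test dispatched on the query string in mock_query_return_value; a single mocked psycopg2 cursor offers no such hook, so the expected result sets are queued in call order with fetchall.side_effect: the first list answers the unique-indexes query, the later lists answer the per-index violation queries. A sketch of the mechanism:

    from unittest.mock import Mock

    fetchall = Mock(side_effect=[[('indexes',)], [], [(-1,)]])
    fetchall()  # [('indexes',)] -- first call
    fetchall()  # []             -- second call
    fetchall()  # [(-1,)]        -- third call; a fourth call would raise StopIteration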
diff --git a/gpMgmt/bin/gppylib/test/unit/test_unit_utils.py b/gpMgmt/bin/gppylib/test/unit/test_unit_utils.py
new file mode 100644
index 00000000000..082e52d8d11
--- /dev/null
+++ b/gpMgmt/bin/gppylib/test/unit/test_unit_utils.py
@@ -0,0 +1,7 @@
+from mock import *
+from .gp_unittest import *
+from gppylib.utils import escape_string
+
+class UtilsFunctionsTest(GpTestCase):
+ def test_escape_string_can_handle_utf8(self):
+ self.assertEqual('public."spiegelungssätze"', escape_string('public."spiegelungssätze"'))
diff --git a/gpMgmt/bin/gppylib/utils.py b/gpMgmt/bin/gppylib/utils.py
index fead818d3ac..19791c6280d 100644
--- a/gpMgmt/bin/gppylib/utils.py
+++ b/gpMgmt/bin/gppylib/utils.py
@@ -5,8 +5,7 @@
from sys import *
from xml.dom import minidom
from xml.dom import Node
-
-import pgdb
+import psycopg2
from gppylib.gplog import *
logger = get_default_logger()
@@ -503,14 +502,20 @@ def escapeDoubleQuoteInSQLString(string, forceDoubleQuote=True):
string = '"' + string + '"'
return string
-
-def Escape(query_str):
- return pgdb.escape_string(query_str)
-
+# Escape single quotes and backslashes in the string according to SQL string-constant syntax.
+# E.g.,
+# >>> escape_string(r"O'Reilly")
+# "O''Reilly"
+def escape_string(string):
+ adapted = psycopg2.extensions.QuotedString(string)
+    # getquoted() returns a latin-1-encoded byte string by default, so we need
+    # to set the target encoding explicitly.
+ adapted.encoding = 'utf-8'
+ return adapted.getquoted().decode()[1:-1]
def escapeArrayElement(query_str):
# also escape backslashes and double quotes, in addition to the doubling of single quotes
- return pgdb.escape_string(query_str.encode(errors='backslashreplace')).decode(errors='backslashreplace').replace('\\','\\\\').replace('"','\\"')
+ return escape_string(query_str.encode(errors='backslashreplace')).encode().decode(errors='backslashreplace').replace('\\','\\\\').replace('"','\\"')
# Transform Python list to Postgres array literal (of the form: '{...}')
@@ -593,7 +598,7 @@ def formatInsertValuesList(row, starelid, inclHLL):
# Format stavalues5 for an hll slot
elif i == 30 and hll:
if inclHLL:
- val = '\'{"%s"}\'' % pgdb.escape_bytea(val[0])
+ val = '\'{\\%s}\'' % val[0]
rowVals.append('\t{0}::{1}'.format(val, 'bytea[]'))
else:
rowVals.append('\t{0}'.format('NULL::int4[]'))
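For reference, a minimal sketch of the QuotedString adapter that escape_string() wraps (assuming psycopg2 is importable); the [1:-1] slice above strips the surrounding single quotes that getquoted() adds:

    from psycopg2.extensions import QuotedString

    adapted = QuotedString("O'Reilly")
    adapted.encoding = 'utf-8'                 # getquoted() defaults to latin-1
    print(adapted.getquoted())                 # b"'O''Reilly'"
    print(adapted.getquoted().decode()[1:-1])  # O''Reilly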
diff --git a/gpMgmt/bin/gpsd b/gpMgmt/bin/gpsd
index bffbc02065b..b9b19953b58 100755
--- a/gpMgmt/bin/gpsd
+++ b/gpMgmt/bin/gpsd
@@ -11,8 +11,8 @@ import sys
import re
from contextlib import closing
from optparse import OptionParser
-import pgdb
-from gppylib.utils import formatInsertValuesList, Escape
+from gppylib.utils import formatInsertValuesList, escape_string
+import psycopg2
gpsd_version = '%prog 1.0'
@@ -42,7 +42,7 @@ def get_num_segments(cursor):
query = "select count(*) from gp_segment_configuration where role='p' and content >=0;"
try:
cursor.execute(query)
- except pgdb.DatabaseError as e:
+ except psycopg2.DatabaseError as e:
sys.stderr.write('\nError while trying to retrieve number of segments.\n\n' + str(e) + '\n\n')
sys.exit(1)
vals = cursor.fetchone()
@@ -82,7 +82,13 @@ def dumpTupleCount(cur):
def dumpStats(cur, inclHLL):
- query = 'SELECT pgc.relname, pgn.nspname, pga.attname, pgtn.nspname, pgt.typname, pgs.* ' \
+ query = 'SELECT pgc.relname, pgn.nspname, pga.attname, pgtn.nspname, pgt.typname, ' \
+ 'pgs.starelid, pgs.staattnum, pgs.stainherit, pgs.stanullfrac, pgs.stawidth, pgs.stadistinct, ' \
+ 'pgs.stakind1, pgs.stakind2, pgs.stakind3, pgs.stakind4, pgs.stakind5, ' \
+ 'pgs.staop1, pgs.staop2, pgs.staop3, pgs.staop4, pgs.staop5, ' \
+ 'pgs.stacoll1, pgs.stacoll2, pgs.stacoll3, pgs.stacoll4, pgs.stacoll5, ' \
+ 'pgs.stanumbers1, pgs.stanumbers2, pgs.stanumbers3, pgs.stanumbers4, pgs.stanumbers5, ' \
+ 'pgs.stavalues1::text::text[], pgs.stavalues2::text::text[], pgs.stavalues3::text::text[], pgs.stavalues4::text::text[], pgs.stavalues5::text::text[] ' \
'FROM pg_class pgc, pg_statistic pgs, pg_namespace pgn, pg_attribute pga, pg_type pgt, pg_namespace pgtn ' \
'WHERE pgc.relnamespace = pgn.oid and pgn.nspname NOT IN ' + \
sysnslist + \
@@ -101,7 +107,7 @@ def dumpStats(cur, inclHLL):
cur.execute(query)
for vals in ResultIter(cur):
- starelid = "'%s.%s'::regclass" % (Escape(vals[1]), Escape(vals[0]))
+ starelid = "'%s.%s'::regclass" % (escape_string(vals[1]), escape_string(vals[0]))
rowVals = formatInsertValuesList(vals, starelid, inclHLL)
print(pstring.format(vals[0], vals[2], ',\n'.join(rowVals)))
@@ -152,7 +158,7 @@ def main():
'options': pgoptions
}
num_segments = 0
- with closing(pgdb.connect(**connectionInfo)) as connection:
+ with closing(psycopg2.connect(**connectionInfo)) as connection:
with closing(connection.cursor()) as cursor:
num_segments = get_num_segments(cursor)
sys.stdout.writelines(['\n-- Greenplum database Statistics Dump',
@@ -187,7 +193,7 @@ def main():
sys.stdout.flush()
try:
- with closing(pgdb.connect(**connectionInfo)) as connection:
+ with closing(psycopg2.connect(**connectionInfo)) as connection:
with closing(connection.cursor()) as cursor:
dumpTupleCount(cursor)
dumpStats(cursor, inclHLL)
@@ -196,11 +202,11 @@ def main():
'which requires some data elements to be included in the output file.\n',
'Please review output file to ensure it is within corporate policy to transport the output file.\n'])
- except pgdb.DatabaseError as err: # catch *all* exceptions
+ except psycopg2.DatabaseError as err: # catch *all* exceptions
sys.stderr.write('Error while dumping statistics:\n')
sys.stderr.write(str(err))
sys.exit(1)
if __name__ == "__main__":
- main()
+ main()
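A sketch of the connect-and-query shape gpsd now uses (connection parameters here are illustrative). contextlib.closing stays in place because a psycopg2 connection used as a bare context manager only commits or rolls back the transaction; it does not close the connection:

    from contextlib import closing
    import psycopg2

    with closing(psycopg2.connect(dbname='postgres', host='localhost', port=5432)) as connection:
        with closing(connection.cursor()) as cursor:
            cursor.execute("select count(*) from gp_segment_configuration "
                           "where role='p' and content >= 0")
            (num_segments,) = cursor.fetchone()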
diff --git a/gpMgmt/bin/minirepro b/gpMgmt/bin/minirepro
index 79d4c9fbb31..858b77933dd 100755
--- a/gpMgmt/bin/minirepro
+++ b/gpMgmt/bin/minirepro
@@ -61,10 +61,10 @@ minirepro gptest -h localhost -U gpadmin -p 4444 -q ~/in.sql -f ~/out.sql
import pwd
import os, sys, re, json, platform, subprocess
-import pgdb
+import psycopg2
from optparse import OptionParser
from datetime import datetime
-from gppylib.utils import formatInsertValuesList, Escape
+from gppylib.utils import formatInsertValuesList, escape_string
version = '1.13'
PATH_PREFIX = '/tmp/'
@@ -97,7 +97,7 @@ def get_server_version(cursor):
query = "select version()"
try:
cursor.execute(query)
- except pgdb.DatabaseError as e:
+ except psycopg2.DatabaseError as e:
sys.stderr.write('\nError while trying to find GPDB version.\n\n' + str(e) + '\n\n')
sys.exit(1)
vals = cursor.fetchone()
@@ -107,7 +107,7 @@ def get_num_segments(cursor):
query = "select count(*) from gp_segment_configuration where role='p' and content >=0;"
try:
cursor.execute(query)
- except pgdb.DatabaseError as e:
+ except psycopg2.DatabaseError as e:
sys.stderr.write('\nError while trying to retrieve number of segments.\n\n' + str(e) + '\n\n')
sys.exit(1)
vals = cursor.fetchone()
@@ -136,7 +136,7 @@ def dump_query(connectionInfo, query_file):
with open(query_file, 'r') as query_f:
sql_text = query_f.read()
- query = "select pg_catalog.gp_dump_query_oids('%s')" % Escape(sql_text)
+ query = "select pg_catalog.gp_dump_query_oids('%s')" % escape_string(sql_text)
toolkit_sql = PATH_PREFIX + 'toolkit.sql'
with open(toolkit_sql, 'w') as toolkit_f:
@@ -187,7 +187,7 @@ def pg_dump_object(mr_query, connectionInfo, envOpts):
out_file = PATH_PREFIX + PGDUMP_FILE
dmp_cmd = 'pg_dump -h %s -p %s -U %s -sxO %s' % connectionInfo
dmp_cmd = "%s --relation-oids %s --function-oids %s -f %s" % \
- (dmp_cmd, mr_query.relids, mr_query.funcids, Escape(out_file))
+ (dmp_cmd, mr_query.relids, mr_query.funcids, escape_string(out_file))
print(dmp_cmd)
p = subprocess.Popen(dmp_cmd, shell=True, stderr=subprocess.PIPE, env=envOpts)
_, errormsg = p.communicate()
@@ -213,11 +213,17 @@ def dump_tuple_count(cur, oid_str, f_out):
for col, val, typ in zip(columns[2:], vals[2:], types):
# i.e. relpages = 1::int, reltuples = 1.0::real
lines.append('\t%s = %s::%s' % (col, val, typ))
- updateStmt = templateStmt.format(Escape(',\n'.join(lines)), Escape(vals[0]), Escape(vals[1]))
+ updateStmt = templateStmt.format(escape_string(',\n'.join(lines)), escape_string(vals[0]), escape_string(vals[1]))
f_out.writelines(updateStmt)
def dump_stats(cur, oid_str, f_out, inclHLL):
- query = 'SELECT pgc.relname, pgn.nspname, pga.attname, pgtn.nspname, pgt.typname, pgs.* ' \
+ query = 'SELECT pgc.relname, pgn.nspname, pga.attname, pgtn.nspname, pgt.typname, ' \
+ 'pgs.starelid, pgs.staattnum, pgs.stainherit, pgs.stanullfrac, pgs.stawidth, pgs.stadistinct, ' \
+ 'pgs.stakind1, pgs.stakind2, pgs.stakind3, pgs.stakind4, pgs.stakind5, ' \
+ 'pgs.staop1, pgs.staop2, pgs.staop3, pgs.staop4, pgs.staop5, ' \
+ 'pgs.stacoll1, pgs.stacoll2, pgs.stacoll3, pgs.stacoll4, pgs.stacoll5, ' \
+ 'pgs.stanumbers1, pgs.stanumbers2, pgs.stanumbers3, pgs.stanumbers4, pgs.stanumbers5, ' \
+ 'pgs.stavalues1::text::text[], pgs.stavalues2::text::text[], pgs.stavalues3::text::text[], pgs.stavalues4::text::text[], pgs.stavalues5::text::text[] ' \
'FROM pg_class pgc, pg_statistic pgs, pg_namespace pgn, pg_attribute pga, pg_type pgt, pg_namespace pgtn ' \
'WHERE pgc.relnamespace = pgn.oid and pgc.oid in (%s) ' \
'and pgn.nspname NOT LIKE \'pg_temp_%%\' ' \
@@ -239,7 +245,7 @@ def dump_stats(cur, oid_str, f_out, inclHLL):
for vals in result_iter(cur):
schemaname = vals[1]
- starelid = "'%s.%s'::regclass" % (Escape(vals[1]), Escape(vals[0]))
+ starelid = "'%s.%s'::regclass" % (escape_string(vals[1]), escape_string(vals[0]))
rowVals = formatInsertValuesList(vals, starelid, inclHLL)
# For non-catalog tables we don't need to delete stats first
@@ -248,7 +254,7 @@ def dump_stats(cur, oid_str, f_out, inclHLL):
if schemaname != 'pg_catalog':
linecomment = '-- ' # This will comment out the DELETE query
- f_out.writelines(pstring.format(Escape(vals[0]), Escape(vals[2]), linecomment, starelid, vals[6], ',\n'.join(rowVals)))
+ f_out.writelines(pstring.format(escape_string(vals[0]), escape_string(vals[2]), linecomment, starelid, vals[6], ',\n'.join(rowVals)))
def main():
parser = parse_cmd_line()
@@ -299,7 +305,7 @@ def main():
}
print("Connecting to database: host=%s, port=%s, user=%s, db=%s ..." % connectionInfo)
- conn = pgdb.connect(**connectionDict)
+ conn = psycopg2.connect(**connectionDict)
cursor = conn.cursor()
# get server version, which is dumped to minirepro output file
@@ -346,7 +352,7 @@ def main():
# first create schema DDLs
print("Writing schema DDLs ...")
- table_schemas = ["CREATE SCHEMA %s;\n" % Escape(schema) for schema in mr_query.schemas if schema != 'public']
+ table_schemas = ["CREATE SCHEMA %s;\n" % escape_string(schema) for schema in mr_query.schemas if schema != 'public']
f_out.writelines(table_schemas)
# write relation and function DDLs
@@ -392,4 +398,4 @@ def main():
print('Please review output file to ensure it is within corporate policy to transport the output file.')
if __name__ == "__main__":
- main()
+ main()
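The expanded column list here and in gpsd above replaces pgs.*, most likely because the stavalues1..5 columns of pg_statistic are of pseudo-type anyarray: psycopg2 has no typecaster for it, and Postgres rejects a direct anyarray-to-text[] cast, hence the two-step ::text::text[] that hands the driver a concrete array type it can return as a Python list. An illustrative sketch:

    import psycopg2

    conn = psycopg2.connect(dbname='postgres')  # illustrative connection
    with conn.cursor() as cur:
        # anyarray -> text -> text[]: the driver then sees a concrete array type
        cur.execute("SELECT starelid, stavalues1::text::text[] FROM pg_statistic LIMIT 1")
        print(cur.fetchone())
    conn.close()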
diff --git a/gpMgmt/bin/pythonSrc/PyGreSQL/CMakeLists.txt b/gpMgmt/bin/pythonSrc/PyGreSQL/CMakeLists.txt
deleted file mode 100644
index ce1b626d3c6..00000000000
--- a/gpMgmt/bin/pythonSrc/PyGreSQL/CMakeLists.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-cmake_minimum_required(VERSION 3.12)
-
-set (CMAKE_CONFIGURATION_TYPES Release RelWithDebInfo)
-project(pygresql C)
-
-find_package(Python2 COMPONENTS Interpreter Development)
-if (Python2_FOUND)
- include_directories(${Python2_INCLUDE_DIRS})
-else ()
- message(FATAL_ERROR "python2 not found")
-endif(Python2_FOUND)
-
-file(GLOB SRC_TARBALL ${CMAKE_CURRENT_SOURCE_DIR}/../ext/PyGreSQL-*.tar.gz)
-if (NOT SRC_TARBALL)
- message(FATAL_ERROR "PyGreSQL source tarball not found, run git submodule update --init --recursive")
-endif()
-
-execute_process(COMMAND tar -xf ${SRC_TARBALL} --strip-components=1
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
-
-file(READ pgmodule.c PGMODULE_LINES)
-file(WRITE pgmodule.c "__declspec(dllexport) void init_pg(void);\n")
-file(APPEND pgmodule.c "${PGMODULE_LINES}")
-
-add_definitions("/D FRONTEND")
-
-set (CPPFLAGS "/MP /wd4996 /wd4018 /wd4090 /wd4102 /wd4244 /wd4267 /wd4273 /wd4715")
-add_definitions("${CPPFLAGS}")
-
-file(WRITE "${CMAKE_CURRENT_SOURCE_DIR}/__init__.py" "")
-set(GPDB_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../)
-include_directories(${GPDB_SRC_DIR}/src/include
- ${GPDB_SRC_DIR}/src/interfaces/libpq
- ${GPDB_SRC_DIR}/src/include/port
- ${GPDB_SRC_DIR}/src/include/port/win32
- ${GPDB_SRC_DIR}/src/include/port/win32_msvc
- ${GPDB_SRC_DIR}/src/port
- ${Python2_INCLUDE_DIRS})
-link_directories(${CMAKE_PREFIX_PATH}/lib)
-find_library(LIBPQ NAMES libpq HINTS ${CMAKE_INSTALL_PREFIX}/LIB)
-find_library(LIBPGPORT NAMES libpgport HINTS ${CMAKE_INSTALL_PREFIX}/LIB)
-find_library(LIBPGCOMMON NAMES libpgcommon HINTS ${CMAKE_INSTALL_PREFIX}/LIB)
-
-add_library (pygresql SHARED pgmodule.c)
-target_link_libraries(pygresql ${LIBPQ} ${LIBPGPORT} ${LIBPGCOMMON} ws2_32 secur32 ${Python2_LIBRARIES})
-
-set_target_properties(pygresql PROPERTIES OUTPUT_NAME "_pg")
-set_target_properties(pygresql PROPERTIES SUFFIX ".pyd")
-install(TARGETS pygresql DESTINATION lib/python)
-install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/pg.py DESTINATION lib/python)
-install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/pgdb.py DESTINATION lib/python)
-install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/__init__.py DESTINATION lib/python)
\ No newline at end of file
diff --git a/gpMgmt/bin/pythonSrc/ext/PyGreSQL-5.2.tar.gz b/gpMgmt/bin/pythonSrc/ext/PyGreSQL-5.2.tar.gz
deleted file mode 100644
index 9c39e0acc34..00000000000
Binary files a/gpMgmt/bin/pythonSrc/ext/PyGreSQL-5.2.tar.gz and /dev/null differ
diff --git a/gpMgmt/bin/pythonSrc/ext/PyYAML-5.3.1.tar.gz b/gpMgmt/bin/pythonSrc/ext/PyYAML-5.3.1.tar.gz
deleted file mode 100644
index 915d67b38f2..00000000000
Binary files a/gpMgmt/bin/pythonSrc/ext/PyYAML-5.3.1.tar.gz and /dev/null differ
diff --git a/gpMgmt/bin/pythonSrc/ext/mock-1.0.1.tar.gz b/gpMgmt/bin/pythonSrc/ext/mock-1.0.1.tar.gz
deleted file mode 100644
index 4fdea77c71c..00000000000
Binary files a/gpMgmt/bin/pythonSrc/ext/mock-1.0.1.tar.gz and /dev/null differ
diff --git a/gpMgmt/bin/pythonSrc/ext/psutil-5.7.0.tar.gz b/gpMgmt/bin/pythonSrc/ext/psutil-5.7.0.tar.gz
deleted file mode 100644
index e8b1d804420..00000000000
Binary files a/gpMgmt/bin/pythonSrc/ext/psutil-5.7.0.tar.gz and /dev/null differ
diff --git a/gpMgmt/sbin/gpsegstop.py b/gpMgmt/sbin/gpsegstop.py
index ab369f63473..9f13c10883b 100755
--- a/gpMgmt/sbin/gpsegstop.py
+++ b/gpMgmt/sbin/gpsegstop.py
@@ -23,7 +23,6 @@
from gppylib.commands import gp
from gppylib.commands.gp import SEGMENT_STOP_TIMEOUT_DEFAULT, DEFAULT_SEGHOST_NUM_WORKERS
from gppylib.commands import pg
-from gppylib.db import dbconn
from gppylib import pgconf
from gppylib.commands.gp import is_pid_postmaster
diff --git a/gpMgmt/test/behave/mgmt_utils/analyzedb.feature b/gpMgmt/test/behave/mgmt_utils/analyzedb.feature
index 5809c7745a9..40b8d87623b 100644
--- a/gpMgmt/test/behave/mgmt_utils/analyzedb.feature
+++ b/gpMgmt/test/behave/mgmt_utils/analyzedb.feature
@@ -1781,12 +1781,3 @@ Feature: Incrementally analyze the database
And the user executes "CREATE TEMP TABLE spiegelungssätze (c1 int) DISTRIBUTED BY (c1)" with named connection "default"
When the user runs "analyzedb -a -d special_encoding_db"
Then analyzedb should return a return code of 0
-
- Scenario: analyzedb finds materialized views
- Given a materialized view "public.mv_test_view" exists on table "pg_class"
- And the user runs "analyzedb -a -d incr_analyze"
- Then analyzedb should print "-public.mv_test_view" to stdout
- And the user runs "analyzedb -a -s public -d incr_analyze"
- Then analyzedb should print "-public.mv_test_view" to stdout
- And the user runs "analyzedb -a -t public.mv_test_view -d incr_analyze"
- Then analyzedb should print "-public.mv_test_view" to stdout
diff --git a/gpMgmt/test/behave/mgmt_utils/environment.py b/gpMgmt/test/behave/mgmt_utils/environment.py
index d79f9c18acc..c1dcca351ae 100644
--- a/gpMgmt/test/behave/mgmt_utils/environment.py
+++ b/gpMgmt/test/behave/mgmt_utils/environment.py
@@ -62,12 +62,20 @@ def before_feature(context, feature):
dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
+ dbconn.execSQL(context.conn, 'create table spiegelungssätze(col_ä integer, 列2 integer)')
dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
+ dbconn.execSQL(context.conn, 'insert into spiegelungssätze values(1, 5)')
+    # The minirepro tests require statistics about the contents of the database,
+    # so run ANALYZE to populate the pg_statistic catalog table.
+ dbconn.execSQL(context.conn, 'analyze t1')
+ dbconn.execSQL(context.conn, 'analyze t2')
+ dbconn.execSQL(context.conn, 'analyze t3')
+ dbconn.execSQL(context.conn, 'analyze spiegelungssätze')
context.conn.commit()
if 'gppkg' in feature.tags:
diff --git a/gpMgmt/test/behave/mgmt_utils/gpcheckcat.feature b/gpMgmt/test/behave/mgmt_utils/gpcheckcat.feature
index d9b91838909..84cdcba4b03 100644
--- a/gpMgmt/test/behave/mgmt_utils/gpcheckcat.feature
+++ b/gpMgmt/test/behave/mgmt_utils/gpcheckcat.feature
@@ -9,6 +9,9 @@ Feature: gpcheckcat tests
Given database "all_good" is dropped and recreated
Then the user runs "gpcheckcat -A"
Then gpcheckcat should return a return code of 0
+ When the user runs "gpcheckcat -C pg_class"
+ Then gpcheckcat should return a return code of 0
+ And gpcheckcat should not print "Execution error:" to stdout
And the user runs "dropdb all_good"
Scenario: gpcheckcat should drop leaked schemas
diff --git a/gpMgmt/test/behave/mgmt_utils/minirepro.feature b/gpMgmt/test/behave/mgmt_utils/minirepro.feature
index 15e9c666a51..756803aafac 100644
--- a/gpMgmt/test/behave/mgmt_utils/minirepro.feature
+++ b/gpMgmt/test/behave/mgmt_utils/minirepro.feature
@@ -263,3 +263,15 @@ Feature: Dump minimum database objects that is related to the query
And the output file "/tmp/out.sql" should contain "Table: t3, Attribute: f"
And the output file "/tmp/out.sql" should be loaded to database "minidb_tmp" without error
And the file "/tmp/in.sql" should be executed in database "minidb_tmp" without error
+
+ @minirepro_core
+  Scenario: Dump database objects related to a select query on a table with specially encoded characters
+ Given the file "/tmp/in.sql" exists and contains "select * from spiegelungssätze;"
+ And the file "/tmp/out.sql" does not exist
+ When the user runs "minirepro minireprodb -q /tmp/in.sql -f /tmp/out.sql"
+ Then the output file "/tmp/out.sql" should exist
+ And the output file "/tmp/out.sql" should not contain "CREATE TABLE public.spiegelungssätze"
+ And the output file "/tmp/out.sql" should contain "Table: spiegelungssätze, Attribute: col_ä"
+ And the output file "/tmp/out.sql" should contain "Table: spiegelungssätze, Attribute: 列2"
+ And the output file "/tmp/out.sql" should be loaded to database "minidb_tmp" without error
+ And the file "/tmp/in.sql" should be executed in database "minidb_tmp" without error
diff --git a/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py b/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py
index dfcc137fe71..0d025ea7044 100644
--- a/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py
+++ b/gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py
@@ -19,7 +19,8 @@
from datetime import datetime, timedelta
from os import path
from contextlib import closing
-
+import psycopg2
+from psycopg2 import extras
from gppylib.gparray import GpArray, ROLE_PRIMARY, ROLE_MIRROR
from gppylib.commands.gp import SegmentStart, GpStandbyStart, CoordinatorStop
from gppylib.commands import gp
@@ -3159,31 +3160,41 @@ def impl(context, table_name):
dbname = 'gptest'
conn = dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)
context.long_run_select_only_conn = conn
+ cursor = conn.cursor()
+ context.long_run_select_only_cursor = cursor
+
+    # Start a read-only transaction.
+ cursor.execute("BEGIN")
query = """SELECT gp_segment_id, * from %s order by 1, 2""" % table_name
- data_result = dbconn.query(conn, query).fetchall()
+ cursor.execute(query)
+ data_result = cursor.fetchall()
+
context.long_run_select_only_data_result = data_result
query = """SELECT txid_current()"""
- xid = dbconn.querySingleton(conn, query)
+ cursor.execute(query)
+ xid = cursor.fetchone()[0]
context.long_run_select_only_xid = xid
@then('verify that long-run read-only transaction still exists on {table_name}')
def impl(context, table_name):
dbname = 'gptest'
- conn = context.long_run_select_only_conn
+ cursor = context.long_run_select_only_cursor
query = """SELECT gp_segment_id, * from %s order by 1, 2""" % table_name
- data_result = dbconn.query(conn, query).fetchall()
+ cursor.execute(query)
+ data_result = cursor.fetchall()
query = """SELECT txid_current()"""
- xid = dbconn.querySingleton(conn, query)
+ cursor.execute(query)
+ xid = cursor.fetchone()[0]
if (xid != context.long_run_select_only_xid or
data_result != context.long_run_select_only_data_result):
error_str = "Incorrect xid or select result of long run read-only transaction: \
- xid(before %s, after %), result(before %s, after %s)"
- raise Exception(error_str % (context.long_run_select_only_xid, xid, context.long_run_select_only_data_result, data_result))
+ xid(before {}, after {}), result(before {}, after {})"
+ raise Exception(error_str.format(context.long_run_select_only_xid, xid, context.long_run_select_only_data_result, data_result))
@given('a long-run transaction starts')
def impl(context):
@@ -3191,30 +3202,36 @@ def impl(context):
conn = dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)
context.long_run_conn = conn
+ cursor = conn.cursor()
+ context.long_run_cursor = cursor
+
+ cursor.execute("BEGIN")
+
query = """SELECT txid_current()"""
- xid = dbconn.querySingleton(conn, query)
+ cursor.execute(query)
+ xid = cursor.fetchone()[0]
context.long_run_xid = xid
@then('verify that long-run transaction aborted for changing the catalog by creating table {table_name}')
def impl(context, table_name):
- dbname = 'gptest'
- conn = context.long_run_conn
+ cursor = context.long_run_cursor
query = """SELECT txid_current()"""
- xid = dbconn.querySingleton(conn, query)
+ cursor.execute(query)
+ xid = cursor.fetchone()[0]
if context.long_run_xid != xid:
raise Exception("Incorrect xid of long run transaction: before %s, after %s" %
(context.long_run_xid, xid));
query = """CREATE TABLE %s (a INT)""" % table_name
try:
- data_result = dbconn.query(conn, query)
- except Exception as msg:
- key_msg = "FATAL: cluster is expanded"
- if key_msg not in msg.__str__():
- raise Exception("transaction not abort correctly, errmsg:%s" % msg)
+ cursor.execute(query)
+ except Exception as e:
+ key_msg = "cluster is expanded from"
+ if key_msg not in str(e):
+            raise Exception("transaction did not abort correctly, errmsg: %s" % str(e))
else:
- raise Exception("transaction not abort, result:%s" % data_result)
+        raise Exception("transaction did not abort")
@when('verify that the cluster has {num_of_segments} new segments')
@then('verify that the cluster has {num_of_segments} new segments')
@@ -3832,7 +3849,7 @@ def impl(context):
@then('the database locales are saved')
def impl(context):
- with closing(dbconn.connect(dbconn.DbURL())) as conn:
+ with closing(dbconn.connect(dbconn.DbURL(), cursorFactory=psycopg2.extras.NamedTupleCursor)) as conn:
rows = dbconn.query(conn, "SELECT name, setting FROM pg_settings WHERE name LIKE 'lc_%'").fetchall()
context.database_locales = {row.name: row.setting for row in rows}
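The explicit BEGIN in the long-run steps pins a transaction open across behave steps; depending on how dbconn.connect configures autocommit, psycopg2 either opens the transaction implicitly on the first execute() or needs the statement spelled out as above. Either way, both txid_current() probes must run on the same connection for the comparison to be meaningful. A compact sketch (the connection setup is illustrative):

    import psycopg2

    conn = psycopg2.connect(dbname='gptest')
    cur = conn.cursor()
    cur.execute("BEGIN")  # emits a WARNING, not an error, if a transaction is already open
    cur.execute("SELECT txid_current()")
    xid_before = cur.fetchone()[0]
    # ... cluster events under test happen here ...
    cur.execute("SELECT txid_current()")
    assert cur.fetchone()[0] == xid_before, "transaction was restarted"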
diff --git a/gpMgmt/test/behave/mgmt_utils/steps/mirrors_mgmt_utils.py b/gpMgmt/test/behave/mgmt_utils/steps/mirrors_mgmt_utils.py
index 47d4a400056..1b8c1caa7f0 100644
--- a/gpMgmt/test/behave/mgmt_utils/steps/mirrors_mgmt_utils.py
+++ b/gpMgmt/test/behave/mgmt_utils/steps/mirrors_mgmt_utils.py
@@ -479,7 +479,7 @@ def make_temp_dir_on_remote(context, hostname, tmp_base_dir_remote, mode='700'):
raise Exception("tmp_base_dir cannot be empty")
tempfile_cmd = Command(name="Create temp directory on remote host",
- cmdStr=""" python -c "import tempfile; t=tempfile.mkdtemp(dir='{}');print(t)" """
+ cmdStr=""" python3 -c "import tempfile; t=tempfile.mkdtemp(dir='{}');print(t)" """
.format(tmp_base_dir_remote),
remoteHost=hostname, ctxt=REMOTE)
tempfile_cmd.run(validateAfter=True)
diff --git a/gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py b/gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py
index aa9b4a011c1..1c20e690d53 100644
--- a/gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py
+++ b/gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py
@@ -28,7 +28,7 @@ def create_cluster(context, with_mirrors=True):
cd ../gpAux/gpdemo; \
export DEMO_PORT_BASE={port_base} && \
export NUM_PRIMARY_MIRROR_PAIRS={num_primary_mirror_pairs} && \
- export WITH_MIRRORS={with_mirrors} && \A
+ export WITH_MIRRORS={with_mirrors} && \
./demo_cluster.sh -d && ./demo_cluster.sh -c && \
./demo_cluster.sh
""".format(port_base=os.getenv('PORT_BASE', 15432),
@@ -108,18 +108,17 @@ def step_impl(context):
def step_impl(context):
result_cursor = query_sql(
"postgres",
- "select pg_get_replication_slots() from gp_dist_random('gp_id') order by gp_segment_id"
+ "select (pg_get_replication_slots()).* from gp_dist_random('gp_id') order by gp_segment_id"
)
if result_cursor.rowcount != context.current_cluster_size:
raise Exception("expected all %d primaries to have replication slots, only %d have slots" % (context.current_cluster_size, results.rowcount))
- for content_id, result in enumerate(result_cursor.fetchall()):
- pg_rep_slot = result[0]
- if (pg_rep_slot[0], pg_rep_slot[2], pg_rep_slot[4]) != ('internal_wal_replication_slot','physical','f') :
+ for content_id, pg_rep_slot in enumerate(result_cursor.fetchall()):
+ if (pg_rep_slot[0], pg_rep_slot[2], pg_rep_slot[4]) != ('internal_wal_replication_slot', 'physical', False) :
raise Exception(
"expected replication slot to be active for content id %d, got %s" %
- (content_id, result[0])
+ (content_id, pg_rep_slot)
)
@then('the mirrors should not have replication slots')
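Two driver details motivate this hunk: (pg_get_replication_slots()).* expands the record into ordinary columns instead of one composite value, and psycopg2 returns boolean columns as Python bools, so the old comparison against the string 'f' becomes a comparison against False:

    # A fetched row is a plain tuple whose boolean fields are Python bools
    # (values below are illustrative):
    pg_rep_slot = ('internal_wal_replication_slot', None, 'physical', None, False)
    assert (pg_rep_slot[0], pg_rep_slot[2], pg_rep_slot[4]) == \
           ('internal_wal_replication_slot', 'physical', False)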
diff --git a/gpMgmt/test/behave_utils/gpexpand_dml.py b/gpMgmt/test/behave_utils/gpexpand_dml.py
index 8658c7e58ff..55f0e37b34d 100755
--- a/gpMgmt/test/behave_utils/gpexpand_dml.py
+++ b/gpMgmt/test/behave_utils/gpexpand_dml.py
@@ -31,9 +31,15 @@ def __init__(self, dbname, dmltype):
def run(self):
conn = dbconn.connect(dbconn.DbURL(dbname=self.dbname), unsetSearchPath=False)
+ with conn.cursor() as cur:
+ cur.execute("BEGIN")
+
self.loop(conn)
self.verify(conn)
+ with conn.cursor() as cur:
+ cur.execute("COMMIT")
+
conn.commit()
conn.close()
@@ -109,13 +115,15 @@ def loop_step(self):
def verify(self, conn):
sql = '''
select c1 from {tablename} order by c1;
- '''.format(tablename=self.tablename, counter=self.counter)
- results = dbconn.query(conn, sql).fetchall()
+ '''.format(tablename=self.tablename)
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ results = cur.fetchall()
- for i in range(0, self.counter):
- if i != int(results[i][0]):
- self.report_incorrect_result()
- return
+ for i in range(self.counter):
+ if i != int(results[i][0]):
+ self.report_incorrect_result()
+ return
class TestUpdate(TestDML):
datasize = 1000
@@ -135,13 +143,15 @@ def loop_step(self):
def verify(self, conn):
sql = '''
select c2 from {tablename} order by c1;
- '''.format(tablename=self.tablename, counter=self.counter)
- results = dbconn.query(conn, sql).fetchall()
+ '''.format(tablename=self.tablename)
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ results = cur.fetchall()
- for i in range(0, self.datasize):
- if i + self.counter - 1 != int(results[i][0]):
- self.report_incorrect_result()
- return
+ for i in range(self.datasize):
+ if i + self.counter - 1 != int(results[i][0]):
+ self.report_incorrect_result()
+ return
class TestDelete(TestDML):
datasize = 100000
@@ -161,13 +171,15 @@ def loop_step(self):
def verify(self, conn):
sql = '''
select c1 from {tablename} order by c1;
- '''.format(tablename=self.tablename, counter=self.counter)
- results = dbconn.query(conn, sql).fetchall()
-
- for i in range(self.counter, self.datasize):
- if i != int(results[i - self.counter][0]):
- self.report_incorrect_result()
- return
+ '''.format(tablename=self.tablename)
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ results = cur.fetchall()
+
+ for i in range(self.counter, self.datasize):
+ if i != int(results[i - self.counter][0]):
+ self.report_incorrect_result()
+ return
# for test only
if __name__ == '__main__':
diff --git a/gpMgmt/test/behave_utils/utils.py b/gpMgmt/test/behave_utils/utils.py
index a61febcd9f7..06c8c2209a6 100644
--- a/gpMgmt/test/behave_utils/utils.py
+++ b/gpMgmt/test/behave_utils/utils.py
@@ -11,15 +11,13 @@
import subprocess
import difflib
-import pg
-
from contextlib import closing
from datetime import datetime
from gppylib.commands.base import Command, ExecutionError, REMOTE
from gppylib.commands.gp import chk_local_db_running, get_coordinatordatadir
from gppylib.db import dbconn
from gppylib.gparray import GpArray, MODE_SYNCHRONIZED
-
+from gppylib.utils import escape_string
PARTITION_START_DATE = '2010-01-01'
PARTITION_END_DATE = '2013-01-01'
@@ -317,14 +315,14 @@ def check_table_exists(context, dbname, table_name, table_type=None, host=None,
FROM pg_class c, pg_namespace n
WHERE c.relname = '%s' AND n.nspname = '%s' AND c.relnamespace = n.oid;
"""
- SQL = SQL_format % (escape_string(tablename, conn=conn), escape_string(schemaname, conn=conn))
+ SQL = SQL_format % (escape_string(tablename), escape_string(schemaname))
else:
SQL_format = """
SELECT oid, relkind, relam, reloptions \
FROM pg_class \
WHERE relname = E'%s';\
"""
- SQL = SQL_format % (escape_string(table_name, conn=conn))
+ SQL = SQL_format % (escape_string(table_name))
table_row = None
try:
@@ -773,11 +771,6 @@ def replace_special_char_env(str):
str = str.replace("$%s" % var, os.environ[var])
return str
-
-def escape_string(string, conn):
- return pg.DB(db=conn).escape_string(string)
-
-
def wait_for_unblocked_transactions(context, num_retries=150):
"""
Tries once a second to successfully commit a transaction to the database
diff --git a/gpMgmt/test/coveragerc b/gpMgmt/test/coveragerc
index 511c2626fbe..566c9c83169 100644
--- a/gpMgmt/test/coveragerc
+++ b/gpMgmt/test/coveragerc
@@ -8,7 +8,4 @@ branch = True
omit =
*/site-packages/*
*/bin/behave
- */python/psutil/*
- */python/pygresql/*
- */python/yaml/*
*/python/lockfile/*
\ No newline at end of file
diff --git a/gpcontrib/gp_replica_check/gp_replica_check.py b/gpcontrib/gp_replica_check/gp_replica_check.py
index 1627b859c1e..d00b66dc34a 100755
--- a/gpcontrib/gp_replica_check/gp_replica_check.py
+++ b/gpcontrib/gp_replica_check/gp_replica_check.py
@@ -40,8 +40,7 @@
import time
import os
from collections import defaultdict
-from pg import DB
-
+import psycopg2
def run_sql(sql, host=None, port=None,
dbname="postgres", is_query=True,
@@ -51,10 +50,15 @@ def run_sql(sql, host=None, port=None,
if port is None:
port = int(os.getenv("PGPORT"))
opt = "-c gp_role=utility" if is_utility else None
- with DB(dbname=dbname, host=host, port=port, opt=opt) as db:
- r = db.query(sql)
- if is_query:
- return r.getresult()
+ try:
+ with psycopg2.connect(dbname=dbname, host=host, port=port, options=opt) as conn:
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ if is_query:
+ resultList = cur.fetchall()
+ return resultList
+ except Exception as e:
+        print('Exception: %s while running query %s (dbname=%s)' % (e, sql, dbname))
class ReplicaCheck(threading.Thread):
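Note the asymmetry between the two context managers in run_sql(): with psycopg2.connect(...) as conn commits on success and rolls back on exception but leaves the connection open, while with conn.cursor() as cur does close the cursor. A sketch that makes the connection's lifetime explicit (the query is illustrative):

    from contextlib import closing
    import psycopg2

    with closing(psycopg2.connect(dbname='postgres')) as conn:
        with conn:  # transaction scope only: commit or rollback
            with conn.cursor() as cur:
                cur.execute('SELECT 1')
                print(cur.fetchall())
    # closing() guarantees conn.close() here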
diff --git a/gpcontrib/gpcloud/regress/input/5_01_normal_http_param.source b/gpcontrib/gpcloud/regress/input/5_01_normal_http_param.source
index 6b18334c0d9..66f3564a100 100644
--- a/gpcontrib/gpcloud/regress/input/5_01_normal_http_param.source
+++ b/gpcontrib/gpcloud/regress/input/5_01_normal_http_param.source
@@ -1,6 +1,6 @@
SET client_min_messages TO 'warning';
CREATE EXTERNAL WEB TABLE dummyHttpServerstart (x text)
-execute E'((python @abs_srcdir@/dummyHTTPServer.py -p 8553 -f @config_file@ -t Parameter_Server >/dev/null 2>&1 &);for i in `seq 1 30`; do curl http://127.0.0.1:8553 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
+execute E'((python3 @abs_srcdir@/dummyHTTPServer.py -p 8553 -f @config_file@ -t Parameter_Server >/dev/null 2>&1 &);for i in `seq 1 30`; do curl http://127.0.0.1:8553 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
on SEGMENT 0
FORMAT 'text' (delimiter '|');
diff --git a/gpcontrib/gpcloud/regress/output/5_01_normal_http_param.source b/gpcontrib/gpcloud/regress/output/5_01_normal_http_param.source
index dbe5349e385..68b4d131d52 100644
--- a/gpcontrib/gpcloud/regress/output/5_01_normal_http_param.source
+++ b/gpcontrib/gpcloud/regress/output/5_01_normal_http_param.source
@@ -1,6 +1,6 @@
SET client_min_messages TO 'warning';
CREATE EXTERNAL WEB TABLE dummyHttpServerstart (x text)
-execute E'((python @abs_srcdir@/dummyHTTPServer.py -p 8553 -f @config_file@ -t Parameter_Server >/dev/null 2>&1 &);for i in `seq 1 30`; do curl http://127.0.0.1:8553 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
+execute E'((python3 @abs_srcdir@/dummyHTTPServer.py -p 8553 -f @config_file@ -t Parameter_Server >/dev/null 2>&1 &);for i in `seq 1 30`; do curl http://127.0.0.1:8553 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
on SEGMENT 0
FORMAT 'text' (delimiter '|');
CREATE EXTERNAL WEB TABLE dummyHttpServerstop (x text)
diff --git a/python-dependencies.txt b/python-dependencies.txt
index 9e63afef528..2fd60bd41b1 100644
--- a/python-dependencies.txt
+++ b/python-dependencies.txt
@@ -1,3 +1,3 @@
psutil==5.7.0
-pygresql==5.2
pyyaml==5.3.1
+psycopg2==2.9.6
diff --git a/src/Makefile.global.in b/src/Makefile.global.in
index 8a195a39856..22c74e7a856 100644
--- a/src/Makefile.global.in
+++ b/src/Makefile.global.in
@@ -192,7 +192,6 @@ bitcodedir = $(pkglibdir)/bitcode
with_icu = @with_icu@
with_perl = @with_perl@
with_python = @with_python@
-with_pythonsrc_ext = @with_pythonsrc_ext@
with_tcl = @with_tcl@
with_ssl = @with_ssl@
with_readline = @with_readline@
diff --git a/src/test/gpdb_pitr/expected/gpdb_pitr_setup.out b/src/test/gpdb_pitr/expected/gpdb_pitr_setup.out
index 48df10e656d..52fb6f56c3f 100644
--- a/src/test/gpdb_pitr/expected/gpdb_pitr_setup.out
+++ b/src/test/gpdb_pitr/expected/gpdb_pitr_setup.out
@@ -1,26 +1,26 @@
-- Create some tables and load some data
-- We do 1 row for gpdb_one_phase_commit to bypass autostats later
CREATE TABLE gpdb_two_phase_commit_before_acquire_share_lock(num int);
-CREATE
+CREATE TABLE
CREATE TABLE gpdb_two_phase_commit_after_acquire_share_lock(num int);
-CREATE
+CREATE TABLE
CREATE TABLE gpdb_one_phase_commit(num int);
-CREATE
+CREATE TABLE
CREATE TABLE gpdb_two_phase_commit_after_restore_point(num int);
-CREATE
+CREATE TABLE
INSERT INTO gpdb_two_phase_commit_before_acquire_share_lock SELECT generate_series(1, 10);
-INSERT 10
+INSERT 0 10
INSERT INTO gpdb_two_phase_commit_after_acquire_share_lock SELECT generate_series(1, 10);
-INSERT 10
+INSERT 0 10
INSERT INTO gpdb_one_phase_commit VALUES (1);
-INSERT 1
+INSERT 0 1
-- Inject suspend faults that will be used later to test different
-- distributed commit scenarios, and to also test the commit blocking
-- requirement which should only block twophase commits during
-- distributed commit broadcast when a restore point is being created.
1: CREATE EXTENSION IF NOT EXISTS gp_inject_fault;
-CREATE
+CREATE EXTENSION
1: SELECT gp_inject_fault('dtm_broadcast_prepare', 'suspend', 1);
gp_inject_fault
-----------------
@@ -50,7 +50,7 @@ DELETE 10
4: BEGIN;
BEGIN
4: INSERT INTO gpdb_two_phase_commit_after_restore_point SELECT generate_series(1, 10);
-INSERT 10
+INSERT 0 10
4&: SELECT gp_segment_id, count(*) FROM gp_create_restore_point('test_restore_point') GROUP BY gp_segment_id ORDER BY gp_segment_id;
1: SELECT gp_wait_until_triggered_fault('gp_create_restore_point_acquired_lock', 1, 1);
gp_wait_until_triggered_fault
@@ -72,7 +72,7 @@ INSERT 10
3&: COMMIT;
-- One-phase commit query should not block.
1: INSERT INTO gpdb_one_phase_commit VALUES (2);
-INSERT 1
+INSERT 0 1
-- Read-only query should not block.
1: SELECT * FROM gpdb_two_phase_commit_before_acquire_share_lock;
num
@@ -154,7 +154,7 @@ SELECT * FROM gpdb_two_phase_commit_after_restore_point ORDER BY num;
-- must do this in a plpgsql cursor because of a known limitation with
-- CTAS on an EXECUTE ON COORDINATOR function.
CREATE TEMP TABLE switch_walfile_names(content_id smallint, walfilename text);
-CREATE
+CREATE TABLE
CREATE OR REPLACE FUNCTION populate_switch_walfile_names() RETURNS void AS $$ DECLARE curs CURSOR FOR SELECT * FROM gp_switch_wal(); /*in func*/ DECLARE rec record; /*in func*/ BEGIN /*in func*/ OPEN curs; /*in func*/ LOOP FETCH curs INTO rec; /*in func*/ EXIT WHEN NOT FOUND; /*in func*/
INSERT INTO switch_walfile_names VALUES (rec.gp_segment_id, rec.pg_walfile_name); /*in func*/ END LOOP; /*in func*/ END $$ LANGUAGE plpgsql; /*in func*/ SELECT populate_switch_walfile_names();
populate_switch_walfile_names
@@ -166,7 +166,7 @@ INSERT INTO switch_walfile_names VALUES (rec.gp_segment_id, rec.pg_walfile_name)
-- This function loops until the archival is complete. It times out after
-- approximately 10mins.
CREATE OR REPLACE FUNCTION check_archival() RETURNS BOOLEAN AS $$ DECLARE archived BOOLEAN; /*in func*/ DECLARE archived_count INTEGER; /*in func*/ BEGIN /*in func*/ FOR i in 1..3000 LOOP SELECT bool_and(seg_archived), count(*) FROM (SELECT last_archived_wal = l.walfilename AS seg_archived FROM switch_walfile_names l INNER JOIN gp_stat_archiver a ON l.content_id = a.gp_segment_id) s INTO archived, archived_count; /*in func*/ IF archived AND archived_count = 4 THEN RETURN archived; /*in func*/ END IF; /*in func*/ PERFORM pg_sleep(0.2); /*in func*/ END LOOP; /*in func*/ END $$ LANGUAGE plpgsql;
-CREATE
+CREATE FUNCTION
SELECT check_archival();
check_archival
diff --git a/src/test/gpdb_pitr/expected/test_gp_switch_wal.out b/src/test/gpdb_pitr/expected/test_gp_switch_wal.out
index 6eb3a2d6a0a..8f81bcbf8e4 100644
--- a/src/test/gpdb_pitr/expected/test_gp_switch_wal.out
+++ b/src/test/gpdb_pitr/expected/test_gp_switch_wal.out
@@ -71,3 +71,14 @@ SELECT gp_switch_wal() FROM gp_dist_random('gp_id');
ERROR: function with EXECUTE ON restrictions cannot be used in the SELECT list of a query with FROM
CREATE TABLE this_ctas_should_fail AS SELECT gp_segment_id AS contentid, pg_switch_wal, pg_walfile_name FROM gp_switch_wal();
ERROR: cannot use gp_switch_wal() when not in QD mode (xlogfuncs_gp.c:LINE_NUM)
+
+CREATE ROLE switch_wal_error_role;
+CREATE ROLE
+SET ROLE TO switch_wal_error_role;
+SET
+SELECT * FROM gp_switch_wal();
+ERROR: permission denied for function gp_switch_wal
+RESET ROLE;
+RESET
+DROP ROLE switch_wal_error_role;
+DROP ROLE
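The wholesale 'CREATE' -> 'CREATE TABLE' and 'INSERT 10' -> 'INSERT 0 10' churn in these expected files appears to be the visible side of the driver swap: psycopg2's cursor.statusmessage carries the server's full command tag (including the OID field for INSERT, and COMMIT as the tag for END), where the PyGreSQL-based harness printed an abbreviated form. An illustrative sketch:

    import psycopg2

    conn = psycopg2.connect(dbname='postgres')  # illustrative connection
    cur = conn.cursor()
    cur.execute("CREATE TEMP TABLE t(a int)")
    print(cur.statusmessage)  # CREATE TABLE
    cur.execute("INSERT INTO t SELECT generate_series(1, 10)")
    print(cur.statusmessage)  # INSERT 0 10
    conn.rollback()
    conn.close()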
diff --git a/src/test/isolation2/expected/add_column_after_vacuum_skip_drop_column.out b/src/test/isolation2/expected/add_column_after_vacuum_skip_drop_column.out
index 62468f7904a..55cf5abd109 100644
--- a/src/test/isolation2/expected/add_column_after_vacuum_skip_drop_column.out
+++ b/src/test/isolation2/expected/add_column_after_vacuum_skip_drop_column.out
@@ -4,9 +4,9 @@
-- the test expected result were adjusted accordingly.
--
CREATE TABLE aoco_add_column_after_vacuum_skip_drop (a INT, b INT) WITH (appendonly=true, orientation=column);
-CREATE
+CREATE TABLE
INSERT INTO aoco_add_column_after_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 10) AS i;
-INSERT 10
+INSERT 0 10
DELETE FROM aoco_add_column_after_vacuum_skip_drop;
DELETE 10
@@ -30,7 +30,7 @@ BEGIN
2: VACUUM aoco_add_column_after_vacuum_skip_drop;
VACUUM
1: END;
-END
+COMMIT
-- We should see an aocsseg in state 2 (AOSEG_STATE_AWAITING_DROP)
0U: SELECT segno, column_num, state FROM gp_toolkit.__gp_aocsseg('aoco_add_column_after_vacuum_skip_drop');
@@ -44,7 +44,7 @@ END
-- The ADD COLUMN should clean up aocssegs in state 2 (AOSEG_STATE_AWAITING_DROP)
1: ALTER TABLE aoco_add_column_after_vacuum_skip_drop ADD COLUMN c INT DEFAULT 0;
-ALTER
+ALTER TABLE
0U: SELECT segno, column_num, state FROM gp_toolkit.__gp_aocsseg('aoco_add_column_after_vacuum_skip_drop');
segno | column_num | state
-------+------------+-------
@@ -58,7 +58,7 @@ ALTER
-- Check if insert goes into segno 1 instead of segno 2
1: INSERT INTO aoco_add_column_after_vacuum_skip_drop SELECT i as a, i as b, i as c FROM generate_series(1, 100) AS i;
-INSERT 100
+INSERT 0 100
0U: SELECT segno, tupcount > 0, state FROM gp_toolkit.__gp_aocsseg('aoco_add_column_after_vacuum_skip_drop');
segno | ?column? | state
-------+----------+-------
diff --git a/src/test/isolation2/expected/alter_blocks_for_update_and_viceversa.out b/src/test/isolation2/expected/alter_blocks_for_update_and_viceversa.out
index 18ef845b750..7f9e1a7004a 100644
--- a/src/test/isolation2/expected/alter_blocks_for_update_and_viceversa.out
+++ b/src/test/isolation2/expected/alter_blocks_for_update_and_viceversa.out
@@ -1,10 +1,10 @@
-- setup
1: drop table if exists alter_block;
-DROP
+DROP TABLE
1: create table alter_block(a int, b int) distributed by (a);
-CREATE
+CREATE TABLE
1: insert into alter_block select 1, 1;
-INSERT 1
+INSERT 0 1
-- Validate UPDATE blocks the Alter
2: BEGIN;
BEGIN
@@ -20,12 +20,12 @@ UPDATE 1
2: COMMIT;
COMMIT
1<: <... completed>
-ALTER
+ALTER TABLE
-- Now validate ALTER blocks the UPDATE
2: BEGIN;
BEGIN
2: ALTER TABLE alter_block SET DISTRIBUTED BY (a);
-ALTER
+ALTER TABLE
1&: UPDATE alter_block SET b = b + 1;
2: SELECT wait_event_type FROM pg_stat_activity where query like 'UPDATE alter_block SET %';
wait_event_type
diff --git a/src/test/isolation2/expected/ao_blkdir.out b/src/test/isolation2/expected/ao_blkdir.out
index f202d8ec4da..e626105b911 100644
--- a/src/test/isolation2/expected/ao_blkdir.out
+++ b/src/test/isolation2/expected/ao_blkdir.out
@@ -7,12 +7,12 @@
--------------------------------------------------------------------------------
CREATE TABLE ao_blkdir_test(i int, j int) USING ao_row DISTRIBUTED BY (j);
-CREATE
+CREATE TABLE
CREATE INDEX ao_blkdir_test_idx ON ao_blkdir_test(i);
-CREATE
+CREATE INDEX
1: INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(1, 10) i;
-INSERT 10
+INSERT 0 10
-- There should be 1 block directory row with a single entry covering 10 rows
SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5;
tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
@@ -21,7 +21,7 @@ SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id
(1 row)
1: INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(11, 30) i;
-INSERT 20
+INSERT 0 20
-- There should be 2 block directory entries in a new block directory row, and
-- the row from the previous INSERT should not be visible. The entry from the
-- first INSERT should remain unchanged.
@@ -35,11 +35,11 @@ SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id
1: BEGIN;
BEGIN
1: INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(31, 60) i;
-INSERT 30
+INSERT 0 30
2: BEGIN;
BEGIN
2: INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(71, 110) i;
-INSERT 40
+INSERT 0 40
1: COMMIT;
COMMIT
2: COMMIT;
@@ -57,14 +57,10 @@ SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id
(4 rows)
TRUNCATE ao_blkdir_test;
-TRUNCATE
-set gp_appendonly_insert_files = 0;
-SET
+TRUNCATE TABLE
-- Insert enough rows to overflow the first block directory minipage by 2.
INSERT INTO ao_blkdir_test SELECT i, 2 FROM generate_series(1, 292700) i;
-INSERT 292700
-reset gp_appendonly_insert_files;
-RESET
+INSERT 0 292700
-- There should be 2 block directory rows, one with 161 entries covering 292698
-- rows and the other with 1 entry covering the 2 overflow rows.
SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5;
@@ -236,9 +232,9 @@ SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test')).* FROM gp_dist_random('gp_id
-- Unique index white box tests
DROP TABLE ao_blkdir_test;
-DROP
+DROP TABLE
CREATE TABLE ao_blkdir_test(i int UNIQUE, j int) USING ao_row DISTRIBUTED BY (i);
-CREATE
+CREATE TABLE
SELECT gp_inject_fault('appendonly_insert', 'suspend', '', '', 'ao_blkdir_test', 1, 1, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content = 0;
gp_inject_fault
@@ -284,7 +280,7 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi
Success:
(1 row)
1<: <... completed>
-INSERT 1
+INSERT 0 1
-- The placeholder row is invisible to the INSERTing transaction. Since the
-- INSERT finished, there should be 1 visible blkdir row representing the INSERT.
@@ -317,19 +313,297 @@ COMMIT
(1 row)
DROP TABLE ao_blkdir_test;
-DROP
+DROP TABLE
+
+-- Test that `tupcount` in pg_aoseg == the sum of `row_count` across all
+-- aoblkdir entries for each segno. Test with commits, aborts and deletes.
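+--
+-- (Illustrative sketch only, not executed by this test; 't' is a placeholder
+-- table name.) The invariant could be cross-checked in a single query:
+--   SELECT segno, blkdir_rows, tupcount
+--   FROM (SELECT segno, sum(row_count) AS blkdir_rows
+--         FROM (SELECT (gp_toolkit.__gp_aoblkdir('t')).*
+--               FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0) d
+--         GROUP BY segno) b
+--   JOIN (SELECT segno, sum(tupcount) AS tupcount
+--         FROM gp_toolkit.__gp_aoseg('t') WHERE segment_id = 0
+--         GROUP BY segno) s USING (segno);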
+
+-- Case1: without VACUUM ANALYZE
+CREATE TABLE ao_blkdir_test_rowcount(i int, j int) USING ao_row DISTRIBUTED BY (j);
+CREATE TABLE
+1: BEGIN;
+BEGIN
+2: BEGIN;
+BEGIN
+3: BEGIN;
+BEGIN
+4: BEGIN;
+BEGIN
+1: INSERT INTO ao_blkdir_test_rowcount SELECT i, 2 FROM generate_series(1, 10) i;
+INSERT 0 10
+2: INSERT INTO ao_blkdir_test_rowcount SELECT i, 3 FROM generate_series(1, 20) i;
+INSERT 0 20
+3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 30) i;
+INSERT 0 30
+3: ABORT;
+ROLLBACK
+3: BEGIN;
+BEGIN
+3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 40) i;
+INSERT 0 40
+4: INSERT INTO ao_blkdir_test_rowcount SELECT i, 7 FROM generate_series(1, 50) i;
+INSERT 0 50
+1: COMMIT;
+COMMIT
+2: COMMIT;
+COMMIT
+3: COMMIT;
+COMMIT
+4: COMMIT;
+COMMIT
+DELETE FROM ao_blkdir_test_rowcount WHERE j = 7;
+DELETE 50
+
+CREATE INDEX ao_blkdir_test_rowcount_idx ON ao_blkdir_test_rowcount(i);
+CREATE INDEX
+
+SELECT segno, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno;
+ segno | totalrows
+-------+-----------
+ 1 | 10
+ 2 | 20
+ 3 | 40
+ 4 | 50
+(4 rows)
+SELECT segno, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aoseg('ao_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno;
+ segno | totalrows
+-------+-----------
+ 1 | 10
+ 2 | 20
+ 3 | 40
+ 4 | 50
+(4 rows)
+
+-- Case2: with VACUUM ANALYZE
+DROP TABLE ao_blkdir_test_rowcount;
+DROP TABLE
+CREATE TABLE ao_blkdir_test_rowcount(i int, j int) USING ao_row DISTRIBUTED BY (j);
+CREATE TABLE
+CREATE INDEX ao_blkdir_test_rowcount_idx ON ao_blkdir_test_rowcount(i);
+CREATE INDEX
+1: BEGIN;
+BEGIN
+2: BEGIN;
+BEGIN
+3: BEGIN;
+BEGIN
+4: BEGIN;
+BEGIN
+1: INSERT INTO ao_blkdir_test_rowcount SELECT i, 2 FROM generate_series(1, 10) i;
+INSERT 0 10
+1: INSERT INTO ao_blkdir_test_rowcount SELECT i, 2 FROM ao_blkdir_test_rowcount;
+INSERT 0 10
+1: INSERT INTO ao_blkdir_test_rowcount SELECT i, 2 FROM ao_blkdir_test_rowcount;
+INSERT 0 20
+2: INSERT INTO ao_blkdir_test_rowcount SELECT i, 3 FROM generate_series(1, 20) i;
+INSERT 0 20
+2: INSERT INTO ao_blkdir_test_rowcount SELECT i, 3 FROM ao_blkdir_test_rowcount;
+INSERT 0 20
+2: INSERT INTO ao_blkdir_test_rowcount SELECT i, 3 FROM ao_blkdir_test_rowcount;
+INSERT 0 40
+3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 30) i;
+INSERT 0 30
+3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM ao_blkdir_test_rowcount;
+INSERT 0 30
+3: INSERT INTO ao_blkdir_test_rowcount SELECT i, 4 FROM ao_blkdir_test_rowcount;
+INSERT 0 60
+4: INSERT INTO ao_blkdir_test_rowcount SELECT i, 7 FROM generate_series(1, 50) i;
+INSERT 0 50
+4: INSERT INTO ao_blkdir_test_rowcount SELECT i, 7 FROM ao_blkdir_test_rowcount;
+INSERT 0 50
+4: INSERT INTO ao_blkdir_test_rowcount SELECT i, 7 FROM ao_blkdir_test_rowcount;
+INSERT 0 100
+1: COMMIT;
+COMMIT
+2: COMMIT;
+COMMIT
+3: ABORT;
+ROLLBACK
+4: COMMIT;
+COMMIT
+
+DELETE FROM ao_blkdir_test_rowcount WHERE j = 7;
+DELETE 200
+VACUUM ANALYZE ao_blkdir_test_rowcount;
+VACUUM
+
+SELECT segno, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno;
+ segno | totalrows
+-------+-----------
+ 1 | 40
+ 2 | 80
+(2 rows)
+SELECT segno, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aoseg('ao_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno;
+ segno | totalrows
+-------+-----------
+ 1 | 40
+ 2 | 80
+ 3 | 0
+ 4 | 0
+(4 rows)
+
+UPDATE ao_blkdir_test_rowcount SET i = i + 1;
+UPDATE 120
+VACUUM ANALYZE ao_blkdir_test_rowcount;
+VACUUM
+
+SELECT segno, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('ao_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno;
+ segno | totalrows
+-------+-----------
+ 3 | 120
+(1 row)
+SELECT segno, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aoseg('ao_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno;
+ segno | totalrows
+-------+-----------
+ 1 | 0
+ 2 | 0
+ 3 | 120
+ 4 | 0
+(4 rows)
+
+DROP TABLE ao_blkdir_test_rowcount;
+DROP TABLE
+
+--
+-- Test tuple fetch with holes from ABORTs
+--
+CREATE TABLE ao_fetch_hole(i int, j int) USING ao_row;
+CREATE TABLE
+CREATE INDEX ON ao_fetch_hole(i);
+CREATE INDEX
+INSERT INTO ao_fetch_hole VALUES (2, 0);
+INSERT 0 1
+-- Create a hole after the last entry (of the last minipage) in the blkdir.
+BEGIN;
+BEGIN
+INSERT INTO ao_fetch_hole SELECT 3, j FROM generate_series(1, 20) j;
+INSERT 0 20
+ABORT;
+ROLLBACK
+SELECT (gp_toolkit.__gp_aoblkdir('ao_fetch_hole')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5;
+ tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------+-------+----------------+----------+--------------+-------------+-----------
+ (0,1) | 1 | 0 | 0 | 1 | 0 | 1
+(1 row)
+
+-- Ensure we will do an index scan.
+SET enable_seqscan TO off;
+SET
+SET enable_indexonlyscan TO off;
+SET
+SET optimizer TO off;
+SET
+EXPLAIN SELECT count(*) FROM ao_fetch_hole WHERE i = 3;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Finalize Aggregate (cost=39.80..39.81 rows=1 width=8)
+ -> Gather Motion 1:1 (slice1; segments: 1) (cost=39.74..39.79 rows=3 width=8)
+ -> Partial Aggregate (cost=39.74..39.75 rows=1 width=8)
+ -> Bitmap Heap Scan on ao_fetch_hole (cost=4.82..39.67 rows=29 width=0)
+ Recheck Cond: (i = 3)
+ -> Bitmap Index Scan on ao_fetch_hole_i_idx (cost=0.00..4.81 rows=29 width=0)
+ Index Cond: (i = 3)
+ Optimizer: Postgres query optimizer
+(8 rows)
+
+SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_sysscan', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+SELECT count(*) FROM ao_fetch_hole WHERE i = 3;
+ count
+-------
+ 0
+(1 row)
+-- Since the hole is at the end of the minipage, we can't avoid a sysscan for
+-- each tuple.
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Success: fault name:'AppendOnlyBlockDirectory_GetEntry_sysscan' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'20'
+
+(1 row)
+
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-----------------
+ Success:
+(1 row)
+
+-- Now do 1 more insert, so that the hole is sandwiched between two successive
+-- minipage entries.
+INSERT INTO ao_fetch_hole VALUES (4, 21);
+INSERT 0 1
+SELECT (gp_toolkit.__gp_aoblkdir('ao_fetch_hole')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5;
+ tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------+-------+----------------+----------+--------------+-------------+-----------
+ (0,3) | 1 | 0 | 0 | 1 | 0 | 1
+ (0,3) | 1 | 0 | 1 | 201 | 40 | 1
+(2 rows)
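+-- (Note: assuming gp_fastsequence reserves row numbers in batches of 100, the
+-- aborted 20-row INSERT burned the batch starting at 101, so first_row_no
+-- jumps from 1 to 201 above; the unused range in between is the hole.)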
+
+SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_sysscan', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+SELECT count(*) FROM ao_fetch_hole WHERE i = 3;
+ count
+-------
+ 0
+(1 row)
+-- Since the hole is between two entries, we are always able to find the last
+-- entry in the minipage, determine that the target row doesn't lie within it,
+-- and return early, thereby avoiding an expensive per-tuple sysscan. We do
+-- only 1 sysscan - for the first tuple fetch in the hole - and avoid it for
+-- all subsequent fetches in the hole.
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Success: fault name:'AppendOnlyBlockDirectory_GetEntry_sysscan' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'1'
+
+(1 row)
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Success: fault name:'AppendOnlyBlockDirectory_GetEntry_inter_entry_hole' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'19'
+
+(1 row)
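+-- (A rough sketch of the assumed fetch logic, not the actual backend code: on
+-- a cache miss the fetch sysscans the block directory once; because the
+-- cached minipage now has an entry beyond the hole, subsequent fetches can
+-- compare the target row against the cached entries and return "not found"
+-- without another sysscan, which is what the _inter_entry_hole fault counts.)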
+
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-----------------
+ Success:
+(1 row)
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-----------------
+ Success:
+(1 row)
+
+RESET enable_seqscan;
+RESET
+RESET enable_indexonlyscan;
+RESET
+RESET optimizer;
+RESET
--------------------------------------------------------------------------------
-- AOCO tables
--------------------------------------------------------------------------------
CREATE TABLE aoco_blkdir_test(i int, j int) USING ao_column DISTRIBUTED BY (j);
-CREATE
+CREATE TABLE
CREATE INDEX aoco_blkdir_test_idx ON aoco_blkdir_test(i);
-CREATE
+CREATE INDEX
1: INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(1, 10) i;
-INSERT 10
+INSERT 0 10
-- There should be 2 block directory rows with a single entry covering 10 rows,
-- (1 for each column).
SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5;
@@ -340,7 +614,7 @@ SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_
(2 rows)
1: INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(11, 30) i;
-INSERT 20
+INSERT 0 20
-- There should be 2 block directory rows, carrying 2 entries each. The rows
-- from the previous INSERT should not be visible. The entries from the first
-- INSERT should remain unchanged.
@@ -356,11 +630,11 @@ SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_
1: BEGIN;
BEGIN
1: INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(31, 60) i;
-INSERT 30
+INSERT 0 30
2: BEGIN;
BEGIN
2: INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(71, 110) i;
-INSERT 40
+INSERT 0 40
1: COMMIT;
COMMIT
2: COMMIT;
@@ -382,14 +656,12 @@ SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_
(8 rows)
TRUNCATE aoco_blkdir_test;
-TRUNCATE
+TRUNCATE TABLE
-- Insert enough rows to overflow the first block directory minipage by 2.
set gp_appendonly_insert_files = 0;
SET
INSERT INTO aoco_blkdir_test SELECT i, 2 FROM generate_series(1, 1317143) i;
-INSERT 1317143
-reset gp_appendonly_insert_files;
-RESET
+INSERT 0 1317143
-- There should be 2 block directory rows, 2 for each column, one with 161
-- entries covering 1317141 rows and the other with 1 entry covering the 2
-- overflow rows.
@@ -724,9 +996,9 @@ SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test')).* FROM gp_dist_random('gp_
-- Unique index white box tests
DROP TABLE aoco_blkdir_test;
-DROP
+DROP TABLE
CREATE TABLE aoco_blkdir_test(h int, i int UNIQUE, j int) USING ao_column DISTRIBUTED BY (i);
-CREATE
+CREATE TABLE
SELECT gp_inject_fault('appendonly_insert', 'suspend', '', '', 'aoco_blkdir_test', 1, 1, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content = 0;
gp_inject_fault
@@ -777,7 +1049,7 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi
Success:
(1 row)
1<: <... completed>
-INSERT 1
+INSERT 0 1
-- The placeholder row is invisible to the INSERTing transaction. Since the
-- INSERT finished, there should be 3 visible blkdir rows representing the
@@ -818,10 +1090,308 @@ COMMIT
-- properly resolve uniqueness checks (by consulting the first non-dropped
-- column's block directory row).
3<: <... completed>
-ALTER
+ALTER TABLE
4: INSERT INTO aoco_blkdir_test VALUES (2, 2);
ERROR: duplicate key value violates unique constraint "aoco_blkdir_test_i_key" (seg0 192.168.0.148:7002 pid=176693)
DETAIL: Key (i)=(2) already exists.
DROP TABLE aoco_blkdir_test;
-DROP
+DROP TABLE
+
+-- Test that `tupcount` in pg_ao(cs)seg == the sum of `row_count` across all
+-- aoblkdir entries for each segno and columngroup. Test with commits, aborts
+-- and deletes.
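+--
+-- (Illustrative note.) For AOCO the comparison is per column: blkdir rows are
+-- keyed by (segno, columngroup_no) and aocsseg rows by (segno, column_num),
+-- so the cross-check below groups and joins on those keys instead of segno
+-- alone.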
+
+-- Case1: without VACUUM ANALYZE
+CREATE TABLE aoco_blkdir_test_rowcount(i int, j int) USING ao_column DISTRIBUTED BY (j);
+CREATE TABLE
+1: BEGIN;
+BEGIN
+2: BEGIN;
+BEGIN
+3: BEGIN;
+BEGIN
+4: BEGIN;
+BEGIN
+1: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 2 FROM generate_series(1, 10) i;
+INSERT 0 10
+2: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 3 FROM generate_series(1, 20) i;
+INSERT 0 20
+3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 30) i;
+INSERT 0 30
+3: ABORT;
+ROLLBACK
+3: BEGIN;
+BEGIN
+3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 40) i;
+INSERT 0 40
+4: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 7 FROM generate_series(1, 50) i;
+INSERT 0 50
+1: COMMIT;
+COMMIT
+2: COMMIT;
+COMMIT
+3: COMMIT;
+COMMIT
+4: COMMIT;
+COMMIT
+DELETE FROM aoco_blkdir_test_rowcount WHERE j = 7;
+DELETE 50
+
+CREATE INDEX aoco_blkdir_test_rowcount_idx ON aoco_blkdir_test_rowcount(i);
+CREATE INDEX
+
+SELECT segno, columngroup_no, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno, columngroup_no;
+ segno | columngroup_no | totalrows
+-------+----------------+-----------
+ 1 | 0 | 10
+ 1 | 1 | 10
+ 2 | 0 | 20
+ 2 | 1 | 20
+ 3 | 0 | 40
+ 3 | 1 | 40
+ 4 | 0 | 50
+ 4 | 1 | 50
+(8 rows)
+SELECT segno, column_num, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aocsseg('aoco_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno, column_num;
+ segno | column_num | totalrows
+-------+------------+-----------
+ 1 | 0 | 10
+ 1 | 1 | 10
+ 2 | 0 | 20
+ 2 | 1 | 20
+ 3 | 0 | 40
+ 3 | 1 | 40
+ 4 | 0 | 50
+ 4 | 1 | 50
+(8 rows)
+
+-- Case2: with VACUUM ANALYZE
+DROP TABLE aoco_blkdir_test_rowcount;
+DROP TABLE
+CREATE TABLE aoco_blkdir_test_rowcount(i int, j int) USING ao_column DISTRIBUTED BY (j);
+CREATE TABLE
+CREATE INDEX aoco_blkdir_test_rowcount_idx ON aoco_blkdir_test_rowcount(i);
+CREATE INDEX
+1: BEGIN;
+BEGIN
+2: BEGIN;
+BEGIN
+3: BEGIN;
+BEGIN
+4: BEGIN;
+BEGIN
+1: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 2 FROM generate_series(1, 10) i;
+INSERT 0 10
+1: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 2 FROM aoco_blkdir_test_rowcount;
+INSERT 0 10
+1: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 2 FROM aoco_blkdir_test_rowcount;
+INSERT 0 20
+2: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 3 FROM generate_series(1, 20) i;
+INSERT 0 20
+2: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 3 FROM aoco_blkdir_test_rowcount;
+INSERT 0 20
+2: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 3 FROM aoco_blkdir_test_rowcount;
+INSERT 0 40
+3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM generate_series(1, 30) i;
+INSERT 0 30
+3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM aoco_blkdir_test_rowcount;
+INSERT 0 30
+3: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 4 FROM aoco_blkdir_test_rowcount;
+INSERT 0 60
+4: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 7 FROM generate_series(1, 50) i;
+INSERT 0 50
+4: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 7 FROM aoco_blkdir_test_rowcount;
+INSERT 0 50
+4: INSERT INTO aoco_blkdir_test_rowcount SELECT i, 7 FROM aoco_blkdir_test_rowcount;
+INSERT 0 100
+1: COMMIT;
+COMMIT
+2: COMMIT;
+COMMIT
+3: ABORT;
+ROLLBACK
+4: COMMIT;
+COMMIT
+
+DELETE FROM aoco_blkdir_test_rowcount WHERE j = 7;
+DELETE 200
+VACUUM ANALYZE aoco_blkdir_test_rowcount;
+VACUUM
+
+SELECT segno, columngroup_no, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno, columngroup_no;
+ segno | columngroup_no | totalrows
+-------+----------------+-----------
+ 1 | 0 | 40
+ 1 | 1 | 40
+ 2 | 0 | 80
+ 2 | 1 | 80
+(4 rows)
+SELECT segno, column_num, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aocsseg('aoco_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno, column_num;
+ segno | column_num | totalrows
+-------+------------+-----------
+ 1 | 0 | 40
+ 1 | 1 | 40
+ 2 | 0 | 80
+ 2 | 1 | 80
+ 3 | 0 | 0
+ 3 | 1 | 0
+ 4 | 0 | 0
+ 4 | 1 | 0
+(8 rows)
+
+UPDATE aoco_blkdir_test_rowcount SET i = i + 1;
+UPDATE 120
+VACUUM ANALYZE aoco_blkdir_test_rowcount;
+VACUUM
+
+SELECT segno, columngroup_no, sum(row_count) AS totalrows FROM (SELECT (gp_toolkit.__gp_aoblkdir('aoco_blkdir_test_rowcount')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0)s GROUP BY segno, columngroup_no ORDER BY segno, columngroup_no;
+ segno | columngroup_no | totalrows
+-------+----------------+-----------
+ 3 | 0 | 120
+ 3 | 1 | 120
+(2 rows)
+SELECT segno, column_num, sum(tupcount) AS totalrows FROM gp_toolkit.__gp_aocsseg('aoco_blkdir_test_rowcount') WHERE segment_id = 0 GROUP BY segno, column_num;
+ segno | column_num | totalrows
+-------+------------+-----------
+ 1 | 0 | 0
+ 1 | 1 | 0
+ 2 | 0 | 0
+ 2 | 1 | 0
+ 3 | 0 | 120
+ 3 | 1 | 120
+ 4 | 0 | 0
+ 4 | 1 | 0
+(8 rows)
+
+DROP TABLE aoco_blkdir_test_rowcount;
+DROP TABLE
+
+--
+-- Test tuple fetch with holes from ABORTs
+--
+CREATE TABLE aoco_fetch_hole(i int, j int) USING ao_row;
+CREATE TABLE
+CREATE INDEX ON aoco_fetch_hole(i);
+CREATE INDEX
+INSERT INTO aoco_fetch_hole VALUES (2, 0);
+INSERT 0 1
+-- Create a hole after the last entry (of the last minipage) in the blkdir.
+BEGIN;
+BEGIN
+INSERT INTO aoco_fetch_hole SELECT 3, j FROM generate_series(1, 20) j;
+INSERT 0 20
+ABORT;
+ROLLBACK
+SELECT (gp_toolkit.__gp_aoblkdir('aoco_fetch_hole')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5;
+ tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------+-------+----------------+----------+--------------+-------------+-----------
+ (0,1) | 1 | 0 | 0 | 1 | 0 | 1
+(1 row)
+
+-- Ensure we will do an index scan.
+SET enable_seqscan TO off;
+SET
+SET enable_indexonlyscan TO off;
+SET
+SET optimizer TO off;
+SET
+EXPLAIN SELECT count(*) FROM aoco_fetch_hole WHERE i = 3;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Finalize Aggregate (cost=39.80..39.81 rows=1 width=8)
+ -> Gather Motion 1:1 (slice1; segments: 1) (cost=39.74..39.79 rows=3 width=8)
+ -> Partial Aggregate (cost=39.74..39.75 rows=1 width=8)
+ -> Bitmap Heap Scan on aoco_fetch_hole (cost=4.82..39.67 rows=29 width=0)
+ Recheck Cond: (i = 3)
+ -> Bitmap Index Scan on aoco_fetch_hole_i_idx (cost=0.00..4.81 rows=29 width=0)
+ Index Cond: (i = 3)
+ Optimizer: Postgres query optimizer
+(8 rows)
+
+SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_sysscan', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+SELECT count(*) FROM aoco_fetch_hole WHERE i = 3;
+ count
+-------
+ 0
+(1 row)
+-- Since the hole is at the end of the minipage, we can't avoid a sysscan for
+-- each tuple.
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Success: fault name:'AppendOnlyBlockDirectory_GetEntry_sysscan' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'20'
+
+(1 row)
+
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-----------------
+ Success:
+(1 row)
+
+-- Now do 1 more insert, so that the hole is sandwiched between two successive
+-- minipage entries.
+INSERT INTO aoco_fetch_hole VALUES (4, 21);
+INSERT 0 1
+SELECT (gp_toolkit.__gp_aoblkdir('aoco_fetch_hole')).* FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0 ORDER BY 1,2,3,4,5;
+ tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------+-------+----------------+----------+--------------+-------------+-----------
+ (0,3) | 1 | 0 | 0 | 1 | 0 | 1
+ (0,3) | 1 | 0 | 1 | 201 | 40 | 1
+(2 rows)
+
+SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_sysscan', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+SELECT gp_inject_fault_infinite('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'skip', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+SELECT count(*) FROM aoco_fetch_hole WHERE i = 3;
+ count
+-------
+ 0
+(1 row)
+-- Since the hole is between two entries, we are always able to find the last
+-- entry in the minipage, determine that the target row doesn't lie within it,
+-- and return early, thereby avoiding an expensive per-tuple sysscan. We do
+-- only 1 sysscan - for the first tuple fetch in the hole - and avoid it for
+-- all subsequent fetches in the hole.
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Success: fault name:'AppendOnlyBlockDirectory_GetEntry_sysscan' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'1'
+
+(1 row)
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'status', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Success: fault name:'AppendOnlyBlockDirectory_GetEntry_inter_entry_hole' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'19'
+
+(1 row)
+
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_sysscan', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-----------------
+ Success:
+(1 row)
+SELECT gp_inject_fault('AppendOnlyBlockDirectory_GetEntry_inter_entry_hole', 'reset', dbid) FROM gp_segment_configuration WHERE content = 0 AND role = 'p';
+ gp_inject_fault
+-----------------
+ Success:
+(1 row)
+
+RESET enable_seqscan;
+RESET
+RESET enable_indexonlyscan;
+RESET
+RESET optimizer;
+RESET
diff --git a/src/test/isolation2/expected/ao_index_build_progress.out b/src/test/isolation2/expected/ao_index_build_progress.out
index 1048076ce9f..f98be86ea65 100644
--- a/src/test/isolation2/expected/ao_index_build_progress.out
+++ b/src/test/isolation2/expected/ao_index_build_progress.out
@@ -3,11 +3,15 @@
-- AO table
CREATE TABLE ao_index_build_progress(i int, j bigint) USING ao_row WITH (compresstype=zstd, compresslevel=2);
-CREATE
+CREATE TABLE
-- Insert all tuples to seg1.
INSERT INTO ao_index_build_progress SELECT 0, i FROM generate_series(1, 100000) i;
-INSERT 100000
+INSERT 0 100000
+INSERT INTO ao_index_build_progress SELECT 2, i FROM generate_series(1, 100000) i;
+INSERT 0 100000
+INSERT INTO ao_index_build_progress SELECT 5, i FROM generate_series(1, 100000) i;
+INSERT 0 100000
-- Suspend execution when some blocks have been read.
SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'suspend', '', '', '', 10, 10, 0, dbid) FROM gp_segment_configuration WHERE content = 1 AND role = 'p';
@@ -40,15 +44,19 @@ SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'reset', d
(1 row)
1<: <... completed>
-CREATE
+CREATE INDEX
-- AOCO table
CREATE TABLE aoco_index_build_progress(i int, j int ENCODING (compresstype=zstd, compresslevel=2)) USING ao_column;
-CREATE
+CREATE TABLE
-- Insert all tuples to seg1.
INSERT INTO aoco_index_build_progress SELECT 0, i FROM generate_series(1, 100000) i;
-INSERT 100000
+INSERT 0 100000
+INSERT INTO aoco_index_build_progress SELECT 2, i FROM generate_series(1, 100000) i;
+INSERT 0 100000
+INSERT INTO aoco_index_build_progress SELECT 5, i FROM generate_series(1, 100000) i;
+INSERT 0 100000
-- Suspend execution when some blocks have been read.
SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'suspend', '', '', '', 5, 5, 0, dbid) FROM gp_segment_configuration WHERE content = 1 AND role = 'p';
@@ -83,7 +91,7 @@ SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'reset', d
(1 row)
1<: <... completed>
-CREATE
+CREATE INDEX
-- Repeat the test for another index build
@@ -120,4 +128,4 @@ SELECT gp_inject_fault('AppendOnlyStorageRead_ReadNextBlock_success', 'reset', d
(1 row)
1<: <... completed>
-CREATE
+CREATE INDEX
diff --git a/src/test/isolation2/expected/ao_partition_lock.out b/src/test/isolation2/expected/ao_partition_lock.out
index 8802c69acc9..97f1ed97358 100644
--- a/src/test/isolation2/expected/ao_partition_lock.out
+++ b/src/test/isolation2/expected/ao_partition_lock.out
@@ -5,25 +5,25 @@
-- lock is not acquired.
create table test_ao_partition_lock ( field_dk integer ,field_part integer) with (appendonly=true) DISTRIBUTED BY (field_dk) PARTITION BY LIST(field_part) ( partition val1 values(1), partition val2 values(2), partition val3 values(3) );
-CREATE
+CREATE TABLE
1: begin;
BEGIN
1: insert into test_ao_partition_lock_1_prt_val1 values(1,1);
-INSERT 1
+INSERT 0 1
2: begin;
BEGIN
2: alter table test_ao_partition_lock truncate partition for (2);
-ALTER
+ALTER TABLE
2: end;
-END
+COMMIT
1: end;
-END
+COMMIT
1q: ...
2q: ...
drop table test_ao_partition_lock;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/ao_same_trans_truncate_crash.out b/src/test/isolation2/expected/ao_same_trans_truncate_crash.out
index 318de3ce104..6999ec36dbc 100644
--- a/src/test/isolation2/expected/ao_same_trans_truncate_crash.out
+++ b/src/test/isolation2/expected/ao_same_trans_truncate_crash.out
@@ -43,11 +43,11 @@ CHECKPOINT
1: BEGIN;
BEGIN
1: CREATE TABLE ao_same_trans_truncate(a int, b int) WITH (appendonly=true, orientation=column);
-CREATE
+CREATE TABLE
1: TRUNCATE TABLE ao_same_trans_truncate;
-TRUNCATE
+TRUNCATE TABLE
1: ABORT;
-ABORT
+ROLLBACK
-- restart (immediate) to invoke crash recovery
1: SELECT pg_ctl(datadir, 'restart') FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
pg_ctl
diff --git a/src/test/isolation2/expected/ao_unique_index.out b/src/test/isolation2/expected/ao_unique_index.out
index d737b1feedc..a1638304ee2 100644
--- a/src/test/isolation2/expected/ao_unique_index.out
+++ b/src/test/isolation2/expected/ao_unique_index.out
@@ -8,9 +8,9 @@
-- Case 1: Conflict with committed transaction----------------------------------
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729);
-INSERT 329729
+INSERT 0 329729
-- should conflict
INSERT INTO unique_index_ao_row VALUES (1);
ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg1 192.168.0.148:7003 pid=205740)
@@ -20,73 +20,73 @@ ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_ke
DETAIL: Key (a)=(329729) already exists.
-- should not conflict
INSERT INTO unique_index_ao_row VALUES (329730);
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
-- Case 2: Conflict within the same transaction---------------------------------
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
BEGIN;
BEGIN
INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729);
-INSERT 329729
+INSERT 0 329729
-- should conflict
INSERT INTO unique_index_ao_row VALUES (1);
ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg0 192.168.0.148:7002 pid=205739)
DETAIL: Key (a)=(1) already exists.
END;
-END
+ROLLBACK
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
BEGIN;
BEGIN
INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729);
-INSERT 329729
+INSERT 0 329729
-- should conflict
INSERT INTO unique_index_ao_row VALUES (329729);
ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg0 192.168.0.148:7002 pid=205739)
DETAIL: Key (a)=(329729) already exists.
END;
-END
+ROLLBACK
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
BEGIN;
BEGIN
INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729);
-INSERT 329729
+INSERT 0 329729
-- should not conflict
INSERT INTO unique_index_ao_row VALUES (329730);
-INSERT 1
+INSERT 0 1
END;
-END
+COMMIT
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
-- Case 3: Conflict with aborted transaction is not a conflict------------------
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
BEGIN;
BEGIN
INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729);
-INSERT 329729
+INSERT 0 329729
ABORT;
-ABORT
+ROLLBACK
-- should not conflict
INSERT INTO unique_index_ao_row VALUES (1);
-INSERT 1
+INSERT 0 1
INSERT INTO unique_index_ao_row VALUES (329729);
-INSERT 1
+INSERT 0 1
INSERT INTO unique_index_ao_row VALUES (329730);
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
-- Case 4: Conflict with to-be-committed transaction----------------------------
--
@@ -106,21 +106,21 @@ DROP
-- 9. Tx 1 commits
--
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
1: INSERT INTO unique_index_ao_row VALUES (0);
-INSERT 1
+INSERT 0 1
2: BEGIN;
BEGIN
2: INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729);
-INSERT 329729
+INSERT 0 329729
3&: INSERT INTO unique_index_ao_row VALUES (1);
4&: INSERT INTO unique_index_ao_row VALUES (329728);
5&: INSERT INTO unique_index_ao_row VALUES (329729);
-- should succeed immediately
6: INSERT INTO unique_index_ao_row VALUES (329730);
-INSERT 1
+INSERT 0 1
2: COMMIT;
COMMIT
3<: <... completed>
@@ -135,7 +135,7 @@ DETAIL: Key (a)=(329729) already exists.
1: COMMIT;
COMMIT
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
-- Case 5: Conflict with to-be-aborted transaction------------------------------
--
@@ -155,37 +155,37 @@ DROP
-- 10. Tx 1 commits
--
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
1: INSERT INTO unique_index_ao_row VALUES (0);
-INSERT 1
+INSERT 0 1
2: BEGIN;
BEGIN
2: INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 329729);
-INSERT 329729
+INSERT 0 329729
3&: INSERT INTO unique_index_ao_row VALUES (1);
4&: INSERT INTO unique_index_ao_row VALUES (329728);
5&: INSERT INTO unique_index_ao_row VALUES (329729);
-- should succeed immediately
6: INSERT INTO unique_index_ao_row VALUES (329730);
-INSERT 1
+INSERT 0 1
2: ABORT;
-ABORT
+ROLLBACK
3<: <... completed>
-INSERT 1
+INSERT 0 1
4<: <... completed>
-INSERT 1
+INSERT 0 1
5<: <... completed>
-INSERT 1
+INSERT 0 1
1: COMMIT;
COMMIT
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
-- Case 6: Conflict with aborted rows following some committed rows ------------
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
-- 1. Tx 1 commits rows 1-100.
-- 2. Tx 2 inserts rows 101-200 and then aborts.
-- 3. Tx 3 tries to insert row in range [101,200] and is immediately successful.
@@ -193,22 +193,22 @@ CREATE
-- constraint violation.
-- 5. Tx 5 tries to insert row in range [201, ) and is immediately successful.
1: INSERT INTO unique_index_ao_row SELECT generate_series(1, 100);
-INSERT 100
+INSERT 0 100
2: BEGIN;
BEGIN
2: INSERT INTO unique_index_ao_row SELECT generate_series(101, 200);
-INSERT 100
+INSERT 0 100
2: ABORT;
-ABORT
+ROLLBACK
3: INSERT INTO unique_index_ao_row VALUES(102);
-INSERT 1
+INSERT 0 1
4: INSERT INTO unique_index_ao_row VALUES(2);
ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg0 192.168.0.148:7002 pid=659656)
DETAIL: Key (a)=(2) already exists.
5: INSERT INTO unique_index_ao_row VALUES(202);
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
--------------------------------------------------------------------------------
----------------- More concurrent tests with fault injection ------------------
@@ -232,7 +232,7 @@ DROP
-- 6. Tx 2 succeeds as Tx 1 aborted.
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
SELECT gp_inject_fault('appendonly_insert', 'suspend', '', '', 'unique_index_ao_row', 4, 4, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
gp_inject_fault
-----------------
@@ -252,9 +252,9 @@ SELECT gp_wait_until_triggered_fault('appendonly_insert', 4, dbid) FROM gp_segme
(3 rows)
2&: INSERT INTO unique_index_ao_row VALUES(2);
4: INSERT INTO unique_index_ao_row VALUES(11);
-INSERT 1
+INSERT 0 1
3: INSERT INTO unique_index_ao_row VALUES(4);
-INSERT 1
+INSERT 0 1
SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
gp_inject_fault
-----------------
@@ -266,9 +266,9 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi
ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg1 192.168.0.148:7003 pid=828519)
DETAIL: Key (a)=(4) already exists.
2<: <... completed>
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
-- Case 8: Conflict with to-be-committed transaction - generalization of case 7
-- where there are multiple minipages (and block directory rows) in play from
@@ -293,7 +293,7 @@ DROP
-- 7. All blocked Txs succeed.
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
SELECT gp_inject_fault('insert_new_entry_curr_minipage_full', 'suspend', '', '', '', 2, 2, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
gp_inject_fault
@@ -349,7 +349,7 @@ SELECT gp_wait_until_triggered_fault('appendonly_insert', 4, dbid) FROM gp_segme
8&: INSERT INTO unique_index_ao_row VALUES(661507);
-- no index entry exists for it, so should not conflict.
9: INSERT INTO unique_index_ao_row VALUES(661510);
-INSERT 1
+INSERT 0 1
SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
gp_inject_fault
@@ -363,29 +363,29 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi
ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_key" (seg1 192.168.0.148:7003 pid=630215)
DETAIL: Key (a)=(661510) already exists.
2<: <... completed>
-INSERT 1
+INSERT 0 1
3<: <... completed>
-INSERT 1
+INSERT 0 1
4<: <... completed>
-INSERT 1
+INSERT 0 1
5<: <... completed>
-INSERT 1
+INSERT 0 1
6<: <... completed>
-INSERT 1
+INSERT 0 1
7<: <... completed>
-INSERT 1
+INSERT 0 1
8<: <... completed>
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
--------------------------------------------------------------------------------
--------------------------- Smoke tests for COPY -------------------------------
--------------------------------------------------------------------------------
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
@@ -405,23 +405,23 @@ CONTEXT: COPY unique_index_ao_row, line 1
2<: <... completed>
COPY 1
1: END;
-END
+ROLLBACK
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
--------------------------------------------------------------------------------
-------------------- Smoke tests for subtransactions ---------------------------
--------------------------------------------------------------------------------
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
1: SAVEPOINT a;
SAVEPOINT
1: INSERT INTO unique_index_ao_row VALUES(1);
-INSERT 1
+INSERT 0 1
-- concurrent tx inserting conflicting row should block.
2: BEGIN;
@@ -429,7 +429,7 @@ BEGIN
2&: INSERT INTO unique_index_ao_row VALUES(1);
-- concurrent tx inserting non-conflicting row should be successful.
3: INSERT INTO unique_index_ao_row VALUES(2);
-INSERT 1
+INSERT 0 1
-- conflict should be detected within the same subtx.
1: INSERT INTO unique_index_ao_row VALUES(1);
@@ -437,15 +437,15 @@ ERROR: duplicate key value violates unique constraint "unique_index_ao_row_a_ke
DETAIL: Key (a)=(1) already exists.
-- the concurrent tx should now succeed.
2<: <... completed>
-INSERT 1
+INSERT 0 1
2: ABORT;
-ABORT
+ROLLBACK
-- after rolling back to the savepoint, we should be able to re-insert the key
1: ROLLBACK TO SAVEPOINT a;
ROLLBACK
1: INSERT INTO unique_index_ao_row VALUES(1);
-INSERT 1
+INSERT 0 1
1: COMMIT;
COMMIT
@@ -457,7 +457,7 @@ SELECT * FROM unique_index_ao_row;
(2 rows)
DROP TABLE unique_index_ao_row;
-DROP
+DROP TABLE
--------------------------------------------------------------------------------
-------------------- Smoke tests for repeatable read ---------------------------
@@ -467,7 +467,7 @@ DROP
-- boundaries.
CREATE TABLE unique_index_ao_row (a INT unique) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
-- Begin two txs with tx level snapshot taken early.
1: BEGIN ISOLATION LEVEL REPEATABLE READ;
@@ -487,11 +487,11 @@ BEGIN
3: BEGIN;
BEGIN
3: INSERT INTO unique_index_ao_row VALUES(1);
-INSERT 1
+INSERT 0 1
-- And another transaction inserts a key and commits.
INSERT INTO unique_index_ao_row VALUES(2);
-INSERT 1
+INSERT 0 1
-- Tx should block on insert of conflicting key, even though it can't "see" the
-- conflicting key due to its isolation level.
@@ -502,11 +502,11 @@ INSERT 1
1&: INSERT INTO unique_index_ao_row VALUES(1);
3: ABORT;
-ABORT
+ROLLBACK
1<: <... completed>
-INSERT 1
+INSERT 0 1
1: ABORT;
-ABORT
+ROLLBACK
-- Tx should raise a conflict, even though it can't "see" the conflicting key
-- due to its isolation level.
@@ -522,52 +522,3 @@ ABORT
DROP TABLE unique_index_ao_row;
DROP
-
-
---------------------------------------------------------------------------------
------------------------ Smoke tests for ADD CONSTRAINT ------------------------
---------------------------------------------------------------------------------
-CREATE TABLE unique_index_ao_row (a INT) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
-INSERT INTO unique_index_ao_row SELECT * FROM generate_series(1, 5);
-INSERT 5
-
-ALTER table unique_index_ao_row ADD CONSTRAINT a_unique UNIQUE(a);
-ALTER
--- should conflict
-INSERT INTO unique_index_ao_row VALUES (1);
-DETAIL: Key (a)=(1) already exists.
-ERROR: duplicate key value violates unique constraint "a_unique"
-ALTER table unique_index_ao_row DROP CONSTRAINT a_unique;
-ALTER
-
-INSERT INTO unique_index_ao_row VALUES (1);
-INSERT 1
--- should failed
-ALTER table unique_index_ao_row ADD CONSTRAINT a_unique UNIQUE(a);
-DETAIL: Key (a)=(1) is duplicated.
-ERROR: could not create unique index "a_unique"
-
-DROP TABLE unique_index_ao_row;
-DROP
-
-
---------------------------------------------------------------------------------
------------------------ Smoke tests for Multiple Key ---------------------------
---------------------------------------------------------------------------------
-CREATE TABLE unique_index_ao_row (a INT, b INT) USING ao_row DISTRIBUTED REPLICATED;
-CREATE
-INSERT INTO unique_index_ao_row SELECT i,i FROM generate_series(1, 5) i;
-INSERT 5
-
-CREATE UNIQUE INDEX a_b_unique ON unique_index_ao_row(a,b);
-CREATE
--- should not conflict
-INSERT INTO unique_index_ao_row VALUES (1,2);
-INSERT 1
--- should conflict
-INSERT INTO unique_index_ao_row VALUES (1,1);
-DETAIL: Key (a, b)=(1, 1) already exists.
-ERROR: duplicate key value violates unique constraint "a_b_unique"
-DROP TABLE unique_index_ao_row;
-DROP
diff --git a/src/test/isolation2/expected/aoco_column_rewrite.out b/src/test/isolation2/expected/aoco_column_rewrite.out
index 58c20004c57..2d3933ee7eb 100644
--- a/src/test/isolation2/expected/aoco_column_rewrite.out
+++ b/src/test/isolation2/expected/aoco_column_rewrite.out
@@ -9,3 +9,1709 @@
* So when we cherry-pick the commit 20f39c76f3dd03b0feb8b05011c1e0637df6c77e, please
* also bring the changes in 17b0aac07bebc771b2d3a32bbd22cc8318201f57, or we will lost it.
*/
+--------------------------------------------------------------------------------
+-- Tests for various scenarios with the column rewrite optimization
+-- for AT on AOCO tables
+--------------------------------------------------------------------------------
+
+PREPARE attribute_encoding_check AS SELECT c.relname, a.attname, e.filenum, e.attoptions FROM pg_attribute_encoding e, pg_class c, pg_attribute a WHERE e.attrelid = c.oid AND e.attnum = a.attnum and e.attrelid = a.attrelid AND c.relname LIKE $1;
+PREPARE
+
+CREATE TABLE if not exists relfilenodecheck(segid int, relname text, relfilenodebefore int, relfilenodeafter int, casename text);
+CREATE TABLE
+
+PREPARE capturerelfilenodebefore AS INSERT INTO relfilenodecheck SELECT -1 segid, relname, pg_relation_filenode(relname::text) as relfilenode, NULL::int, $1 as casename FROM pg_class WHERE relname LIKE $2 UNION SELECT gp_segment_id segid, relname, pg_relation_filenode(relname::text) as relfilenode, NULL::int, $1 as casename FROM gp_dist_random('pg_class') WHERE relname LIKE $2 ORDER BY segid;
+PREPARE
+
+PREPARE checkrelfilenodediff AS SELECT a.segid, b.casename, b.relname, (relfilenodebefore != a.relfilenode) rewritten FROM ( SELECT -1 segid, relname, pg_relation_filenode(relname::text) as relfilenode FROM pg_class WHERE relname LIKE $2 UNION SELECT gp_segment_id segid, relname, pg_relation_filenode(relname::text) as relfilenode FROM gp_dist_random('pg_class') WHERE relname LIKE $2 ORDER BY segid )a, relfilenodecheck b WHERE b.casename LIKE $1 and b.relname LIKE $2 and a.segid = b.segid;
+PREPARE
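+-- Usage pattern (as exercised below): EXECUTE capturerelfilenodebefore before
+-- an ALTER, run the ALTER, then EXECUTE checkrelfilenodediff and expect
+-- rewritten = f on every segment, i.e. the ALTER did not assign the table a
+-- new relfilenode.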
+
+--------------------------------------------------------------------------------
+-- Test that ALTER COLUMN TYPE and ADD COLUMN on AOCO don't rewrite the entire table
+--------------------------------------------------------------------------------
+
+CREATE TABLE alter_type_aoco(a int, b int, c int) using ao_column;
+CREATE TABLE
+INSERT INTO alter_type_aoco VALUES (20,1,2);
+INSERT 0 1
+EXECUTE attribute_encoding_check ('alter_type_aoco');
+ relname | attname | filenum | attoptions
+-----------------+---------+---------+-------------------------------------------------------------
+ alter_type_aoco | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 1 | 48 | 48 | 1 | 3 | 1
+ 1 | 1 | 1 | 129 | 1 | 48 | 48 | 1 | 3 | 1
+ 1 | 1 | 2 | 257 | 1 | 48 | 48 | 1 | 3 | 1
+(3 rows)
+EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco');
+INSERT 0 4
+SELECT * FROM alter_type_aoco;
+ a | b | c
+----+---+---
+ 20 | 1 | 2
+(1 row)
+
+ALTER TABLE alter_type_aoco ALTER COLUMN b TYPE text;
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_type_aoco');
+ relname | attname | filenum | attoptions
+-----------------+---------+---------+-------------------------------------------------------------
+ alter_type_aoco | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 1 | 48 | 48 | 2 | 3 | 1
+ 1 | 1 | 1 | 204929 | 1 | 48 | 48 | 2 | 3 | 1
+ 1 | 1 | 2 | 257 | 1 | 48 | 48 | 2 | 3 | 1
+(3 rows)
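+-- (Illustrative note, assuming 128 concurrency slots per filenum.) The
+-- physical segfile numbers above follow from:
+--   physical_segno = (filenum - 1) * 128 + segno
+-- e.g. the rewritten column b: (1602 - 1) * 128 + 1 = 204929, as shown.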
+EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco');
+ segid | casename | relname | rewritten
+-------+--------------+-----------------+-----------
+ 0 | alter_column | alter_type_aoco | f
+ 1 | alter_column | alter_type_aoco | f
+ -1 | alter_column | alter_type_aoco | f
+ 2 | alter_column | alter_type_aoco | f
+(4 rows)
+-- data is intact
+SELECT * FROM alter_type_aoco;
+ a | b | c
+----+---+---
+ 20 | 1 | 2
+(1 row)
+INSERT INTO alter_type_aoco VALUES (20,'1',2);
+INSERT 0 1
+-- data is intact
+SELECT * FROM alter_type_aoco;
+ a | b | c
+----+---+---
+ 20 | 1 | 2
+ 20 | 1 | 2
+(2 rows)
+
+ALTER TABLE alter_type_aoco ADD COLUMN d int;
+ALTER TABLE
+
+INSERT INTO alter_type_aoco VALUES (20,'1',2, 3);
+INSERT 0 1
+-- check that we chose the correct filenum for the newly added column
+EXECUTE attribute_encoding_check ('alter_type_aoco');
+ relname | attname | filenum | attoptions
+-----------------+---------+---------+-------------------------------------------------------------
+ alter_type_aoco | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco | d | 4 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(4 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 3 | 144 | 144 | 5 | 3 | 1
+ 1 | 1 | 1 | 204929 | 3 | 144 | 144 | 5 | 3 | 1
+ 1 | 1 | 2 | 257 | 3 | 144 | 144 | 5 | 3 | 1
+ 1 | 1 | 3 | 385 | 3 | 144 | 144 | 5 | 3 | 1
+(4 rows)
+DROP TABLE alter_type_aoco;
+DROP TABLE
+CHECKPOINT;
+CHECKPOINT
+-- check if all files are dropped correctly
+SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_type_aoco');
+ gp_segment_id | tablespace | filename
+---------------+------------+----------
+(0 rows)
+
+--------------------------------------------------------------------------------
+-- Test that the column rewrite handles deleted rows in the block directory
+-- correctly across more than 1 minipage
+
+-- We create a table with a blkdir, insert enough data to span more than one
+-- minipage in the block directory, delete some rows, and check that the
+-- column rewrite rewrites the block directory correctly
+--------------------------------------------------------------------------------
+CREATE TABLE alter_type_aoco_delete(a int, b int, c int) USING ao_column;
+CREATE TABLE
+CREATE INDEX at_aoco_idx on alter_type_aoco_delete(c);
+CREATE INDEX
+INSERT INTO alter_type_aoco_delete SELECT 1,i,i FROM generate_series(1,10000)i;
+INSERT 0 10000
+DELETE FROM alter_type_aoco_delete WHERE b%3 = 1;
+DELETE 3334
+EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco_delete');
+INSERT 0 4
+SELECT count(*) FROM alter_type_aoco_delete;
+ count
+-------
+ 6666
+(1 row)
+
+-- test both ALTER COLUMN TYPE and ALTER COLUMN SET ENCODING together
+ALTER TABLE alter_type_aoco_delete ALTER COLUMN b TYPE text, ALTER COLUMN c SET ENCODING (compresstype=rle_type, compresslevel=4);
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_type_aoco_delete');
+ relname | attname | filenum | attoptions
+------------------------+---------+---------+-----------------------------------------------------------------
+ alter_type_aoco_delete | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_delete | c | 1603 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=4']
+ alter_type_aoco_delete | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-------+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 10000 | 40088 | 40088 | 3 | 3 | 1
+ 1 | 1 | 1 | 204929 | 10000 | 48984 | 48984 | 3 | 3 | 1
+ 1 | 1 | 2 | 205057 | 10000 | 88 | 40047 | 3 | 3 | 1
+(3 rows)
+SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete')).* FROM gp_dist_random('gp_id');
+ gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------------+---------+-------+----------------+----------+--------------+-------------+-----------
+ 1 | (0,1) | 1 | 0 | 0 | 1 | 0 | 8181
+ 1 | (0,1) | 1 | 0 | 1 | 8182 | 32768 | 1819
+ 1 | (0,4) | 1 | 1 | 0 | 1 | 0 | 6766
+ 1 | (0,4) | 1 | 1 | 1 | 6767 | 32768 | 3234
+ 1 | (0,5) | 1 | 2 | 0 | 1 | 0 | 10000
+(5 rows)
+EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco_delete');
+ segid | casename | relname | rewritten
+-------+--------------+------------------------+-----------
+ 2 | alter_column | alter_type_aoco_delete | f
+ 0 | alter_column | alter_type_aoco_delete | f
+ 1 | alter_column | alter_type_aoco_delete | f
+ -1 | alter_column | alter_type_aoco_delete | f
+(4 rows)
+SELECT count(b) FROM alter_type_aoco_delete;
+ count
+-------
+ 6666
+(1 row)
+SELECT count(*) FROM alter_type_aoco_delete;
+ count
+-------
+ 6666
+(1 row)
+
+--------------------------------------------------------------------------------
+-- Test that the column rewrite handles the block directory and visimap
+-- for deleted rows correctly with multiple blocks in the same segfile
+
+-- Here, we insert data into two different blocks and delete all rows from the
+-- first block. We test that the block is still reproduced in the rewritten column
+--------------------------------------------------------------------------------
+CREATE TABLE alter_type_aoco_delete1(a int, b int, c int) USING ao_column;
+CREATE TABLE
+CREATE INDEX at_aoco_idx1 on alter_type_aoco_delete1(c);
+CREATE INDEX
+INSERT INTO alter_type_aoco_delete1 VALUES (1,2,2);
+INSERT 0 1
+INSERT INTO alter_type_aoco_delete1 VALUES (1,3,3);
+INSERT 0 1
+DELETE FROM alter_type_aoco_delete1 WHERE b = 2;
+DELETE 1
+EXECUTE attribute_encoding_check ('alter_type_aoco_delete1');
+ relname | attname | filenum | attoptions
+-------------------------+---------+---------+-------------------------------------------------------------
+ alter_type_aoco_delete1 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_delete1 | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_delete1 | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete1') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 2 | 96 | 96 | 3 | 3 | 1
+ 1 | 1 | 1 | 129 | 2 | 96 | 96 | 3 | 3 | 1
+ 1 | 1 | 2 | 257 | 2 | 96 | 96 | 3 | 3 | 1
+(3 rows)
+SELECT (gp_toolkit.__gp_aovisimap('alter_type_aoco_delete1')).* FROM gp_dist_random('gp_id');
+ tid | segno | row_num
+--------------+-------+---------
+ (33554432,2) | 1 | 1
+(1 row)
+SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete1')).* FROM gp_dist_random('gp_id');
+ gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------------+---------+-------+----------------+----------+--------------+-------------+-----------
+ 1 | (0,4) | 1 | 0 | 0 | 1 | 0 | 1
+ 1 | (0,4) | 1 | 0 | 1 | 101 | 48 | 1
+ 1 | (0,5) | 1 | 1 | 0 | 1 | 0 | 1
+ 1 | (0,5) | 1 | 1 | 1 | 101 | 48 | 1
+ 1 | (0,6) | 1 | 2 | 0 | 1 | 0 | 1
+ 1 | (0,6) | 1 | 2 | 1 | 101 | 48 | 1
+(6 rows)
+EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco_delete1');
+INSERT 0 4
+SELECT * FROM alter_type_aoco_delete1;
+ a | b | c
+---+---+---
+ 1 | 3 | 3
+(1 row)
+
+-- test both ALTER COLUMN TYPE and ALTER COLUMN SET ENCODING together
+ALTER TABLE alter_type_aoco_delete1 ALTER COLUMN b TYPE text, ALTER COLUMN c SET ENCODING (compresstype=rle_type, compresslevel=4);
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_type_aoco_delete1');
+ relname | attname | filenum | attoptions
+-------------------------+---------+---------+-----------------------------------------------------------------
+ alter_type_aoco_delete1 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_delete1 | c | 1603 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=4']
+ alter_type_aoco_delete1 | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete1') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 2 | 96 | 96 | 4 | 3 | 1
+ 1 | 1 | 1 | 204929 | 2 | 96 | 96 | 4 | 3 | 1
+ 1 | 1 | 2 | 205057 | 2 | 96 | 96 | 4 | 3 | 1
+(3 rows)
+SELECT (gp_toolkit.__gp_aovisimap('alter_type_aoco_delete1')).* FROM gp_dist_random('gp_id');
+ tid | segno | row_num
+--------------+-------+---------
+ (33554432,2) | 1 | 1
+(1 row)
+SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete1')).* FROM gp_dist_random('gp_id');
+ gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------------+---------+-------+----------------+----------+--------------+-------------+-----------
+ 1 | (0,4) | 1 | 0 | 0 | 1 | 0 | 1
+ 1 | (0,4) | 1 | 0 | 1 | 101 | 48 | 1
+ 1 | (0,7) | 1 | 1 | 0 | 1 | 0 | 1
+ 1 | (0,7) | 1 | 1 | 1 | 101 | 48 | 1
+ 1 | (0,8) | 1 | 2 | 0 | 1 | 0 | 1
+ 1 | (0,8) | 1 | 2 | 1 | 101 | 48 | 1
+(6 rows)
+EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco_delete1');
+ segid | casename | relname | rewritten
+-------+--------------+-------------------------+-----------
+ 2 | alter_column | alter_type_aoco_delete1 | f
+ -1 | alter_column | alter_type_aoco_delete1 | f
+ 0 | alter_column | alter_type_aoco_delete1 | f
+ 1 | alter_column | alter_type_aoco_delete1 | f
+(4 rows)
+SELECT b FROM alter_type_aoco_delete1;
+ b
+---
+ 3
+(1 row)
+SELECT * FROM alter_type_aoco_delete1;
+ a | b | c
+---+---+---
+ 1 | 3 | 3
+(1 row)
+
+--------------------------------------------------------------------------------
+-- Test if column rewrite handles the block directory and visimap
+-- for deleted rows correctly with multiple blocks in the same segfile
+
+-- Here, we insert data into two different blocks and delete all rows from the
+-- second block. We test that the block is still reproduced in the rewritten column.
+--------------------------------------------------------------------------------
+CREATE TABLE alter_type_aoco_delete2(a int, b int, c int) USING ao_column;
+CREATE TABLE
+CREATE INDEX at_aoco_idx2 on alter_type_aoco_delete2(c);
+CREATE INDEX
+INSERT INTO alter_type_aoco_delete2 VALUES (1,2,2);
+INSERT 0 1
+INSERT INTO alter_type_aoco_delete2 VALUES (1,3,3);
+INSERT 0 1
+DELETE FROM alter_type_aoco_delete2 WHERE b = 3;
+DELETE 1
+EXECUTE attribute_encoding_check ('alter_type_aoco_delete2');
+ relname | attname | filenum | attoptions
+-------------------------+---------+---------+-------------------------------------------------------------
+ alter_type_aoco_delete2 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_delete2 | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_delete2 | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete2') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 2 | 96 | 96 | 3 | 3 | 1
+ 1 | 1 | 1 | 129 | 2 | 96 | 96 | 3 | 3 | 1
+ 1 | 1 | 2 | 257 | 2 | 96 | 96 | 3 | 3 | 1
+(3 rows)
+SELECT (gp_toolkit.__gp_aovisimap('alter_type_aoco_delete2')).* FROM gp_dist_random('gp_id');
+ tid | segno | row_num
+----------------+-------+---------
+ (33554432,102) | 1 | 101
+(1 row)
+SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete2')).* FROM gp_dist_random('gp_id');
+ gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------------+---------+-------+----------------+----------+--------------+-------------+-----------
+ 1 | (0,4) | 1 | 0 | 0 | 1 | 0 | 1
+ 1 | (0,4) | 1 | 0 | 1 | 101 | 48 | 1
+ 1 | (0,5) | 1 | 1 | 0 | 1 | 0 | 1
+ 1 | (0,5) | 1 | 1 | 1 | 101 | 48 | 1
+ 1 | (0,6) | 1 | 2 | 0 | 1 | 0 | 1
+ 1 | (0,6) | 1 | 2 | 1 | 101 | 48 | 1
+(6 rows)
+EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco_delete2');
+INSERT 0 4
+SELECT * FROM alter_type_aoco_delete2;
+ a | b | c
+---+---+---
+ 1 | 2 | 2
+(1 row)
+
+-- test both ALTER COLUMN TYPE and ALTER COLUMN SET ENCODING together
+ALTER TABLE alter_type_aoco_delete2 ALTER COLUMN b TYPE text, ALTER COLUMN c SET ENCODING (compresstype=rle_type, compresslevel=4);
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_type_aoco_delete2');
+ relname | attname | filenum | attoptions
+-------------------------+---------+---------+-----------------------------------------------------------------
+ alter_type_aoco_delete2 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_delete2 | c | 1603 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=4']
+ alter_type_aoco_delete2 | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_delete2') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 2 | 96 | 96 | 4 | 3 | 1
+ 1 | 1 | 1 | 204929 | 2 | 96 | 96 | 4 | 3 | 1
+ 1 | 1 | 2 | 205057 | 2 | 96 | 96 | 4 | 3 | 1
+(3 rows)
+SELECT (gp_toolkit.__gp_aovisimap('alter_type_aoco_delete2')).* FROM gp_dist_random('gp_id');
+ tid | segno | row_num
+----------------+-------+---------
+ (33554432,102) | 1 | 101
+(1 row)
+SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_type_aoco_delete2')).* FROM gp_dist_random('gp_id');
+ gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------------+---------+-------+----------------+----------+--------------+-------------+-----------
+ 1 | (0,4) | 1 | 0 | 0 | 1 | 0 | 1
+ 1 | (0,4) | 1 | 0 | 1 | 101 | 48 | 1
+ 1 | (0,7) | 1 | 1 | 0 | 1 | 0 | 1
+ 1 | (0,7) | 1 | 1 | 1 | 101 | 48 | 1
+ 1 | (0,8) | 1 | 2 | 0 | 1 | 0 | 1
+ 1 | (0,8) | 1 | 2 | 1 | 101 | 48 | 1
+(6 rows)
+EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco_delete2');
+ segid | casename | relname | rewritten
+-------+--------------+-------------------------+-----------
+ 0 | alter_column | alter_type_aoco_delete2 | f
+ 1 | alter_column | alter_type_aoco_delete2 | f
+ -1 | alter_column | alter_type_aoco_delete2 | f
+ 2 | alter_column | alter_type_aoco_delete2 | f
+(4 rows)
+SELECT b FROM alter_type_aoco_delete2;
+ b
+---
+ 2
+(1 row)
+SELECT * FROM alter_type_aoco_delete2;
+ a | b | c
+---+---+---
+ 1 | 2 | 2
+(1 row)
+
+--------------------------------------------------------------------------------
+-- Test if AT ALTER COLUMN TYPE works correctly when we need a full table rewrite.
+
+-- We perform an AT subcommand which requires a full table rewrite, and check the
+-- results of the AT ALTER COLUMN TYPE after the table is fully rewritten.
+--------------------------------------------------------------------------------
+
+
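+-- (Here SET UNLOGGED is the subcommand that forces the full table rewrite:
+-- after it, the data lands in a fresh relfilenode under segno 0, as the
+-- __gp_aocsseg output below shows, and checkrelfilenodediff reports
+-- rewritten = t on every segment.)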
+CREATE TABLE alter_type_aoco_fullrewrite(a int, b int, c int) using ao_column;
+CREATE TABLE
+INSERT INTO alter_type_aoco_fullrewrite VALUES (20,1,2);
+INSERT 0 1
+EXECUTE attribute_encoding_check ('alter_type_aoco_fullrewrite');
+ relname | attname | filenum | attoptions
+-----------------------------+---------+---------+-------------------------------------------------------------
+ alter_type_aoco_fullrewrite | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_fullrewrite | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_fullrewrite | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_fullrewrite') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 1 | 48 | 48 | 1 | 3 | 1
+ 1 | 1 | 1 | 129 | 1 | 48 | 48 | 1 | 3 | 1
+ 1 | 1 | 2 | 257 | 1 | 48 | 48 | 1 | 3 | 1
+(3 rows)
+EXECUTE capturerelfilenodebefore ('alter_column', 'alter_type_aoco_fullrewrite');
+INSERT 0 4
+SELECT * FROM alter_type_aoco_fullrewrite;
+ a | b | c
+----+---+---
+ 20 | 1 | 2
+(1 row)
+
+ALTER TABLE alter_type_aoco_fullrewrite ALTER COLUMN b TYPE text, SET UNLOGGED;
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_type_aoco_fullrewrite');
+ relname | attname | filenum | attoptions
+-----------------------------+---------+---------+-------------------------------------------------------------
+ alter_type_aoco_fullrewrite | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_fullrewrite | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_type_aoco_fullrewrite | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_type_aoco_fullrewrite') ORDER BY segment_id, column_num;
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 0 | 0 | 0 | 1 | 48 | 48 | 1 | 3 | 1
+ 1 | 0 | 1 | 128 | 1 | 48 | 48 | 1 | 3 | 1
+ 1 | 0 | 2 | 256 | 1 | 48 | 48 | 1 | 3 | 1
+(3 rows)
+EXECUTE checkrelfilenodediff ('alter_column', 'alter_type_aoco_fullrewrite');
+ segid | casename | relname | rewritten
+-------+--------------+-----------------------------+-----------
+ 0 | alter_column | alter_type_aoco_fullrewrite | t
+ 1 | alter_column | alter_type_aoco_fullrewrite | t
+ 2 | alter_column | alter_type_aoco_fullrewrite | t
+ -1 | alter_column | alter_type_aoco_fullrewrite | t
+(4 rows)
+-- data is intact
+SELECT * FROM alter_type_aoco_fullrewrite;
+ a | b | c
+----+---+---
+ 20 | 1 | 2
+(1 row)
+INSERT INTO alter_type_aoco_fullrewrite VALUES (20,'1',2);
+INSERT 0 1
+-- data is intact
+SELECT * FROM alter_type_aoco_fullrewrite;
+ a | b | c
+----+---+---
+ 20 | 1 | 2
+ 20 | 1 | 2
+(2 rows)
+
+--------------------------------------------------------------------------------
+-- Test if AT ALTER COLUMN TYPE reindexes rewrite-affected indexes
+
+-- We create indexes on columns and test that an index is rewritten
+-- whenever any of the columns it depends on is rewritten,
+-- while the other indexes are unaffected.
+--------------------------------------------------------------------------------
+
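+-- (One way to see which indexes depend on a given column, sketched here only
+-- for illustration against pg_index; attnum 2 is column b:
+--   SELECT indexrelid::regclass FROM pg_index
+--   WHERE indrelid = 'alter_type_aoco'::regclass
+--     AND 2 = ANY (string_to_array(indkey::text, ' ')::int2[]);
+-- idx1 (a,b) and the bitmap index idx3 (a,b,c,d) include b, so they are the
+-- ones expected to be rewritten below; idx2 (c) is not.)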
+CREATE TABLE alter_type_aoco(a int, b int, c int, d int) using ao_column;
+CREATE TABLE
+
+INSERT INTO alter_type_aoco VALUES (20, 1, 2, 3);
+INSERT 0 1
+
+CREATE UNIQUE INDEX idx1 on alter_type_aoco(a,b);
+CREATE INDEX
+CREATE INDEX idx2 on alter_type_aoco using btree(c);
+CREATE INDEX
+CREATE INDEX idx3 on alter_type_aoco using bitmap(a,b,c,d);
+CREATE INDEX
+
+EXECUTE capturerelfilenodebefore ('alter_column_b', 'idx1');
+INSERT 0 4
+EXECUTE capturerelfilenodebefore ('alter_column_b', 'idx2');
+INSERT 0 4
+EXECUTE capturerelfilenodebefore ('alter_column_b', 'idx3');
+INSERT 0 4
+
+ALTER TABLE alter_type_aoco ALTER COLUMN b TYPE text;
+ALTER TABLE
+
+EXECUTE checkrelfilenodediff ('alter_column_b', 'idx1');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ 2 | alter_column_b | idx1 | t
+ -1 | alter_column_b | idx1 | t
+ 0 | alter_column_b | idx1 | t
+ 1 | alter_column_b | idx1 | t
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_b', 'idx2');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ 2 | alter_column_b | idx2 | f
+ 0 | alter_column_b | idx2 | f
+ 1 | alter_column_b | idx2 | f
+ -1 | alter_column_b | idx2 | f
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_b', 'idx3');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ 2 | alter_column_b | idx3 | t
+ -1 | alter_column_b | idx3 | t
+ 0 | alter_column_b | idx3 | t
+ 1 | alter_column_b | idx3 | t
+(4 rows)
+INSERT INTO alter_type_aoco VALUES (20, '2', 3, 4);
+INSERT 0 1
+EXECUTE capturerelfilenodebefore ('alter_column_c', 'idx1');
+INSERT 0 4
+EXECUTE capturerelfilenodebefore ('alter_column_c', 'idx2');
+INSERT 0 4
+EXECUTE capturerelfilenodebefore ('alter_column_c', 'idx3');
+INSERT 0 4
+
+ALTER TABLE alter_type_aoco ALTER COLUMN c TYPE text;
+ALTER TABLE
+
+EXECUTE checkrelfilenodediff ('alter_column_c', 'idx1');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ -1 | alter_column_c | idx1 | f
+ 2 | alter_column_c | idx1 | f
+ 0 | alter_column_c | idx1 | f
+ 1 | alter_column_c | idx1 | f
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_c', 'idx2');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ 2 | alter_column_c | idx2 | t
+ 0 | alter_column_c | idx2 | t
+ 1 | alter_column_c | idx2 | t
+ -1 | alter_column_c | idx2 | t
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_c', 'idx3');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ 0 | alter_column_c | idx3 | t
+ 1 | alter_column_c | idx3 | t
+ -1 | alter_column_c | idx3 | t
+ 2 | alter_column_c | idx3 | t
+(4 rows)
+INSERT INTO alter_type_aoco VALUES (20, '3', '4', 5);
+INSERT 0 1
+EXECUTE capturerelfilenodebefore ('alter_column_d', 'idx1');
+INSERT 0 4
+EXECUTE capturerelfilenodebefore ('alter_column_d', 'idx2');
+INSERT 0 4
+EXECUTE capturerelfilenodebefore ('alter_column_d', 'idx3');
+INSERT 0 4
+
+ALTER TABLE alter_type_aoco ALTER COLUMN d TYPE text;
+ALTER TABLE
+
+EXECUTE checkrelfilenodediff ('alter_column_d', 'idx1');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ 2 | alter_column_d | idx1 | f
+ 0 | alter_column_d | idx1 | f
+ 1 | alter_column_d | idx1 | f
+ -1 | alter_column_d | idx1 | f
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_d', 'idx2');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ 0 | alter_column_d | idx2 | f
+ 1 | alter_column_d | idx2 | f
+ 2 | alter_column_d | idx2 | f
+ -1 | alter_column_d | idx2 | f
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_d', 'idx3');
+ segid | casename | relname | rewritten
+-------+----------------+---------+-----------
+ 0 | alter_column_d | idx3 | t
+ 1 | alter_column_d | idx3 | t
+ -1 | alter_column_d | idx3 | t
+ 2 | alter_column_d | idx3 | t
+(4 rows)
+INSERT INTO alter_type_aoco VALUES (20, '4', '5', '6');
+INSERT 0 1
+-- data is intact
+SELECT * FROM alter_type_aoco;
+ a | b | c | d
+----+---+---+---
+ 20 | 1 | 2 | 3
+ 20 | 2 | 3 | 4
+ 20 | 3 | 4 | 5
+ 20 | 4 | 5 | 6
+(4 rows)
+
+
+--------------------------------------------------------------------------------
+-- Test AT ALTER COLUMN TYPE for a partitioned table
+
+-- Create a 2-level partition hierarchy with the same schema (regular case) and
+-- create indexes on some columns. Alter a column on the partition root and on
+-- the partitions, then check the rewrite status, the data status, and the
+-- filenum for the partition roots.
+--------------------------------------------------------------------------------
+CREATE TABLE part_alter_col(a int, b int, c int) PARTITION BY RANGE (A) (partition aa start (1) end (5) every (1)) USING ao_column;
+CREATE TABLE
+INSERT INTO part_alter_col VALUES (1,2,3);
+INSERT 0 1
+CREATE INDEX part_alter_col_idx1 on part_alter_col(b);
+CREATE INDEX
+CREATE INDEX part_alter_col_idx2 on part_alter_col(c);
+CREATE INDEX
+EXECUTE capturerelfilenodebefore ('alter_column_b', 'part_alter_col_1_prt_aa_1');
+INSERT 0 4
+EXECUTE capturerelfilenodebefore ('alter_column_b', 'part_alter_col_1_prt_aa_1_b_idx');
+INSERT 0 4
+EXECUTE capturerelfilenodebefore ('alter_column_b', 'part_alter_col_1_prt_aa_1_c_idx');
+INSERT 0 4
+EXECUTE attribute_encoding_check ('part_alter_col');
+ relname | attname | filenum | attoptions
+----------------+---------+---------+-------------------------------------------------------------
+ part_alter_col | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ part_alter_col | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ part_alter_col | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+
+ALTER TABLE part_alter_col ALTER COLUMN b TYPE text;
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('part_alter_col');
+ relname | attname | filenum | attoptions
+----------------+---------+---------+-------------------------------------------------------------
+ part_alter_col | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ part_alter_col | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ part_alter_col | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+EXECUTE checkrelfilenodediff ('alter_column_b', 'part_alter_col_1_prt_aa_1');
+ segid | casename | relname | rewritten
+-------+----------------+---------------------------+-----------
+ 2 | alter_column_b | part_alter_col_1_prt_aa_1 | f
+ -1 | alter_column_b | part_alter_col_1_prt_aa_1 | f
+ 0 | alter_column_b | part_alter_col_1_prt_aa_1 | f
+ 1 | alter_column_b | part_alter_col_1_prt_aa_1 | f
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_b', 'part_alter_col_1_prt_aa_1_b_idx');
+ segid | casename | relname | rewritten
+-------+----------------+---------------------------------+-----------
+ 2 | alter_column_b | part_alter_col_1_prt_aa_1_b_idx | t
+ -1 | alter_column_b | part_alter_col_1_prt_aa_1_b_idx | t
+ 0 | alter_column_b | part_alter_col_1_prt_aa_1_b_idx | t
+ 1 | alter_column_b | part_alter_col_1_prt_aa_1_b_idx | t
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_b', 'part_alter_col_1_prt_aa_1_c_idx');
+ segid | casename | relname | rewritten
+-------+----------------+---------------------------------+-----------
+ -1 | alter_column_b | part_alter_col_1_prt_aa_1_c_idx | f
+ 2 | alter_column_b | part_alter_col_1_prt_aa_1_c_idx | f
+ 0 | alter_column_b | part_alter_col_1_prt_aa_1_c_idx | f
+ 1 | alter_column_b | part_alter_col_1_prt_aa_1_c_idx | f
+(4 rows)
+SELECT * FROM part_alter_col;
+ a | b | c
+---+---+---
+ 1 | 2 | 3
+(1 row)
+DROP TABLE part_alter_col;
+DROP TABLE
+CHECKPOINT;
+CHECKPOINT
+-- check if all files are dropped correctly
+SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'part_alter_col');
+ gp_segment_id | tablespace | filename
+---------------+------------+----------
+(0 rows)
+--------------------------------------------------------------------------------
+-- Test if column rewrite works when we run AT ALTER COLUMN TYPE on a column
+-- and then alter it back to the original type
+
+-- Check reloptions, pg_attribute_encoding, visimap, and the block directory
+-- alongside the rewrite.
+--------------------------------------------------------------------------------
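+-- (A column rewrite flips the column's filenum between N and N + 1600, as seen
+-- throughout these tests, so rewriting b from int to text and back is expected
+-- to land it on filenum 2 again, which the pg_attribute_encoding output further
+-- below confirms.)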
+CREATE TABLE alter_column_back(a int, b int ENCODING (compresstype='zlib', compresslevel=5), c int) using ao_column with (compresstype='zlib', compresslevel=2);
+CREATE TABLE
+INSERT INTO alter_column_back VALUES (1,2,3), (1,2,4), (1,2,5);
+INSERT 0 3
+CREATE INDEX alter_column_back_idx1 ON alter_column_back(a,c);
+CREATE INDEX
+DELETE FROM alter_column_back WHERE c=5;
+DELETE 1
+EXECUTE capturerelfilenodebefore ('alter_column', 'alter_column_back');
+INSERT 0 4
+SELECT atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_back'::regclass AND attname='b';
+ atttypid
+----------
+ integer
+(1 row)
+
+ALTER TABLE alter_column_back ALTER COLUMN b TYPE text;
+ALTER TABLE
+
+SELECT c.relname, c.reloptions FROM pg_class c WHERE c.relname LIKE 'alter_column_back';
+ relname | reloptions
+-------------------+------------------------------------------------------------------------------
+ alter_column_back | ['compresstype=zlib', 'compresslevel=2', 'blocksize=32768', 'checksum=true']
+(1 row)
+EXECUTE checkrelfilenodediff ('alter_column', 'alter_column_back');
+ segid | casename | relname | rewritten
+-------+--------------+-------------------+-----------
+ 2 | alter_column | alter_column_back | f
+ 0 | alter_column | alter_column_back | f
+ 1 | alter_column | alter_column_back | f
+ -1 | alter_column | alter_column_back | f
+(4 rows)
+SELECT atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_back'::regclass AND attname='b';
+ atttypid
+----------
+ text
+(1 row)
+INSERT INTO alter_column_back VALUES (1,'2',3);
+INSERT 0 1
+DELETE FROM alter_column_back where c=4;
+DELETE 1
+EXECUTE capturerelfilenodebefore ('alter_column_back', 'alter_column_back');
+INSERT 0 4
+
+ALTER TABLE alter_column_back ALTER COLUMN b TYPE int using b::int;
+ALTER TABLE
+
+SELECT c.relname, c.reloptions FROM pg_class c WHERE c.relname LIKE 'alter_column_back';
+ relname | reloptions
+-------------------+------------------------------------------------------------------------------
+ alter_column_back | ['compresstype=zlib', 'compresslevel=2', 'blocksize=32768', 'checksum=true']
+(1 row)
+EXECUTE attribute_encoding_check ('alter_column_back');
+ relname | attname | filenum | attoptions
+-------------------+---------+---------+-------------------------------------------------------------
+ alter_column_back | a | 1 | ['compresstype=zlib', 'compresslevel=2', 'blocksize=32768']
+ alter_column_back | c | 3 | ['compresstype=zlib', 'compresslevel=2', 'blocksize=32768']
+ alter_column_back | b | 2 | ['compresstype=zlib', 'compresslevel=5', 'blocksize=32768']
+(3 rows)
+SELECT (gp_toolkit.__gp_aovisimap('alter_column_back')).* FROM gp_dist_random('gp_id');
+ tid | segno | row_num
+--------------+-------+---------
+ (33554432,3) | 1 | 2
+ (33554432,4) | 1 | 3
+(2 rows)
+SELECT gp_segment_id, (gp_toolkit.__gp_aoblkdir('alter_column_back')).* FROM gp_dist_random('gp_id');
+ gp_segment_id | tupleid | segno | columngroup_no | entry_no | first_row_no | file_offset | row_count
+---------------+---------+-------+----------------+----------+--------------+-------------+-----------
+ 1 | (0,5) | 1 | 0 | 0 | 1 | 0 | 3
+ 1 | (0,5) | 1 | 0 | 1 | 101 | 48 | 1
+ 1 | (0,7) | 1 | 2 | 0 | 1 | 0 | 3
+ 1 | (0,7) | 1 | 2 | 1 | 101 | 48 | 1
+ 1 | (0,8) | 1 | 1 | 0 | 1 | 0 | 3
+ 1 | (0,8) | 1 | 1 | 1 | 101 | 48 | 1
+(6 rows)
+EXECUTE checkrelfilenodediff ('alter_column_back', 'alter_column_back');
+ segid | casename | relname | rewritten
+-------+-------------------+-------------------+-----------
+ 2 | alter_column_back | alter_column_back | f
+ 0 | alter_column_back | alter_column_back | f
+ 1 | alter_column_back | alter_column_back | f
+ -1 | alter_column_back | alter_column_back | f
+(4 rows)
+SELECT atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_back'::regclass AND attname='b';
+ atttypid
+----------
+ integer
+(1 row)
+SELECT * FROM alter_column_back;
+ a | b | c
+---+---+---
+ 1 | 2 | 3
+ 1 | 2 | 3
+(2 rows)
+DROP TABLE alter_column_back;
+DROP TABLE
+CHECKPOINT;
+CHECKPOINT
+-- check if all files are dropped correctly
+SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_back');
+ gp_segment_id | tablespace | filename
+---------------+------------+----------
+(0 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE and SET ACCESS METHOD can be done in the same command
+-- Verify that we rewrite the table
+--------------------------------------------------------------------------------
+CREATE TABLE alter_column_set_am(a int, b int, c int) using ao_column;
+CREATE TABLE
+INSERT INTO alter_column_set_am VALUES (1,2,3);
+INSERT 0 1
+EXECUTE capturerelfilenodebefore ('alter_column_set_am_aorow', 'alter_column_set_am');
+INSERT 0 4
+EXECUTE attribute_encoding_check ('alter_column_set_am');
+ relname | attname | filenum | attoptions
+---------------------+---------+---------+-------------------------------------------------------------
+ alter_column_set_am | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_set_am | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_set_am | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+
+ALTER TABLE alter_column_set_am SET ACCESS METHOD ao_row, ALTER COLUMN b TYPE text;
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_column_set_am');
+ relname | attname | filenum | attoptions
+---------+---------+---------+------------
+(0 rows)
+EXECUTE checkrelfilenodediff ('alter_column_set_am_aorow', 'alter_column_set_am');
+ segid | casename | relname | rewritten
+-------+---------------------------+---------------------+-----------
+ -1 | alter_column_set_am_aorow | alter_column_set_am | t
+ 2 | alter_column_set_am_aorow | alter_column_set_am | t
+ 0 | alter_column_set_am_aorow | alter_column_set_am | t
+ 1 | alter_column_set_am_aorow | alter_column_set_am | t
+(4 rows)
+SELECT * FROM alter_column_set_am;
+ a | b | c
+---+---+---
+ 1 | 2 | 3
+(1 row)
+INSERT INTO alter_column_set_am VALUES (1,'2',3);
+INSERT 0 1
+EXECUTE capturerelfilenodebefore ('alter_column_set_am_aocol', 'alter_column_set_am');
+INSERT 0 4
+EXECUTE attribute_encoding_check ('alter_column_set_am');
+ relname | attname | filenum | attoptions
+---------+---------+---------+------------
+(0 rows)
+
+ALTER TABLE alter_column_set_am SET ACCESS METHOD ao_column, ALTER COLUMN c TYPE text;
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_column_set_am');
+ relname | attname | filenum | attoptions
+---------------------+---------+---------+-------------------------------------------------------------
+ alter_column_set_am | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_set_am | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_set_am | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+EXECUTE checkrelfilenodediff ('alter_column_set_am_aocol', 'alter_column_set_am');
+ segid | casename | relname | rewritten
+-------+---------------------------+---------------------+-----------
+ -1 | alter_column_set_am_aocol | alter_column_set_am | t
+ 2 | alter_column_set_am_aocol | alter_column_set_am | t
+ 0 | alter_column_set_am_aocol | alter_column_set_am | t
+ 1 | alter_column_set_am_aocol | alter_column_set_am | t
+(4 rows)
+SELECT * FROM alter_column_set_am;
+ a | b | c
+---+---+---
+ 1 | 2 | 3
+ 1 | 2 | 3
+(2 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE and ADD COLUMN can be done in the same command
+-- Verify that we don't rewrite the table
+--------------------------------------------------------------------------------
+CREATE TABLE alter_column_add_col(a int, b int, c int) using ao_column;
+CREATE TABLE
+INSERT INTO alter_column_add_col VALUES (1,2,3);
+INSERT 0 1
+EXECUTE capturerelfilenodebefore ('alter_col_add_col', 'alter_column_add_col');
+INSERT 0 4
+EXECUTE attribute_encoding_check ('alter_column_add_col');
+ relname | attname | filenum | attoptions
+----------------------+---------+---------+-------------------------------------------------------------
+ alter_column_add_col | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_add_col | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_add_col | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+
+ALTER TABLE alter_column_add_col ADD COLUMN d int, ALTER COLUMN b TYPE text;
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_column_add_col');
+ relname | attname | filenum | attoptions
+----------------------+---------+---------+-------------------------------------------------------------
+ alter_column_add_col | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_add_col | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_add_col | b | 1602 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_add_col | d | 4 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(4 rows)
+EXECUTE checkrelfilenodediff ('alter_column_add_col', 'alter_column_add_col');
+ segid | casename | relname | rewritten
+-------+----------+---------+-----------
+(0 rows)
+SELECT * FROM alter_column_add_col;
+ a | b | c | d
+---+---+---+---
+ 1 | 2 | 3 |
+(1 row)
+INSERT INTO alter_column_add_col VALUES (1,'2',3, 4);
+INSERT 0 1
+SELECT * FROM alter_column_add_col;
+ a | b | c | d
+---+---+---+---
+ 1 | 2 | 3 |
+ 1 | 2 | 3 | 4
+(2 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE and other AT commands can be done in the same command
+-- Verify that we rewrite the table
+--------------------------------------------------------------------------------
+CREATE TABLE alter_column_other(a int, b int, c int) using ao_column;
+CREATE TABLE
+INSERT INTO alter_column_other VALUES (1,2,3);
+INSERT 0 1
+EXECUTE capturerelfilenodebefore ('alter_column_other', 'alter_column_other');
+INSERT 0 4
+EXECUTE attribute_encoding_check ('alter_column_other');
+ relname | attname | filenum | attoptions
+--------------------+---------+---------+-------------------------------------------------------------
+ alter_column_other | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_other | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_other | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+
+ALTER TABLE alter_column_other ALTER COLUMN b TYPE text, ALTER COLUMN c SET DEFAULT 5;
+ALTER TABLE
+
+EXECUTE attribute_encoding_check ('alter_column_other');
+ relname | attname | filenum | attoptions
+--------------------+---------+---------+-------------------------------------------------------------
+ alter_column_other | b | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_other | c | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ alter_column_other | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+EXECUTE checkrelfilenodediff ('alter_column_other', 'alter_column_other');
+ segid | casename | relname | rewritten
+-------+--------------------+--------------------+-----------
+ 2 | alter_column_other | alter_column_other | t
+ -1 | alter_column_other | alter_column_other | t
+ 0 | alter_column_other | alter_column_other | t
+ 1 | alter_column_other | alter_column_other | t
+(4 rows)
+SELECT * FROM alter_column_other;
+ a | b | c
+---+---+---
+ 1 | 2 | 3
+(1 row)
+INSERT INTO alter_column_other VALUES (1,'2');
+INSERT 0 1
+SELECT * FROM alter_column_other;
+ a | b | c
+---+---+---
+ 1 | 2 | 3
+ 1 | 2 | 5
+(2 rows)
+
+--------------------------------------------------------------------------------
+-- Test if column rewrite works after vacuum on deleted rows
+--------------------------------------------------------------------------------
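+-- (VACUUM compacts the surviving rows into a new segfile: below, segno 1 is
+-- left empty and the 10 visible rows end up in segno 2, so the column rewrite
+-- has to handle both the empty and the compacted segfile.)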
+CREATE TABLE alter_column_vacuum(a int, b int) using ao_column;
+CREATE TABLE
+INSERT INTO alter_column_vacuum SELECT 1,i FROM generate_series(1,1000)i;
+INSERT 0 1000
+DELETE FROM alter_column_vacuum WHERE b>10;
+DELETE 990
+VACUUM alter_column_vacuum;
+VACUUM
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_vacuum');
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 3 | 1
+ 1 | 1 | 1 | 129 | 0 | 0 | 0 | 2 | 3 | 1
+ 1 | 2 | 0 | 2 | 10 | 80 | 80 | 0 | 3 | 1
+ 1 | 2 | 1 | 130 | 10 | 80 | 80 | 0 | 3 | 1
+(4 rows)
+-- should succeed
+ALTER TABLE alter_column_vacuum ALTER COLUMN b TYPE text;
+ALTER TABLE
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_vacuum');
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 3 | 1
+ 1 | 1 | 1 | 204929 | 0 | 0 | 0 | 2 | 3 | 1
+ 1 | 2 | 0 | 2 | 10 | 80 | 80 | 1 | 3 | 1
+ 1 | 2 | 1 | 204930 | 10 | 64 | 64 | 1 | 3 | 1
+(4 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE works correctly when constraints are involved
+--------------------------------------------------------------------------------
+CREATE TABLE alter_column_constraints(a int, b int check (b > 0)) USING ao_column;
+CREATE TABLE
+INSERT INTO alter_column_constraints SELECT i,i FROM generate_series(1,10)i;
+INSERT 0 10
+-- should error
+ALTER TABLE alter_column_constraints ALTER COLUMN b TYPE text;
+ERROR: operator does not exist: text > integer
+HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
+-- should succeed, and constraint remains
+EXECUTE capturerelfilenodebefore ('alter_column_constraints_col_rewrite', 'alter_column_constraints');
+INSERT 0 4
+ALTER TABLE alter_column_constraints ALTER COLUMN b TYPE bigint;
+ALTER TABLE
+EXECUTE checkrelfilenodediff ('alter_column_constraints_col_rewrite', 'alter_column_constraints');
+ segid | casename | relname | rewritten
+-------+--------------------------------------+--------------------------+-----------
+ 2 | alter_column_constraints_col_rewrite | alter_column_constraints | f
+ -1 | alter_column_constraints_col_rewrite | alter_column_constraints | f
+ 0 | alter_column_constraints_col_rewrite | alter_column_constraints | f
+ 1 | alter_column_constraints_col_rewrite | alter_column_constraints | f
+(4 rows)
+
+EXECUTE capturerelfilenodebefore ('alter_column_constraints_fullrewrite', 'alter_column_constraints');
+INSERT 0 4
+-- should succeed and the relfilenode changes (the column rewrite optimization is not used because there is another subcommand)
+ALTER TABLE alter_column_constraints ADD CONSTRAINT checkb2 CHECK (b < 100), ALTER COLUMN b TYPE int;
+ALTER TABLE
+EXECUTE checkrelfilenodediff ('alter_column_constraints_fullrewrite', 'alter_column_constraints');
+ segid | casename | relname | rewritten
+-------+--------------------------------------+--------------------------+-----------
+ -1 | alter_column_constraints_fullrewrite | alter_column_constraints | t
+ 0 | alter_column_constraints_fullrewrite | alter_column_constraints | t
+ 1 | alter_column_constraints_fullrewrite | alter_column_constraints | t
+ 2 | alter_column_constraints_fullrewrite | alter_column_constraints | t
+(4 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE works correctly when seg0 has some data
+-- Check if we handle rewrite on seg0
+--------------------------------------------------------------------------------
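+-- (The numeric prefixes below, e.g. "1:", select isolation2 test sessions.
+-- Segfile 0 is normally reserved; inserting in the same transaction as the
+-- ADD COLUMN appears to be what places data in segno 0 here.)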
+CREATE TABLE alter_column_seg0(a int, b int) USING ao_column;
+CREATE TABLE
+1: BEGIN;
+BEGIN
+1: ALTER TABLE alter_column_seg0 ADD COLUMN c int;
+ALTER TABLE
+1: INSERT INTO alter_column_seg0 SELECT 1,i,i FROM generate_series(1,10)i;
+INSERT 0 10
+1: COMMIT;
+COMMIT
+INSERT INTO alter_column_seg0 SELECT 1,i,i FROM generate_series(1,10)i;
+INSERT 0 10
+ALTER TABLE alter_column_seg0 ALTER COLUMN b TYPE text;
+ALTER TABLE
+SELECT count(*) FROM alter_column_seg0;
+ count
+-------
+ 20
+(1 row)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_seg0');
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 0 | 0 | 0 | 10 | 80 | 80 | 2 | 3 | 1
+ 1 | 0 | 1 | 204928 | 10 | 64 | 64 | 2 | 3 | 1
+ 1 | 0 | 2 | 256 | 10 | 80 | 80 | 2 | 3 | 1
+ 1 | 1 | 0 | 1 | 10 | 80 | 80 | 2 | 3 | 1
+ 1 | 1 | 1 | 204929 | 10 | 64 | 64 | 2 | 3 | 1
+ 1 | 1 | 2 | 257 | 10 | 80 | 80 | 2 | 3 | 1
+(6 rows)
+DROP TABLE alter_column_seg0;
+DROP TABLE
+CHECKPOINT;
+CHECKPOINT
+SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_seg0');
+ gp_segment_id | tablespace | filename
+---------------+------------+----------
+(0 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE works correctly when multiple segfiles are created
+-- due to concurrent transactions
+-- Check if we handle rewrite on each segfile correctly
+--------------------------------------------------------------------------------
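+-- (Each concurrent transaction gets its own segfile, so the two sessions below
+-- fill segno 1 and segno 2; the rewrite then has to produce a new file for
+-- column b in both, visible as physical_segno 204929 and 204930.)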
+CREATE TABLE alter_column_multiple_concurrency(a int, b int) USING ao_column;
+CREATE TABLE
+1: BEGIN;
+BEGIN
+2: BEGIN;
+BEGIN
+1: INSERT INTO alter_column_multiple_concurrency SELECT 1,i FROM generate_series(1,10)i;
+INSERT 0 10
+2: INSERT INTO alter_column_multiple_concurrency SELECT 1,i FROM generate_series(1,10)i;
+INSERT 0 10
+1: COMMIT;
+COMMIT
+2: COMMIT;
+COMMIT
+ALTER TABLE alter_column_multiple_concurrency ALTER COLUMN b TYPE text;
+ALTER TABLE
+SELECT count(*) FROM alter_column_multiple_concurrency;
+ count
+-------
+ 20
+(1 row)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_multiple_concurrency');
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 10 | 80 | 80 | 2 | 3 | 1
+ 1 | 1 | 1 | 204929 | 10 | 64 | 64 | 2 | 3 | 1
+ 1 | 2 | 0 | 2 | 10 | 80 | 80 | 2 | 3 | 1
+ 1 | 2 | 1 | 204930 | 10 | 64 | 64 | 2 | 3 | 1
+(4 rows)
+DROP TABLE alter_column_multiple_concurrency;
+DROP TABLE
+CHECKPOINT;
+CHECKPOINT
+SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_multiple_concurrency');
+ gp_segment_id | tablespace | filename
+---------------+------------+----------
+(0 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE works correctly when a segfile is in AWAITING_DROP state
+-- Check if we handle rewrite on each segfile correctly
+--------------------------------------------------------------------------------
+CREATE TABLE alter_column_awaiting_drop(a int, b int) USING ao_column;
+CREATE TABLE
+1: BEGIN;
+BEGIN
+2: BEGIN;
+BEGIN
+1: INSERT INTO alter_column_awaiting_drop SELECT 1,i FROM generate_series(1,10)i;
+INSERT 0 10
+2: INSERT INTO alter_column_awaiting_drop SELECT 1,i FROM generate_series(11,20)i;
+INSERT 0 10
+1: COMMIT;
+COMMIT
+2: COMMIT;
+COMMIT
+DELETE FROM alter_column_awaiting_drop WHERE b > 10;
+DELETE 10
+VACUUM alter_column_awaiting_drop;
+VACUUM
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_awaiting_drop');
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 10 | 80 | 80 | 2 | 3 | 1
+ 1 | 1 | 1 | 129 | 10 | 80 | 80 | 2 | 3 | 1
+ 1 | 2 | 0 | 2 | 0 | 0 | 0 | 1 | 3 | 1
+ 1 | 2 | 1 | 130 | 0 | 0 | 0 | 1 | 3 | 1
+ 1 | 3 | 0 | 3 | 0 | 0 | 0 | 0 | 3 | 1
+ 1 | 3 | 1 | 131 | 0 | 0 | 0 | 0 | 3 | 1
+(6 rows)
+ALTER TABLE alter_column_awaiting_drop ALTER COLUMN b TYPE text;
+ALTER TABLE
+SELECT count(*) FROM alter_column_awaiting_drop;
+ count
+-------
+ 10
+(1 row)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_awaiting_drop');
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 10 | 80 | 80 | 3 | 3 | 1
+ 1 | 1 | 1 | 204929 | 10 | 64 | 64 | 3 | 3 | 1
+ 1 | 2 | 0 | 2 | 0 | 0 | 0 | 1 | 3 | 1
+ 1 | 2 | 1 | 204930 | 0 | 0 | 0 | 1 | 3 | 1
+ 1 | 3 | 0 | 3 | 0 | 0 | 0 | 0 | 3 | 1
+ 1 | 3 | 1 | 204931 | 0 | 0 | 0 | 0 | 3 | 1
+(6 rows)
+DROP TABLE alter_column_awaiting_drop;
+DROP TABLE
+CHECKPOINT;
+CHECKPOINT
+SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_awaiting_drop');
+ gp_segment_id | tablespace | filename
+---------------+------------+----------
+(0 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE works correctly for 0 inserted rows
+--------------------------------------------------------------------------------
+CREATE TABLE alter_column_zero_tupcount(a int, b int) USING ao_column;
+CREATE TABLE
+1: BEGIN;
+BEGIN
+2: BEGIN;
+BEGIN
+1: INSERT INTO alter_column_zero_tupcount SELECT 1,i FROM generate_series(1,10)i;
+INSERT 0 10
+2: INSERT INTO alter_column_zero_tupcount SELECT 1,i FROM generate_series(1,10)i;
+INSERT 0 10
+1: ABORT;
+ROLLBACK
+2: ABORT;
+ROLLBACK
+ALTER TABLE alter_column_zero_tupcount ALTER COLUMN b TYPE text;
+ALTER TABLE
+SELECT count(*) FROM alter_column_zero_tupcount;
+ count
+-------
+ 0
+(1 row)
+SELECT * FROM gp_toolkit.__gp_aocsseg('alter_column_zero_tupcount');
+ segment_id | segno | column_num | physical_segno | tupcount | eof | eof_uncompressed | modcount | formatversion | state
+------------+-------+------------+----------------+----------+-----+------------------+----------+---------------+-------
+ 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 3 | 1
+ 1 | 1 | 1 | 204929 | 0 | 0 | 0 | 0 | 3 | 1
+ 1 | 2 | 0 | 2 | 0 | 0 | 0 | 0 | 3 | 1
+ 1 | 2 | 1 | 204930 | 0 | 0 | 0 | 0 | 3 | 1
+(4 rows)
+DROP TABLE alter_column_zero_tupcount;
+DROP TABLE
+CHECKPOINT;
+CHECKPOINT
+SELECT * FROM gp_toolkit.gp_check_orphaned_files WHERE split_part(filename,'.',1) = (SELECT oid::text FROM pg_class WHERE relname = 'alter_column_zero_tupcount');
+ gp_segment_id | tablespace | filename
+---------------+------------+----------
+(0 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE works correctly for generated columns.
+-- Check that we error out when altering the type of columns that have dependent generated columns
+--------------------------------------------------------------------------------
+CREATE TABLE alter_column_generated_cols(a int, b int, c int GENERATED ALWAYS AS (a+b) STORED, d int GENERATED ALWAYS AS (tableoid::regclass) STORED) USING ao_column;
+CREATE TABLE
+INSERT INTO alter_column_generated_cols SELECT 1,i FROM generate_series(1,5)i;
+INSERT 0 5
+SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_generated_cols'::regclass and attname in ('b','c','d');
+ attname | atttypid
+---------+----------
+ b | integer
+ c | integer
+ d | integer
+(3 rows)
+-- b shouldn't be allowed for alter type
+ALTER TABLE alter_column_generated_cols ALTER COLUMN b TYPE text;
+ERROR: cannot alter type of a column used by a generated column
+DETAIL: Column "b" is used by generated column "c".
+SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_generated_cols'::regclass and attname in ('b','c','d');
+ attname | atttypid
+---------+----------
+ b | integer
+ c | integer
+ d | integer
+(3 rows)
+ALTER TABLE alter_column_generated_cols ALTER COLUMN c TYPE text;
+ALTER TABLE
+SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_generated_cols'::regclass and attname in ('b','c','d');
+ attname | atttypid
+---------+----------
+ b | integer
+ c | text
+ d | integer
+(3 rows)
+ALTER TABLE alter_column_generated_cols ALTER COLUMN d TYPE text;
+ALTER TABLE
+SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid='alter_column_generated_cols'::regclass and attname in ('b','c','d');
+ attname | atttypid
+---------+----------
+ b | integer
+ c | text
+ d | text
+(3 rows)
+
+--------------------------------------------------------------------------------
+-- Test if ALTER COLUMN TYPE blocks concurrent INSERT, and vice versa
+--------------------------------------------------------------------------------
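+-- (isolation2 syntax: a trailing "&" launches a statement asynchronously in
+-- that session, expecting it to block, and "<" joins it once it completes.)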
+CREATE TABLE aoco_concurrent_inserts(a int, b int, c int) USING ao_column;
+CREATE TABLE
+INSERT INTO aoco_concurrent_inserts SELECT i,i,i FROM generate_series(1,10)i;
+INSERT 0 10
+1: BEGIN;
+BEGIN
+1: INSERT INTO aoco_concurrent_inserts SELECT i,i,i FROM generate_series(1,10)i;
+INSERT 0 10
+2&: ALTER TABLE aoco_concurrent_inserts ALTER COLUMN b TYPE text;
+1: END;
+COMMIT
+2<: <... completed>
+ALTER TABLE
+-- should see 20 rows
+SELECT count(*) FROM aoco_concurrent_inserts;
+ count
+-------
+ 20
+(1 row)
+1: BEGIN;
+BEGIN
+1: ALTER TABLE aoco_concurrent_inserts ALTER COLUMN c TYPE text;
+ALTER TABLE
+2&: INSERT INTO aoco_concurrent_inserts SELECT i,i,i FROM generate_series(1,10)i;
+1: END;
+COMMIT
+2<: <... completed>
+INSERT 0 10
+-- should see 30 rows
+SELECT count(*) FROM aoco_concurrent_inserts;
+ count
+-------
+ 30
+(1 row)
+
+--------------------------------------------------------------------------------
+-- Tests for ALTER COLUMN SET ENCODING
+--------------------------------------------------------------------------------
+
+--
+-- Basic testing
+--
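+-- (ALTER COLUMN SET ENCODING is expected to rewrite only that column's
+-- segfiles, flipping its filenum, while the table keeps its relfilenode,
+-- which is why the basic cases below report rewritten = f.)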
+create table atsetenc(c1 int, c2 int) using ao_column distributed replicated;
+CREATE TABLE
+-- first check an empty table
+-- check the initial encoding settings
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+---------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ atsetenc | c2 | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(2 rows)
+-- no table rewrite
+execute capturerelfilenodebefore('set encoding - empty', 'atsetenc');
+INSERT 0 4
+alter table atsetenc alter column c1 set encoding (compresstype=zlib,compresslevel=9);
+ALTER TABLE
+execute checkrelfilenodediff('set encoding - empty', 'atsetenc');
+ segid | casename | relname | rewritten
+-------+----------------------+----------+-----------
+ 0 | set encoding - empty | atsetenc | f
+ 1 | set encoding - empty | atsetenc | f
+ 2 | set encoding - empty | atsetenc | f
+ -1 | set encoding - empty | atsetenc | f
+(4 rows)
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+---------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 2 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(2 rows)
+select * from atsetenc;
+ c1 | c2
+----+----
+(0 rows)
+
+-- now insert some data and check
+insert into atsetenc values(1,2);
+INSERT 0 1
+-- no table rewrite when setting encoding
+execute capturerelfilenodebefore('set encoding - basic', 'atsetenc');
+INSERT 0 4
+alter table atsetenc alter column c2 set encoding (compresstype=zlib,compresslevel=9);
+ALTER TABLE
+-- result intact
+select * from atsetenc;
+ c1 | c2
+----+----
+ 1 | 2
+(1 row)
+execute checkrelfilenodediff('set encoding - basic', 'atsetenc');
+ segid | casename | relname | rewritten
+-------+----------------------+----------+-----------
+ 0 | set encoding - basic | atsetenc | f
+ 1 | set encoding - basic | atsetenc | f
+ -1 | set encoding - basic | atsetenc | f
+ 2 | set encoding - basic | atsetenc | f
+(4 rows)
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+---------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+(2 rows)
+
+-- check that the encoding actually takes effect
+alter table atsetenc add column c3 text default 'a';
+ALTER TABLE
+insert into atsetenc values (1,2,repeat('a',10000));
+INSERT 0 1
+-- before altering the encoding, there is no compression by default
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+---------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c3 | 3 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(3 rows)
+select relname, attnum, size, compression_ratio from gp_toolkit.gp_column_size where relid::regclass::text = 'atsetenc' and gp_segment_id = 0 and attnum = 3;
+ relname | attnum | size | compression_ratio
+----------+--------+-------+-------------------
+ atsetenc | 3 | 10096 | 1.00
+(1 row)
+execute capturerelfilenodebefore('set encoding - compress effect', 'atsetenc');
+INSERT 0 4
+alter table atsetenc alter column c3 set encoding (compresstype=zlib,compresslevel=9);
+ALTER TABLE
+execute capturerelfilenodebefore('set encoding - compress effect', 'atsetenc');
+INSERT 0 4
+-- after altering the encoding, the size is reduced
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+---------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c3 | 1603 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+(3 rows)
+select relname,attnum,size,compression_ratio from gp_toolkit.gp_column_size where relid::regclass::text = 'atsetenc' and gp_segment_id = 0 and attnum = 3;
+ relname | attnum | size | compression_ratio
+----------+--------+------+-------------------
+ atsetenc | 3 | 120 | 84.13
+(1 row)
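+-- (The compression_ratio above is the uncompressed size over the compressed
+-- size: roughly 10096 / 120 = 84.13 once c3 is recompressed with zlib level 9.)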
+select length(c3) from atsetenc;
+ length
+--------
+ 1
+ 10000
+(2 rows)
+
+-- check that we re-index only the index on the rewritten column, and not the others
+create index atsetenc_idx2 on atsetenc(c2);
+CREATE INDEX
+create index atsetenc_idx3 on atsetenc(c3);
+CREATE INDEX
+execute capturerelfilenodebefore ('alter_column_c2', 'atsetenc_idx2');
+INSERT 0 4
+execute capturerelfilenodebefore ('alter_column_c2', 'atsetenc_idx3');
+INSERT 0 4
+alter table atsetenc alter column c2 set encoding (compresstype=zlib,compresslevel=1);
+ALTER TABLE
+execute checkrelfilenodediff('alter_column_c2', 'atsetenc_idx2');
+ segid | casename | relname | rewritten
+-------+-----------------+---------------+-----------
+ -1 | alter_column_c2 | atsetenc_idx2 | t
+ 2 | alter_column_c2 | atsetenc_idx2 | t
+ 0 | alter_column_c2 | atsetenc_idx2 | t
+ 1 | alter_column_c2 | atsetenc_idx2 | t
+(4 rows)
+execute checkrelfilenodediff('alter_column_c2', 'atsetenc_idx3');
+ segid | casename | relname | rewritten
+-------+-----------------+---------------+-----------
+ 2 | alter_column_c2 | atsetenc_idx3 | f
+ -1 | alter_column_c2 | atsetenc_idx3 | f
+ 0 | alter_column_c2 | atsetenc_idx3 | f
+ 1 | alter_column_c2 | atsetenc_idx3 | f
+(4 rows)
+
+--
+-- mixed AT commands
+--
+-- 1. with ALTER COLUMN TYPE
+alter table atsetenc add column c4 int default 4, add column c5 int default 5;
+ALTER TABLE
+execute capturerelfilenodebefore('set encoding - withaltercoltype', 'atsetenc');
+INSERT 0 4
+-- alter column type + alter column set encoding. The subcommands' order shouldn't matter.
+alter table atsetenc alter column c4 type text, alter column c4 set encoding (compresstype=zlib,compresslevel=9);
+ALTER TABLE
+alter table atsetenc alter column c5 set encoding (compresstype=zlib,compresslevel=9), alter column c5 type text;
+ALTER TABLE
+-- no rewrite
+execute checkrelfilenodediff('set encoding - withaltercoltype', 'atsetenc');
+ segid | casename | relname | rewritten
+-------+---------------------------------+----------+-----------
+ 2 | set encoding - withaltercoltype | atsetenc | f
+ 0 | set encoding - withaltercoltype | atsetenc | f
+ 1 | set encoding - withaltercoltype | atsetenc | f
+ -1 | set encoding - withaltercoltype | atsetenc | f
+(4 rows)
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+---------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | c3 | 1603 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c4 | 1604 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c5 | 1605 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+(5 rows)
+select c4, c5 from atsetenc;
+ c4 | c5
+----+----
+ 4 | 5
+ 4 | 5
+(2 rows)
+
+-- 2. with ADD COLUMN
+execute capturerelfilenodebefore('set encoding - withaddcol', 'atsetenc');
+INSERT 0 4
+alter table atsetenc add column c6 int default 6, alter column c5 set encoding (compresstype=zlib,compresslevel=1);
+ALTER TABLE
+-- no rewrite
+execute checkrelfilenodediff('set encoding - withaddcol', 'atsetenc');
+ segid | casename | relname | rewritten
+-------+---------------------------+----------+-----------
+ 0 | set encoding - withaddcol | atsetenc | f
+ 1 | set encoding - withaddcol | atsetenc | f
+ 2 | set encoding - withaddcol | atsetenc | f
+ -1 | set encoding - withaddcol | atsetenc | f
+(4 rows)
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+---------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | c3 | 1603 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c4 | 1604 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c5 | 5 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | c6 | 6 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+(6 rows)
+select c5, c6 from atsetenc;
+ c5 | c6
+----+----
+ 5 | 6
+ 5 | 6
+(2 rows)
+
+-- 3. with DROP COLUMN
+alter table atsetenc add column c7 int default 7;
+ALTER TABLE
+execute capturerelfilenodebefore('set encoding - withdropcol', 'atsetenc');
+INSERT 0 4
+-- alter and drop the same column, should complain
+alter table atsetenc alter column c7 set encoding (compresstype=zlib,compresslevel=9), drop column c7;
+ERROR: column "c7" of relation "atsetenc" does not exist
+-- alter and drop different columns, should work with no rewrite
+alter table atsetenc alter column c7 set encoding (compresstype=zlib,compresslevel=9), drop column c3;
+ALTER TABLE
+execute checkrelfilenodediff('set encoding - withdropcol', 'atsetenc');
+ segid | casename | relname | rewritten
+-------+----------------------------+----------+-----------
+ 0 | set encoding - withdropcol | atsetenc | f
+ 1 | set encoding - withdropcol | atsetenc | f
+ 2 | set encoding - withdropcol | atsetenc | f
+ -1 | set encoding - withdropcol | atsetenc | f
+(4 rows)
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+------------------------------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1601 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | c4 | 1604 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c5 | 5 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | c6 | 6 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ atsetenc | c7 | 1607 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | ........pg.dropped.3........ | 1603 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+(7 rows)
+-- should error out
+select c3 from atsetenc;
+ERROR: column "c3" does not exist
+LINE 1: select c3 from atsetenc;
+ ^
+select c7 from atsetenc;
+ c7
+----
+ 7
+ 7
+(2 rows)
+
+-- 4. with AT commands that rewrite the table
+alter table atsetenc add column c8 int default 8;
+ALTER TABLE
+-- changing to another AM, should complain
+alter table atsetenc set access method heap, alter column c8 set encoding (compresstype=zlib,compresslevel=9);
+ERROR: ALTER COLUMN SET ENCODING operation is only applicable to AOCO tables
+DETAIL: New access method for "atsetenc" is not ao_column
+-- reorganize, should rewrite
+execute capturerelfilenodebefore('set encoding - reorg', 'atsetenc');
+INSERT 0 4
+alter table atsetenc set with (reorganize=true), alter column c8 set encoding (compresstype=zlib,compresslevel=9);
+ALTER TABLE
+execute checkrelfilenodediff('set encoding - reorg', 'atsetenc');
+ segid | casename | relname | rewritten
+-------+----------------------+----------+-----------
+ 2 | set encoding - reorg | atsetenc | t
+ -1 | set encoding - reorg | atsetenc | t
+ 0 | set encoding - reorg | atsetenc | t
+ 1 | set encoding - reorg | atsetenc | t
+(4 rows)
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+------------------------------+---------+-------------------------------------------------------------
+ atsetenc | c1 | 1 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | c4 | 4 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c5 | 5 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | c6 | 6 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ atsetenc | c7 | 7 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c8 | 8 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | ........pg.dropped.3........ | 3 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+(8 rows)
+
+-- 5. multiple SET ENCODING commands
+-- no rewrite
+execute capturerelfilenodebefore('set encoding - multiple', 'atsetenc');
+INSERT 0 4
+alter table atsetenc alter column c7 set encoding (compresstype=rle_type,compresslevel=3), alter column c8 set encoding (compresstype=rle_type,compresslevel=4);
+ALTER TABLE
+execute checkrelfilenodediff('set encoding - multiple', 'atsetenc');
+ segid | casename | relname | rewritten
+-------+-------------------------+----------+-----------
+ 0 | set encoding - multiple | atsetenc | f
+ 1 | set encoding - multiple | atsetenc | f
+ -1 | set encoding - multiple | atsetenc | f
+ 2 | set encoding - multiple | atsetenc | f
+(4 rows)
+execute attribute_encoding_check('atsetenc');
+ relname | attname | filenum | attoptions
+----------+------------------------------+---------+-----------------------------------------------------------------
+ atsetenc | c1 | 1 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c2 | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | c4 | 4 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c5 | 5 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+ atsetenc | ........pg.dropped.3........ | 3 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+ atsetenc | c6 | 6 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ atsetenc | c7 | 1607 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=3']
+ atsetenc | c8 | 1608 | ['compresstype=rle_type', 'blocksize=32768', 'compresslevel=4']
+(8 rows)
+
+-- results are all good
+select * from atsetenc;
+ c1 | c2 | c4 | c5 | c6 | c7 | c8
+----+----+----+----+----+----+----
+ 1 | 2 | 4 | 5 | 6 | 7 | 8
+ 1 | 2 | 4 | 5 | 6 | 7 | 8
+(2 rows)
+
+--
+-- partition table
+--
+create table atsetencpart (a int, b int) using ao_column partition by range(b);
+CREATE TABLE
+create table atsetencpart_p1 partition of atsetencpart for values from (0) to (10);
+CREATE TABLE
+create table atsetencpart_p2 partition of atsetencpart for values from (10) to (20);
+CREATE TABLE
+create table atsetencpart_def partition of atsetencpart default;
+CREATE TABLE
+insert into atsetencpart select 1,i from generate_series(1,100)i;
+INSERT 0 100
+execute capturerelfilenodebefore('set enc', 'atsetencpart_p1');
+INSERT 0 4
+execute capturerelfilenodebefore('set enc', 'atsetencpart_p2');
+INSERT 0 4
+execute capturerelfilenodebefore('set enc', 'atsetencpart_def');
+INSERT 0 4
+-- altering the root table will alter all children
+alter table atsetencpart alter column b set encoding (compresstype=zlib,compresslevel=9);
+ALTER TABLE
+-- altering a child partition alters just that partition
+alter table atsetencpart_p2 alter column b set encoding (compresslevel=1);
+ALTER TABLE
+-- no table rewrite and the options are changed
+execute checkrelfilenodediff('set enc', 'atsetencpart_p1');
+ segid | casename | relname | rewritten
+-------+----------+-----------------+-----------
+ 2 | set enc | atsetencpart_p1 | f
+ -1 | set enc | atsetencpart_p1 | f
+ 0 | set enc | atsetencpart_p1 | f
+ 1 | set enc | atsetencpart_p1 | f
+(4 rows)
+execute checkrelfilenodediff('set enc', 'atsetencpart_p2');
+ segid | casename | relname | rewritten
+-------+----------+-----------------+-----------
+ 2 | set enc | atsetencpart_p2 | f
+ -1 | set enc | atsetencpart_p2 | f
+ 0 | set enc | atsetencpart_p2 | f
+ 1 | set enc | atsetencpart_p2 | f
+(4 rows)
+execute checkrelfilenodediff('set enc', 'atsetencpart_def');
+ segid | casename | relname | rewritten
+-------+----------+------------------+-----------
+ 2 | set enc | atsetencpart_def | f
+ -1 | set enc | atsetencpart_def | f
+ 0 | set enc | atsetencpart_def | f
+ 1 | set enc | atsetencpart_def | f
+(4 rows)
+execute attribute_encoding_check('atsetencpart_p1');
+ relname | attname | filenum | attoptions
+-----------------+---------+---------+-------------------------------------------------------------
+ atsetencpart_p1 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ atsetencpart_p1 | b | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+(2 rows)
+execute attribute_encoding_check('atsetencpart_p2');
+ relname | attname | filenum | attoptions
+-----------------+---------+---------+-------------------------------------------------------------
+ atsetencpart_p2 | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ atsetencpart_p2 | b | 2 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=1']
+(2 rows)
+execute attribute_encoding_check('atsetencpart_def');
+ relname | attname | filenum | attoptions
+------------------+---------+---------+-------------------------------------------------------------
+ atsetencpart_def | a | 1 | ['compresstype=none', 'blocksize=32768', 'compresslevel=0']
+ atsetencpart_def | b | 1602 | ['compresstype=zlib', 'blocksize=32768', 'compresslevel=9']
+(2 rows)
+-- results are as expected
+select sum(a), sum(b) from atsetencpart;
+ sum | sum
+-----+------
+ 100 | 5050
+(1 row)
+
diff --git a/src/test/isolation2/expected/aocs_unique_index.out b/src/test/isolation2/expected/aocs_unique_index.out
index 88a81e3220a..25cd89a8575 100644
--- a/src/test/isolation2/expected/aocs_unique_index.out
+++ b/src/test/isolation2/expected/aocs_unique_index.out
@@ -8,9 +8,9 @@
-- Case 1: Conflict with committed transaction----------------------------------
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491);
-INSERT 658491
+INSERT 0 658491
-- should conflict
INSERT INTO unique_index_ao_column VALUES (1);
ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg0 192.168.0.148:7002 pid=721860)
@@ -20,73 +20,73 @@ ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a
DETAIL: Key (a)=(658491) already exists.
-- should not conflict
INSERT INTO unique_index_ao_column VALUES (658492);
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
-- Case 2: Conflict within the same transaction---------------------------------
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
BEGIN;
BEGIN
INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491);
-INSERT 658491
+INSERT 0 658491
-- should conflict
INSERT INTO unique_index_ao_column VALUES (1);
ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg1 192.168.0.148:7003 pid=721861)
DETAIL: Key (a)=(1) already exists.
END;
-END
+ROLLBACK
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
BEGIN;
BEGIN
INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491);
-INSERT 658491
+INSERT 0 658491
-- should conflict
INSERT INTO unique_index_ao_column VALUES (658491);
ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg0 192.168.0.148:7002 pid=721860)
DETAIL: Key (a)=(658491) already exists.
END;
-END
+ROLLBACK
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
BEGIN;
BEGIN
INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491);
-INSERT 658491
+INSERT 0 658491
-- should not conflict
INSERT INTO unique_index_ao_column VALUES (658492);
-INSERT 1
+INSERT 0 1
END;
-END
+COMMIT
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
-- Case 3: Conflict with aborted transaction is not a conflict------------------
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
BEGIN;
BEGIN
INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491);
-INSERT 658491
+INSERT 0 658491
ABORT;
-ABORT
+ROLLBACK
-- should not conflict
INSERT INTO unique_index_ao_column VALUES (1);
-INSERT 1
+INSERT 0 1
INSERT INTO unique_index_ao_column VALUES (658491);
-INSERT 1
+INSERT 0 1
INSERT INTO unique_index_ao_column VALUES (658492);
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
-- Case 4: Conflict with to-be-committed transaction----------------------------
--
@@ -106,21 +106,21 @@ DROP
-- 10. Tx 1 commits
--
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
1: INSERT INTO unique_index_ao_column VALUES (0);
-INSERT 1
+INSERT 0 1
2: BEGIN;
BEGIN
2: INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491);
-INSERT 658491
+INSERT 0 658491
3&: INSERT INTO unique_index_ao_column VALUES (1);
4&: INSERT INTO unique_index_ao_column VALUES (658490);
5&: INSERT INTO unique_index_ao_column VALUES (658491);
-- should succeed immediately
6: INSERT INTO unique_index_ao_column VALUES (658492);
-INSERT 1
+INSERT 0 1
2: COMMIT;
COMMIT
3<: <... completed>
@@ -135,7 +135,7 @@ DETAIL: Key (a)=(658491) already exists.
1: COMMIT;
COMMIT
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
-- Case 5: Conflict with to-be-aborted transaction------------------------------
--
@@ -155,37 +155,37 @@ DROP
-- 10. Tx 1 commits
--
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
1: INSERT INTO unique_index_ao_column VALUES (0);
-INSERT 1
+INSERT 0 1
2: BEGIN;
BEGIN
2: INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 658491);
-INSERT 658491
+INSERT 0 658491
3&: INSERT INTO unique_index_ao_column VALUES (1);
4&: INSERT INTO unique_index_ao_column VALUES (658490);
5&: INSERT INTO unique_index_ao_column VALUES (658491);
-- should succeed immediately
6: INSERT INTO unique_index_ao_column VALUES (658492);
-INSERT 1
+INSERT 0 1
2: ABORT;
-ABORT
+ROLLBACK
3<: <... completed>
-INSERT 1
+INSERT 0 1
4<: <... completed>
-INSERT 1
+INSERT 0 1
5<: <... completed>
-INSERT 1
+INSERT 0 1
1: COMMIT;
COMMIT
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
-- Case 6: Conflict with aborted rows following some committed rows ------------
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
-- 1. Tx 1 commits rows 1-100.
-- 2. Tx 2 inserts rows 101-200 and then aborts.
-- 3. Tx 3 tries to insert row in range [101,200] and is immediately successful.
@@ -193,22 +193,22 @@ CREATE
-- constraint violation.
-- 5. Tx 5 tries to insert row in range [201, ) and is immediately successful.
1: INSERT INTO unique_index_ao_column SELECT generate_series(1, 100);
-INSERT 100
+INSERT 0 100
2: BEGIN;
BEGIN
2: INSERT INTO unique_index_ao_column SELECT generate_series(101, 200);
-INSERT 100
+INSERT 0 100
2: ABORT;
-ABORT
+ROLLBACK
3: INSERT INTO unique_index_ao_column VALUES(102);
-INSERT 1
+INSERT 0 1
4: INSERT INTO unique_index_ao_column VALUES(2);
ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg0 192.168.0.148:7002 pid=659656)
DETAIL: Key (a)=(2) already exists.
5: INSERT INTO unique_index_ao_column VALUES(202);
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
--------------------------------------------------------------------------------
----------------- More concurrent tests with fault injection ------------------
@@ -232,7 +232,7 @@ DROP
-- 6. Tx 2 succeeds as Tx 1 aborted.
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
SELECT gp_inject_fault('appendonly_insert', 'suspend', '', '', 'unique_index_ao_column', 4, 4, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
gp_inject_fault
-----------------
@@ -252,9 +252,9 @@ SELECT gp_wait_until_triggered_fault('appendonly_insert', 4, dbid) FROM gp_segme
(3 rows)
2&: INSERT INTO unique_index_ao_column VALUES(2);
4: INSERT INTO unique_index_ao_column VALUES(11);
-INSERT 1
+INSERT 0 1
3: INSERT INTO unique_index_ao_column VALUES(4);
-INSERT 1
+INSERT 0 1
SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
gp_inject_fault
-----------------
@@ -266,9 +266,9 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi
ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg1 192.168.0.148:7003 pid=828519)
DETAIL: Key (a)=(4) already exists.
2<: <... completed>
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
-- Case 8: Conflict with to-be-committed transaction - generalization of case 7
-- where there are multiple minipages (and block directory rows) in play from
@@ -293,7 +293,7 @@ DROP
-- 7. All blocked Txs succeed.
CREATE TABLE unique_index_ao_column (a bigint unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
SELECT gp_inject_fault('insert_new_entry_curr_minipage_full', 'suspend', '', '', '', 2, 2, 0, dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
gp_inject_fault
@@ -349,7 +349,7 @@ SELECT gp_wait_until_triggered_fault('appendonly_insert', 4, dbid) FROM gp_segme
8&: INSERT INTO unique_index_ao_column VALUES(1321071);
-- no index entry exists for it, so should not conflict.
9: INSERT INTO unique_index_ao_column VALUES(1321075);
-INSERT 1
+INSERT 0 1
SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content <> -1;
gp_inject_fault
@@ -363,29 +363,29 @@ SELECT gp_inject_fault('appendonly_insert', 'reset', dbid) FROM gp_segment_confi
ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a_key" (seg2 192.168.0.148:7004 pid=735802)
DETAIL: Key (a)=(1321075) already exists.
2<: <... completed>
-INSERT 1
+INSERT 0 1
3<: <... completed>
-INSERT 1
+INSERT 0 1
4<: <... completed>
-INSERT 1
+INSERT 0 1
5<: <... completed>
-INSERT 1
+INSERT 0 1
6<: <... completed>
-INSERT 1
+INSERT 0 1
7<: <... completed>
-INSERT 1
+INSERT 0 1
8<: <... completed>
-INSERT 1
+INSERT 0 1
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
--------------------------------------------------------------------------------
--------------------------- Smoke tests for COPY -------------------------------
--------------------------------------------------------------------------------
CREATE TABLE unique_index_ao_column (a INT unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
@@ -405,23 +405,23 @@ CONTEXT: COPY unique_index_ao_column, line 1
2<: <... completed>
COPY 1
1: END;
-END
+ROLLBACK
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
--------------------------------------------------------------------------------
-------------------- Smoke tests for subtransactions ---------------------------
--------------------------------------------------------------------------------
CREATE TABLE unique_index_ao_column (a INT unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
1: SAVEPOINT a;
SAVEPOINT
1: INSERT INTO unique_index_ao_column VALUES(1);
-INSERT 1
+INSERT 0 1
-- concurrent tx inserting conflicting row should block.
2: BEGIN;
@@ -429,7 +429,7 @@ BEGIN
2&: INSERT INTO unique_index_ao_column VALUES(1);
-- concurrent tx inserting non-conflicting row should be successful.
3: INSERT INTO unique_index_ao_column VALUES(2);
-INSERT 1
+INSERT 0 1
-- conflict should be detected within the same subtx.
1: INSERT INTO unique_index_ao_column VALUES(1);
@@ -437,15 +437,15 @@ ERROR: duplicate key value violates unique constraint "unique_index_ao_column_a
DETAIL: Key (a)=(1) already exists.
-- the concurrent tx should now succeed.
2<: <... completed>
-INSERT 1
+INSERT 0 1
2: ABORT;
-ABORT
+ROLLBACK
-- after rolling back to the savepoint, we should be able to re-insert the key
1: ROLLBACK TO SAVEPOINT a;
ROLLBACK
1: INSERT INTO unique_index_ao_column VALUES(1);
-INSERT 1
+INSERT 0 1
1: COMMIT;
COMMIT
@@ -457,7 +457,7 @@ SELECT * FROM unique_index_ao_column;
(2 rows)
DROP TABLE unique_index_ao_column;
-DROP
+DROP TABLE
--------------------------------------------------------------------------------
-------------------- Smoke tests for repeatable read ---------------------------
@@ -467,7 +467,7 @@ DROP
-- boundaries.
CREATE TABLE unique_index_ao_column (a INT unique) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
-- Begin two txs with tx level snapshot taken early.
1: BEGIN ISOLATION LEVEL REPEATABLE READ;
@@ -487,11 +487,11 @@ BEGIN
3: BEGIN;
BEGIN
3: INSERT INTO unique_index_ao_column VALUES(1);
-INSERT 1
+INSERT 0 1
-- And another transaction inserts a key and commits.
INSERT INTO unique_index_ao_column VALUES(2);
-INSERT 1
+INSERT 0 1
-- Tx should block on insert of conflicting key, even though it can't "see" the
-- conflicting key due to its isolation level.
@@ -502,11 +502,11 @@ INSERT 1
1&: INSERT INTO unique_index_ao_column VALUES(1);
3: ABORT;
-ABORT
+ROLLBACK
1<: <... completed>
-INSERT 1
+INSERT 0 1
1: ABORT;
-ABORT
+ROLLBACK
-- Tx should raise a conflict, even though it can't "see" the conflicting key
-- due to its isolation level.
@@ -522,52 +522,3 @@ ABORT
DROP TABLE unique_index_ao_column;
DROP
-
-
---------------------------------------------------------------------------------
------------------------ Smoke tests for ADD CONSTRAINT ------------------------
---------------------------------------------------------------------------------
-CREATE TABLE unique_index_ao_column (a INT) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
-INSERT INTO unique_index_ao_column SELECT * FROM generate_series(1, 5);
-INSERT 5
-
-ALTER table unique_index_ao_column ADD CONSTRAINT a_unique UNIQUE(a);
-ALTER
--- should conflict
-INSERT INTO unique_index_ao_column VALUES (1);
-DETAIL: Key (a)=(1) already exists.
-ERROR: duplicate key value violates unique constraint "a_unique"
-ALTER table unique_index_ao_column DROP CONSTRAINT a_unique;
-ALTER
-
-INSERT INTO unique_index_ao_column VALUES (1);
-INSERT 1
--- should failed
-ALTER table unique_index_ao_column ADD CONSTRAINT a_unique UNIQUE(a);
-DETAIL: Key (a)=(1) is duplicated.
-ERROR: could not create unique index "a_unique"
-
-DROP TABLE unique_index_ao_column;
-DROP
-
-
---------------------------------------------------------------------------------
------------------------ Smoke tests for Multiple Key ---------------------------
---------------------------------------------------------------------------------
-CREATE TABLE unique_index_ao_column (a INT, b INT) USING ao_column DISTRIBUTED REPLICATED;
-CREATE
-INSERT INTO unique_index_ao_column SELECT i,i FROM generate_series(1, 5) i;
-INSERT 5
-
-CREATE UNIQUE INDEX a_b_unique ON unique_index_ao_column(a,b);
-CREATE
--- should not conflict
-INSERT INTO unique_index_ao_column VALUES (1,2);
-INSERT 1
--- should conflict
-INSERT INTO unique_index_ao_column VALUES (1,1);
-DETAIL: Key (a, b)=(1, 1) already exists.
-ERROR: duplicate key value violates unique constraint "a_b_unique"
-DROP TABLE unique_index_ao_column;
-DROP
diff --git a/src/test/isolation2/expected/bitmap_index_ao_sparse.out b/src/test/isolation2/expected/bitmap_index_ao_sparse.out
index 933aaad5552..8f9a1a15b51 100644
--- a/src/test/isolation2/expected/bitmap_index_ao_sparse.out
+++ b/src/test/isolation2/expected/bitmap_index_ao_sparse.out
@@ -9,7 +9,7 @@
-- Test AO table.
CREATE TABLE ao_sparse (id int) with(appendonly = true) DISTRIBUTED BY (id);
-CREATE
+CREATE TABLE
1: begin;
BEGIN
@@ -17,9 +17,9 @@ BEGIN
BEGIN
1: INSERT INTO ao_sparse SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
2: INSERT INTO ao_sparse SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
1: commit;
COMMIT
@@ -34,7 +34,7 @@ SELECT count(*) FROM ao_sparse WHERE id >= 97 and id <= 99 and gp_segment_id = 0
(1 row)
CREATE INDEX idx_ao_sparse_id ON ao_sparse USING bitmap (id);
-CREATE
+CREATE INDEX
-- Should generate Bitmap Heap Scan on the bitmap index.
@@ -74,7 +74,7 @@ SET
-- Test AOCS table.
CREATE TABLE aocs_sparse (id int) with(appendonly = true, orientation = COLUMN) DISTRIBUTED BY (id);
-CREATE
+CREATE TABLE
1: begin;
BEGIN
@@ -82,9 +82,9 @@ BEGIN
BEGIN
1: INSERT INTO aocs_sparse SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
2: INSERT INTO aocs_sparse SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
1: commit;
COMMIT
diff --git a/src/test/isolation2/expected/bitmap_index_concurrent.out b/src/test/isolation2/expected/bitmap_index_concurrent.out
index b87cae7f476..e46b8673437 100644
--- a/src/test/isolation2/expected/bitmap_index_concurrent.out
+++ b/src/test/isolation2/expected/bitmap_index_concurrent.out
@@ -25,7 +25,7 @@
-- will generate two bitmap pages, and the first page is a full page.
-- Use heap table, delete tuples and then vacuum should be the same. But it needs huge tuples.
CREATE TABLE bmupdate (id int) with(appendonly = true) DISTRIBUTED BY (id);
-CREATE
+CREATE TABLE
1: begin;
BEGIN
@@ -73,49 +73,49 @@ BEGIN
BEGIN
1: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
2: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
3: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
4: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
5: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
6: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
7: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
8: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
9: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
10: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
11: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
12: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
13: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
14: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
15: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
16: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
17: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
18: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
19: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
20: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
21: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
22: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i;
-INSERT 1000000
+INSERT 0 1000000
1: commit;
COMMIT
@@ -170,7 +170,7 @@ SELECT count(*) FROM bmupdate WHERE id = 97;
(1 row)
CREATE INDEX idx_bmupdate__id ON bmupdate USING bitmap (id);
-CREATE
+CREATE INDEX
--
-- Test 1, run Bitmap Heap Scan on the bitmap index when there's
@@ -214,7 +214,7 @@ SET
-- The reason it not insert at the end of bitmap LOV is because right now only one
-- transaction doing the insert, and it'll insert to small seg file number.
2: INSERT INTO bmupdate VALUES (97);
-INSERT 1
+INSERT 0 1
-- Query should read the first page(buffer lock released), and then INSERT insert to
-- the first page which will trigger rearrange words.
@@ -294,7 +294,7 @@ SET
-- The reason it not insert at the end of bitmap LOV is because right now only one
-- transaction doing the insert, and it'll insert to small seg file number.
2: INSERT INTO bmupdate VALUES (97);
-INSERT 1
+INSERT 0 1
-- Query should read the first page(buffer lock released), and then INSERT insert to
-- the first page which will trigger rearrange words.
@@ -374,9 +374,9 @@ SELECT gp_inject_fault_infinite('rearrange_word_to_next_bitmap_page', 'skip', db
-- transaction doing the insert, and it'll insert to small seg file number.
-- Here insert both values to make sure update on full bitmap happens for one LOV.
2: INSERT INTO bmupdate VALUES (97);
-INSERT 1
+INSERT 0 1
2: INSERT INTO bmupdate VALUES (99);
-INSERT 1
+INSERT 0 1
-- Query should read the first page(buffer lock released), and then INSERT insert to
-- the first page which will trigger rearrange words.
@@ -461,9 +461,9 @@ SET
-- transaction doing the insert, and it'll insert to small seg file number.
-- Here insert both values to make sure update on full bitmap happens for one LOV.
2: INSERT INTO bmupdate SELECT 97 FROM generate_series(1, 1000);
-INSERT 1000
+INSERT 0 1000
2: INSERT INTO bmupdate SELECT 99 FROM generate_series(1, 1000);
-INSERT 1000
+INSERT 0 1000
-- Query should read the first page(buffer lock released), and then INSERT insert to
-- the first page which will trigger rearrange words.
@@ -506,7 +506,7 @@ SELECT count(*) FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0;
(1 row)
DROP TABLE bmupdate;
-DROP
+DROP TABLE
-- Regression test, when large amount of inserts concurrent inserts happen,
diff --git a/src/test/isolation2/expected/bitmap_index_crash.out b/src/test/isolation2/expected/bitmap_index_crash.out
index 7dce1ae6d31..b49ff50a065 100644
--- a/src/test/isolation2/expected/bitmap_index_crash.out
+++ b/src/test/isolation2/expected/bitmap_index_crash.out
@@ -5,7 +5,7 @@
-- subsequent flush of the metapage will lead to an inadvertent
-- overwrite.
1:CREATE EXTENSION IF NOT EXISTS gp_inject_fault;
-CREATE
+CREATE EXTENSION
-- skip FTS probes for this test to avoid segment being marked down on restart
1:SELECT gp_inject_fault_infinite('fts_probe', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1;
gp_inject_fault_infinite
@@ -20,9 +20,9 @@ CREATE
-- test setup
1:CREATE TABLE bm(a int);
-CREATE
+CREATE TABLE
1:CREATE INDEX ON bm USING bitmap (a);
-CREATE
+CREATE INDEX
-- pause checkpoint to make sure CRASH RECOVERY happens for bitmap index replay
1:SELECT gp_inject_fault_infinite('checkpoint', 'skip', dbid) FROM gp_segment_configuration WHERE role='p';
gp_inject_fault_infinite
@@ -37,7 +37,7 @@ CHECKPOINT
-- this insert's WAL we wish to replay
1:insert into bm select generate_series(1, 5000);
-INSERT 5000
+INSERT 0 5000
1U:select pg_relation_size(oid)/current_setting('block_size')::bigint from pg_class where relname = 'bm_a_idx';
?column?
----------
@@ -46,7 +46,7 @@ INSERT 5000
-- set small shared_buffers to make sure META_PAGE of bitmap index evicts out
1U: ALTER SYSTEM set shared_buffers to 20;
-ALTER
+ALTER SYSTEM
1:SELECT pg_ctl(datadir, 'restart') from gp_segment_configuration where role = 'p' and content = 1;
pg_ctl
--------
@@ -73,7 +73,7 @@ SET
-- teardown cleanup for the test
1Uq: ...
1U:ALTER SYSTEM reset shared_buffers;
-ALTER
+ALTER SYSTEM
2:SELECT pg_ctl(datadir, 'restart') from gp_segment_configuration where role = 'p' and content = 1;
pg_ctl
--------
diff --git a/src/test/isolation2/expected/bitmap_index_inspect.out b/src/test/isolation2/expected/bitmap_index_inspect.out
index fa925fcad97..39f3865b035 100644
--- a/src/test/isolation2/expected/bitmap_index_inspect.out
+++ b/src/test/isolation2/expected/bitmap_index_inspect.out
@@ -3,14 +3,16 @@
-- inspect functions run against a single node, as opposed to the entire GP cluster)
-- Setup
+1U: CREATE EXTENSION pageinspect;
+CREATE EXTENSION
1U: CREATE TABLE bmtest_t1(i int, bmfield int);
-CREATE
+CREATE TABLE
1U: CREATE INDEX bmtest_i1 ON bmtest_t1 USING bitmap(bmfield);
-CREATE
+CREATE INDEX
1U: INSERT INTO bmtest_t1 SELECT i,1 FROM generate_series(1, 1000) i;
-INSERT 1000
+INSERT 0 1000
1U: INSERT INTO bmtest_t1 SELECT i,2 FROM generate_series(1, 1000) i;
-INSERT 1000
+INSERT 0 1000
-- start_matchsubs
-- m/bmfuncs.c:\d+/
@@ -61,4 +63,6 @@ ERROR: block 1 is not a bitmap page, it is a LOV item page (bmfuncs.c:507)
-- cleanup
1U: DROP TABLE bmtest_t1;
-DROP
+DROP TABLE
+1U: DROP EXTENSION pageinspect;
+DROP EXTENSION
diff --git a/src/test/isolation2/expected/bitmap_update_words_backup_block.out b/src/test/isolation2/expected/bitmap_update_words_backup_block.out
index 950f91d36be..98567439ee1 100644
--- a/src/test/isolation2/expected/bitmap_update_words_backup_block.out
+++ b/src/test/isolation2/expected/bitmap_update_words_backup_block.out
@@ -1,6 +1,6 @@
-- Setup fault injectors.
CREATE EXTENSION IF NOT EXISTS gp_inject_fault;
-CREATE
+CREATE EXTENSION
-- Skip FTS probes for this test to avoid segment being marked down on restart.
1:SELECT gp_inject_fault_infinite('fts_probe', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1;
@@ -14,38 +14,38 @@ CREATE
t
(1 row)
CREATE TABLE bm_update_words_backup_block (id int) WITH (appendonly = true);
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
2: BEGIN;
BEGIN
1: INSERT INTO bm_update_words_backup_block SELECT i%100 FROM generate_series(1, 200) AS i;
-INSERT 200
+INSERT 0 200
2: INSERT INTO bm_update_words_backup_block SELECT i%100 FROM generate_series(1, 200) AS i;
-INSERT 200
+INSERT 0 200
1: COMMIT;
COMMIT
2: COMMIT;
COMMIT
CREATE INDEX bm_update_words_backup_block_idx ON bm_update_words_backup_block USING bitmap (id);
-CREATE
+CREATE INDEX
-- INSERTs will attempt to add a bitmap page but will cause a word
-- expansion and a bitmap page split due to overflow. See bitmap
-- function updatesetbit_inpage().
2: INSERT INTO bm_update_words_backup_block VALUES (97);
-INSERT 1
+INSERT 0 1
2: INSERT INTO bm_update_words_backup_block VALUES (97), (99);
-INSERT 2
+INSERT 0 2
-- Run a CHECKPOINT to force this next INSERT to add backup blocks of
-- the two bitmap pages to its XLOG_BITMAP_UPDATEWORDS record.
2: CHECKPOINT;
CHECKPOINT
2: INSERT INTO bm_update_words_backup_block VALUES (97);
-INSERT 1
+INSERT 0 1
-- Do an immediate restart to force crash recovery. The above INSERT
-- should be replayed with the backup blocks.
@@ -55,7 +55,7 @@ INSERT 1
OK
(1 row)
3: INSERT INTO bm_update_words_backup_block VALUES (97);
-INSERT 1
+INSERT 0 1
-- Turn FTS back on.
3:SELECT gp_inject_fault('fts_probe', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1;
diff --git a/src/test/isolation2/expected/brin_heap.out b/src/test/isolation2/expected/brin_heap.out
index 83aa84b8e69..e656f3a16f5 100644
--- a/src/test/isolation2/expected/brin_heap.out
+++ b/src/test/isolation2/expected/brin_heap.out
@@ -2,6 +2,8 @@
-- White-box tests are necessary to ensure that summarization is done
-- successfully (to avoid cases where ranges have brin data tuples without
-- values or where the range is not covered by the revmap etc)
+CREATE EXTENSION pageinspect;
+CREATE EXTENSION
-- Turn off sequential scans to force usage of BRIN indexes for scans.
SET enable_seqscan TO off;
@@ -11,9 +13,9 @@ SET
-- by another transaction, while summarization was in flight.
CREATE TABLE brin_range_extended_heap(i int) USING heap;
-CREATE
+CREATE TABLE
CREATE INDEX ON brin_range_extended_heap USING brin(i) WITH (pages_per_range=5);
-CREATE
+CREATE INDEX
-- Insert 9 blocks of data on 1 QE; 8 blocks full, 1 block with 1 tuple.
SELECT populate_pages('brin_range_extended_heap', 1, tid '(8, 0)');
@@ -105,9 +107,9 @@ SELECT gp_inject_fault('summarize_last_partial_range', 'reset', dbid) FROM gp_se
-- Test build/summarize with aborted rows.
CREATE TABLE brin_abort_heap(i int);
-CREATE
+CREATE TABLE
CREATE INDEX ON brin_abort_heap USING brin(i) WITH (pages_per_range=1);
-CREATE
+CREATE INDEX
BEGIN;
BEGIN
-- Create 3 blocks all on 1 QE, in 1 aoseg: 2 blocks full, 1 block with 1 tuple.
@@ -117,7 +119,7 @@ SELECT populate_pages('brin_abort_heap', 1, tid '(2, 0)');
(1 row)
ABORT;
-ABORT
+ROLLBACK
-- Sanity: There is 1 revmap page and 1 data page, with 1 range (summarized).
-- This first range being summarized highlights a difference with AO/CO tables.
@@ -260,9 +262,9 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_
-- Drop and re-create the index to test build.
DROP INDEX brin_abort_heap_i_idx;
-DROP
+DROP INDEX
CREATE INDEX ON brin_abort_heap USING brin(i) WITH (pages_per_range=1);
-CREATE
+CREATE INDEX
-- Sanity: There is 1 revmap page and 1 data page, with 4 ranges. Only the last
-- two ranges (covering the committed rows) have non-empty tuples.
@@ -315,3 +317,5 @@ SELECT gp_inject_fault('brin_bitmap_page_added', 'reset', dbid) FROM gp_segment_
RESET enable_seqscan;
RESET
+DROP EXTENSION pageinspect;
+DROP EXTENSION
diff --git a/src/test/isolation2/expected/cancel_plpython.out b/src/test/isolation2/expected/cancel_plpython.out
index 37964e3b04c..fff46cf807b 100644
--- a/src/test/isolation2/expected/cancel_plpython.out
+++ b/src/test/isolation2/expected/cancel_plpython.out
@@ -3,25 +3,25 @@ CREATE LANGUAGE plpython3u;
CREATE
-- end_ignore
CREATE OR REPLACE FUNCTION pybusyloop() RETURNS double precision AS $$ import math while True: a = 1 return 1 $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
CREATE OR REPLACE FUNCTION pysleep() RETURNS double precision AS $$ import time time.sleep(100) return 1 $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
CREATE OR REPLACE FUNCTION pyspisleep() RETURNS double precision AS $$ # container: plc_python_shared rv = plpy.execute("select pg_sleep(100)") return 1 $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
CREATE OR REPLACE FUNCTION pynestsleep() RETURNS double precision AS $$ # container: plc_python_shared rv = plpy.execute("select pyspisleep()") return 1 $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
CREATE OR REPLACE FUNCTION pynestsleep2() RETURNS double precision AS $$ # container: plc_python_shared rv = plpy.execute("select pysleep()") return 1 $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
CREATE TABLE a(i int);
-CREATE
+CREATE TABLE
insert into a values(1),(10),(20),(100);
-INSERT 4
+INSERT 0 4
1&: select pybusyloop();
2&: select pybusyloop() from a;
diff --git a/src/test/isolation2/expected/cancel_query.out b/src/test/isolation2/expected/cancel_query.out
index 38bda39fe4e..2833da2dc05 100644
--- a/src/test/isolation2/expected/cancel_query.out
+++ b/src/test/isolation2/expected/cancel_query.out
@@ -1,14 +1,14 @@
CREATE EXTENSION IF NOT EXISTS gp_inject_fault;
-CREATE
+CREATE EXTENSION
0:CREATE TABLE a_partition_table_for_analyze_cancellation ( a_date date NOT NULL, a_bigint bigint NOT NULL, b_bigint bigint NOT NULL ) WITH (appendonly='true', orientation='column') DISTRIBUTED BY (a_bigint, b_bigint) PARTITION BY RANGE(a_date) ( PARTITION p1 START ('2018-01-01'::date) END ('2018-12-31'::date) WITH (appendonly='true', orientation='column') COLUMN a_date ENCODING (compresstype=zlib) COLUMN a_bigint ENCODING (compresstype=zlib) COLUMN b_bigint ENCODING (compresstype=zlib), PARTITION p2 START ('2019-01-01'::date) END ('2019-12-31'::date) WITH (appendonly='true', orientation='column') COLUMN a_date ENCODING (compresstype=zlib) COLUMN a_bigint ENCODING (compresstype=zlib) COLUMN b_bigint ENCODING (compresstype=zlib), PARTITION p3 START ('2020-01-01'::date) END ('2020-12-31'::date) WITH (appendonly='true', orientation='column') COLUMN a_date ENCODING (compresstype=zlib) COLUMN a_bigint ENCODING (compresstype=zlib) COLUMN b_bigint ENCODING (compresstype=zlib) );
-CREATE
+CREATE TABLE
0:INSERT INTO a_partition_table_for_analyze_cancellation VALUES(timestamp '2018-01-01 10:00:00', 1, 3);
-INSERT 1
+INSERT 0 1
0:INSERT INTO a_partition_table_for_analyze_cancellation VALUES(timestamp '2019-01-01 12:00:00', 2, 4);
-INSERT 1
+INSERT 0 1
0:INSERT INTO a_partition_table_for_analyze_cancellation VALUES(timestamp '2020-01-01 13:00:00', 3, 5);
-INSERT 1
+INSERT 0 1
0: SELECT gp_inject_fault('zlib_decompress_after_decompress_fn', 'sleep', '', '', '', 1, -1, 3600, dbid) FROM gp_segment_configuration WHERE content=1 AND role='p';
gp_inject_fault
diff --git a/src/test/isolation2/expected/checkpoint_dtx_info.out b/src/test/isolation2/expected/checkpoint_dtx_info.out
index e17e8706631..9e69d64006c 100644
--- a/src/test/isolation2/expected/checkpoint_dtx_info.out
+++ b/src/test/isolation2/expected/checkpoint_dtx_info.out
@@ -34,7 +34,7 @@
1: begin;
BEGIN
1: create table twopcbug(i int, j int);
-CREATE
+CREATE TABLE
1&: commit;
-- wait to make sure the commit is taking place and blocked at start_insertedDistributedCommitted
2: select gp_wait_until_triggered_fault('start_insertedDistributedCommitted', 1, 1);
@@ -102,7 +102,7 @@ server closed the connection unexpectedly
-- failure on coordinator. The solution is adding the expected length
-- in SizeOfXLogRecordDataHeaderLong also, to fixup the missing condition.
create table ckpt_xlog_len_tbl(a int, b int);
-CREATE
+CREATE TABLE
-- Need to start at least 18 concurrent sessions to create a long header
-- CHECKPOINT WAL record, which size is not less than 256.
@@ -118,115 +118,115 @@ CREATE
10: begin;
BEGIN
10: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
10&: commit;
11: begin;
BEGIN
11: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
11&: commit;
12: begin;
BEGIN
12: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
12&: commit;
13: begin;
BEGIN
13: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
13&: commit;
14: begin;
BEGIN
14: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
14&: commit;
15: begin;
BEGIN
15: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
15&: commit;
16: begin;
BEGIN
16: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
16&: commit;
17: begin;
BEGIN
17: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
17&: commit;
18: begin;
BEGIN
18: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
18&: commit;
19: begin;
BEGIN
19: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
19&: commit;
20: begin;
BEGIN
20: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
20&: commit;
21: begin;
BEGIN
21: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
21&: commit;
22: begin;
BEGIN
22: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
22&: commit;
23: begin;
BEGIN
23: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
23&: commit;
24: begin;
BEGIN
24: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
24&: commit;
25: begin;
BEGIN
25: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
25&: commit;
26: begin;
BEGIN
26: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
26&: commit;
27: begin;
BEGIN
27: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
27&: commit;
28: begin;
BEGIN
28: insert into ckpt_xlog_len_tbl select i,i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
28&: commit;
-- wait to make sure the commit is taking place and blocked at start_insertedDistributedCommitted
@@ -362,4 +362,8 @@ server closed the connection unexpectedly
(1 row)
3: drop table ckpt_xlog_len_tbl;
-DROP
+DROP TABLE
+
+-- start_ignore
+alter resource group admin_group set concurrency 10;
+-- end_ignore
diff --git a/src/test/isolation2/expected/commit_transaction_block_checkpoint.out b/src/test/isolation2/expected/commit_transaction_block_checkpoint.out
index 0857b3b7a2a..83bd051083a 100644
--- a/src/test/isolation2/expected/commit_transaction_block_checkpoint.out
+++ b/src/test/isolation2/expected/commit_transaction_block_checkpoint.out
@@ -13,7 +13,7 @@ CHECKPOINT
2: begin;
BEGIN
2: create table t_commit_transaction_block_checkpoint (c int) distributed by (c);
-CREATE
+CREATE TABLE
2&: commit;
-- wait for the fault to trigger since following checkpoint could be faster
@@ -53,7 +53,7 @@ CHECKPOINT
2: begin;
BEGIN
2: drop table t_commit_transaction_block_checkpoint;
-DROP
+DROP TABLE
2&: commit;
-- wait for the fault to trigger since following checkpoint could be faster
diff --git a/src/test/isolation2/expected/concurrent_drop_truncate_tablespace.out b/src/test/isolation2/expected/concurrent_drop_truncate_tablespace.out
index 501f188fc7d..7fa8607eeff 100644
--- a/src/test/isolation2/expected/concurrent_drop_truncate_tablespace.out
+++ b/src/test/isolation2/expected/concurrent_drop_truncate_tablespace.out
@@ -23,82 +23,8 @@
CREATE TABLESPACE concurrent_tblspace LOCATION '/tmp/concurrent_tblspace';
CREATE
--- test 1:
--- when creating a table using a tablespace, after the tuple of tablespace
--- is locked, the tablespace is not allowed to drop
-2: begin;
-BEGIN
-2: CREATE TABLE t_in_tablespace(a int, b int) TABLESPACE concurrent_tblspace;
-CREATE
-
--- drop tablespace will fail: can't acuqire the lock
-DROP TABLESPACE concurrent_tblspace;
-ERROR: could not lock tablespace "concurrent_tblspace"
-2: rollback;
-ROLLBACK
-
--- test 2:
--- if DROP TABLESPACE acquires lock first and rollback, the blocking CREATE
--- TABLE will be successful.
-
--- suspend execution after tablespace lock is acquired
-SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'suspend', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p';
- gp_inject_fault
------------------
- Success:
- Success:
- Success:
-(3 rows)
-1&: DROP TABLESPACE concurrent_tblspace;
-
--- wait for the fault to be triggered
-SELECT gp_wait_until_triggered_fault('drop_tablespace_after_acquire_lock', 1, dbid) from gp_segment_configuration where content <> -1 and role='p';
- gp_wait_until_triggered_fault
--------------------------------
- Success:
- Success:
- Success:
-(3 rows)
-
-2&: CREATE TABLE t_in_tablespace(a int, b int) TABLESPACE concurrent_tblspace;
--- inject an error to ensure that the above DROP command will rollback
-SELECT gp_inject_fault('after_xlog_tblspc_drop', 'error', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p';
- gp_inject_fault
------------------
- Success:
- Success:
- Success:
-(3 rows)
-SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'reset', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p';
- gp_inject_fault
------------------
- Success:
- Success:
- Success:
-(3 rows)
--- fail
-1<: <... completed>
-ERROR: fault triggered, fault name:'after_xlog_tblspc_drop' fault type:'error'
--- success
-2<: <... completed>
-CREATE
--- drop the above table, so the tablespace is empty.
-2: DROP TABLE t_in_tablespace;
-DROP
-SELECT gp_inject_fault('after_xlog_tblspc_drop', 'reset', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p';
- gp_inject_fault
------------------
- Success:
- Success:
- Success:
-(3 rows)
-
--- test 3:
--- if DROP TABLESPACE acquires lock first and going to drop, any CREATE TABLE
--- will fail
-
--- suspend execution after tablespace lock is acquired
-SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'suspend', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p';
+-- suspend execution after TablespaceCreateLock is released
+SELECT gp_inject_fault('AfterTablespaceCreateLockRelease', 'suspend', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p';
gp_inject_fault
-----------------
Success:
@@ -117,9 +43,12 @@ SELECT gp_wait_until_triggered_fault('drop_tablespace_after_acquire_lock', 1, db
(3 rows)
-- create a table in the same tablespace which is being dropped via a concurrent session
-2&:CREATE TABLE drop_tablespace_tbl(a int, b int) TABLESPACE concurrent_tblspace DISTRIBUTED BY (a);
--- reset the fault, drop tablespace command will continue
-SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'reset', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p';
+CREATE TABLE drop_tablespace_tbl(a int, b int) TABLESPACE concurrent_tblspace DISTRIBUTED BY (a);
+CREATE TABLE
+INSERT INTO drop_tablespace_tbl SELECT i, i FROM generate_series(1,100)i;
+INSERT 0 100
+-- reset the fault; the drop tablespace command will not delete the data files in the tablespace
+SELECT gp_inject_fault('AfterTablespaceCreateLockRelease', 'reset', dbid) FROM gp_segment_configuration WHERE content <> -1 and role='p';
gp_inject_fault
-----------------
Success:
@@ -128,7 +57,18 @@ SELECT gp_inject_fault('drop_tablespace_after_acquire_lock', 'reset', dbid) FROM
(3 rows)
-- success
1<: <... completed>
-DROP
--- fail
-2<: <... completed>
-ERROR: could not create directory "pg_tblspc/33175/GPDB_1_302501601/32799": No such file or directory
+DROP TABLESPACE
+-- check data exists
+SELECT count(*) FROM drop_tablespace_tbl;
+ count
+-------
+ 100
+(1 row)
+-- move to another tablespace and check the data.
+ALTER TABLE drop_tablespace_tbl SET TABLESPACE pg_default;
+ALTER TABLE
+SELECT count(*) FROM drop_tablespace_tbl;
+ count
+-------
+ 100
+(1 row)
diff --git a/src/test/isolation2/expected/concurrent_index_creation_should_not_deadlock.out b/src/test/isolation2/expected/concurrent_index_creation_should_not_deadlock.out
index ebcc4248a48..0491f0ce3d9 100644
--- a/src/test/isolation2/expected/concurrent_index_creation_should_not_deadlock.out
+++ b/src/test/isolation2/expected/concurrent_index_creation_should_not_deadlock.out
@@ -1,9 +1,7 @@
-- Test to make sure non-first concurrent index creations don't deadlock
-- Create an append only table, popluated with data
CREATE TABLE index_deadlocking_test_table (value int) WITH (appendonly=true);
-CREATE
-CREATE INDEX index_deadlocking_test_table_initial_index on index_deadlocking_test_table (value);
-CREATE
+CREATE TABLE
-- Setup a fault to ensure that both sessions pauses while creating an index,
-- ensuring a concurrent index creation.
@@ -29,6 +27,6 @@ SELECT gp_inject_fault('defineindex_before_acquire_lock', 'reset', 1);
-- Both index creation attempts should succeed
1<: <... completed>
-CREATE
+CREATE INDEX
2<: <... completed>
-CREATE
+CREATE INDEX
diff --git a/src/test/isolation2/expected/crash_recovery.out b/src/test/isolation2/expected/crash_recovery.out
index a5078dd4657..7882e3f7149 100644
--- a/src/test/isolation2/expected/crash_recovery.out
+++ b/src/test/isolation2/expected/crash_recovery.out
@@ -1,5 +1,5 @@
1:CREATE TABLE crash_test_table(c1 int);
-CREATE
+CREATE TABLE
1:SELECT role, preferred_role, content, status FROM gp_segment_configuration;
role | preferred_role | content | status
diff --git a/src/test/isolation2/expected/crash_recovery_dtm.out b/src/test/isolation2/expected/crash_recovery_dtm.out
index 31b1e86391c..153ee28e91f 100644
--- a/src/test/isolation2/expected/crash_recovery_dtm.out
+++ b/src/test/isolation2/expected/crash_recovery_dtm.out
@@ -15,7 +15,7 @@
-- Make the test faster and also make some queries fail as expected after
-- 2pc retry PANIC (do not finish earlier before PANIC happens).
alter system set dtx_phase2_retry_second to 5;
-ALTER
+ALTER SYSTEM
select pg_reload_conf();
pg_reload_conf
----------------
@@ -83,7 +83,7 @@ server closed the connection unexpectedly
---+---
(0 rows)
4: INSERT INTO commit_phase1_panic select i,i from generate_series(1, 10)i;
-INSERT 10
+INSERT 0 10
4: SELECT count(*) from commit_phase1_panic;
count
-------
@@ -184,9 +184,9 @@ LINE 1: SELECT count(*) from abort_fatal_fault_test_table;
-- should cause master to broadcast abort and QEs handle the abort in
-- DTX_CONTEXT_LOCAL_ONLY context.
11: CREATE TABLE QE_panic_test_table(a int, b int);
-CREATE
+CREATE TABLE
11: INSERT INTO QE_panic_test_table SELECT * from generate_series(0, 9);
-INSERT 10
+INSERT 0 10
-- To help speedy recovery
11: CHECKPOINT;
CHECKPOINT
@@ -194,7 +194,7 @@ CHECKPOINT
-- system is required to set the GUC and can't be set on session level
-- as session reset happens for every abort retry.
11: alter system set dtx_phase2_retry_second to 600;
-ALTER
+ALTER SYSTEM
11: select pg_reload_conf();
pg_reload_conf
----------------
@@ -269,7 +269,7 @@ ERROR: Error on receive from seg0 127.0.0.1:7002 pid=5600: server closed the co
Success:
(1 row)
13: alter system reset dtx_phase2_retry_second;
-ALTER
+ALTER SYSTEM
13: select pg_reload_conf();
pg_reload_conf
----------------
@@ -278,8 +278,8 @@ ALTER
-- Scenario 5: QD panics when a QE process is doing prepare but not yet finished.
-- This should cause dtx recovery finally aborts the orphaned prepared transaction.
-15: CREATE TABLE master_reset(a int);
-CREATE
+15: CREATE TABLE coordinator_reset(a int);
+CREATE TABLE
15: SELECT gp_inject_fault_infinite('before_xlog_xact_prepare', 'suspend', dbid) from gp_segment_configuration where role = 'p' and content = 1;
gp_inject_fault_infinite
--------------------------
@@ -299,9 +299,9 @@ CREATE
-- set gucs to speed up testing
15: ALTER SYSTEM SET gp_dtx_recovery_prepared_period to 0;
-ALTER
+ALTER SYSTEM
15: ALTER SYSTEM SET gp_dtx_recovery_interval to 5;
-ALTER
+ALTER SYSTEM
15: SELECT pg_reload_conf();
pg_reload_conf
----------------
@@ -385,28 +385,12 @@ server closed the connection unexpectedly
--------------------------
Success:
(1 row)
--- verify orphaned prepared transacion is aborted
-19: SELECT gp_wait_until_triggered_fault('after_orphaned_check', 1, dbid) from gp_segment_configuration where role = 'p' and content = -1;
- gp_wait_until_triggered_fault
--------------------------------
- Success:
-(1 row)
-19: select * from gp_stat_progress_dtx_recovery;
- phase | recover_commited_dtx_total | recover_commited_dtx_completed | in_doubt_tx_total | in_doubt_tx_in_progress | in_doubt_tx_aborted
------------------------------------------+----------------------------+--------------------------------+-------------------+-------------------------+---------------------
- managing in-doubt orphaned transactions | 0 | 0 | 1 | 0 | 1
-(1 row)
-19: SELECT gp_inject_fault_infinite('after_orphaned_check', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = -1;
- gp_inject_fault_infinite
---------------------------
- Success:
-(1 row)
-19: DROP TABLE master_reset;
-DROP
+19: DROP TABLE coordinator_reset;
+DROP TABLE
19: ALTER SYSTEM RESET gp_dtx_recovery_interval;
-ALTER
+ALTER SYSTEM
19: ALTER SYSTEM RESET gp_dtx_recovery_prepared_period;
-ALTER
+ALTER SYSTEM
19: SELECT pg_reload_conf();
pg_reload_conf
----------------
@@ -424,11 +408,11 @@ ALTER
-- speed up testing by setting some gucs.
20: ALTER SYSTEM SET gp_dtx_recovery_prepared_period to 0;
-ALTER
+ALTER SYSTEM
20: ALTER SYSTEM SET gp_dtx_recovery_interval to 5;
-ALTER
+ALTER SYSTEM
20: ALTER SYSTEM SET dtx_phase2_retry_second to 5;
-ALTER
+ALTER SYSTEM
20: SELECT pg_reload_conf();
pg_reload_conf
----------------
@@ -436,7 +420,7 @@ ALTER
(1 row)
20: CREATE TABLE test_retry_abort(a int);
-CREATE
+CREATE TABLE
-- master: set fault to trigger abort prepare
-- primary 0: set fault so that retry prepared abort fails.
@@ -535,11 +519,11 @@ ERROR: fault triggered, fault name:'dtm_broadcast_prepare' fault type:'error'
-- cleanup
20: ALTER SYSTEM RESET gp_dtx_recovery_interval;
-ALTER
+ALTER SYSTEM
20: ALTER SYSTEM RESET gp_dtx_recovery_prepared_period;
-ALTER
+ALTER SYSTEM
20: ALTER SYSTEM RESET dtx_phase2_retry_second;
-ALTER
+ALTER SYSTEM
20: SELECT pg_reload_conf();
pg_reload_conf
----------------
@@ -556,4 +540,4 @@ ALTER
Success:
(1 row)
20: DROP TABLE test_retry_abort;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/crash_recovery_redundant_dtx.out b/src/test/isolation2/expected/crash_recovery_redundant_dtx.out
index 3db508d527f..b78e362f44f 100644
--- a/src/test/isolation2/expected/crash_recovery_redundant_dtx.out
+++ b/src/test/isolation2/expected/crash_recovery_redundant_dtx.out
@@ -1,5 +1,5 @@
1:CREATE TABLE crash_test_redundant(c1 int);
-CREATE
+CREATE TABLE
1:SELECT role, preferred_role, content, status FROM gp_segment_configuration;
role | preferred_role | content | status
diff --git a/src/test/isolation2/expected/create_memory_accounting_tables.out b/src/test/isolation2/expected/create_memory_accounting_tables.out
index f311164b13b..3867fd2f886 100644
--- a/src/test/isolation2/expected/create_memory_accounting_tables.out
+++ b/src/test/isolation2/expected/create_memory_accounting_tables.out
@@ -1,413 +1,413 @@
CREATE TABLE lineitem ( l_orderkey INT8 NOT NULL, l_partkey INTEGER NOT NULL, l_suppkey INTEGER NOT NULL, l_linenumber INTEGER NOT NULL, l_quantity DECIMAL(15, 2) NOT NULL, l_extendedprice DECIMAL(15, 2) NOT NULL, l_discount DECIMAL(15, 2) NOT NULL, l_tax DECIMAL(15, 2) NOT NULL, l_returnflag CHAR(1) NOT NULL, l_linestatus CHAR(1) NOT NULL, l_shipdate DATE NOT NULL, l_commitdate DATE NOT NULL, l_receiptdate DATE NOT NULL, l_shipinstruct CHAR(25) NOT NULL, l_shipmode CHAR(10) NOT NULL, l_comment VARCHAR(44) NOT NULL )PARTITION by range(l_shipdate) (partition p1 start('1992-01-01') end('1998-12-02') every(interval '1 month'));
-CREATE
+CREATE TABLE
CREATE TABLE nation ( n_nationkey INTEGER, n_name CHAR(25), n_regionkey INTEGER, n_comment VARCHAR(152) );
-CREATE
+CREATE TABLE
CREATE TABLE Customer ( C_CUSTKEY INTEGER , C_NAME VARCHAR(25) , C_ADDRESS VARCHAR(40) , C_NATIONKEY INTEGER , C_PHONE CHAR(15) , C_ACCTBAL DECIMAL(15,2) , C_MKTSEGMENT CHAR(10) , C_COMMENT VARCHAR(117) );
-CREATE
+CREATE TABLE
CREATE TABLE region ( r_regionkey INTEGER, r_name CHAR(25), r_comment VARCHAR(152) );
-CREATE
+CREATE TABLE
CREATE TABLE orders ( o_orderkey INTEGER, o_custkey INTEGER, o_orderstatus CHAR(1), o_totalprice DECIMAL(15, 2), o_orderdate DATE, o_orderpriority CHAR(15), o_clerk CHAR(15), o_shippriority INTEGER, o_comment VARCHAR(79) ) ;
-CREATE
+CREATE TABLE
CREATE TABLE supplier ( s_suppkey INTEGER, s_name CHAR(25), s_address VARCHAR(40), s_nationkey INTEGER, s_phone CHAR(15), s_acctbal DECIMAL(15, 2), s_comment VARCHAR(101) );
-CREATE
+CREATE TABLE
CREATE TABLE partsupp ( ps_partkey INTEGER, ps_suppkey INTEGER, ps_availqty INTEGER, ps_supplycost DECIMAL(15, 2), ps_comment VARCHAR(199) ) ;
-CREATE
+CREATE TABLE
INSERT INTO lineitem VALUES (2949,695,89,2,50,79784.50,0.05,0.04,'A','F','1994-08-04','1994-06-23','1994-08-17','TAKE BACK RETURN','FOB','gular courts cajole across t');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2949,1795,80,3,38,64478.02,0.02,0.06,'R','F','1994-05-22','1994-05-25','1994-05-27','COLLECT COD','REG AIR','se slyly requests. carefull');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2950,1295,96,1,32,38281.28,0.01,0.05,'N','O','1997-09-21','1997-08-25','1997-10-08','DELIVER IN PERSON','REG AIR','its wake carefully slyly final ideas.');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2950,658,59,2,18,28055.70,0.10,0.01,'N','O','1997-07-19','1997-08-29','1997-08-17','COLLECT COD','TRUCK','uests cajole furio');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2950,527,28,3,14,19985.28,0.01,0.02,'N','O','1997-07-29','1997-08-05','1997-07-31','TAKE BACK RETURN','MAIL','ccounts haggle carefully according');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2950,1864,65,4,45,79463.70,0.08,0.00,'N','O','1997-09-05','1997-09-23','1997-09-11','NONE','FOB','ides the b');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2950,610,11,5,46,69488.06,0.02,0.05,'N','O','1997-07-15','1997-09-30','1997-07-25','COLLECT COD','RAIL','to the regular accounts are slyly carefu');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2950,1736,37,6,27,44218.71,0.01,0.03,'N','O','1997-10-01','1997-09-13','1997-10-08','NONE','TRUCK','are alongside of the carefully silent');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2951,21,72,1,5,4605.10,0.03,0.03,'N','O','1996-03-27','1996-04-16','1996-03-30','NONE','REG AIR','to beans wake ac');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2951,1360,99,2,24,30272.64,0.07,0.03,'N','O','1996-03-24','1996-04-16','1996-04-08','NONE','SHIP','ironic multipliers. express, regular');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2951,1861,91,3,40,70514.40,0.02,0.07,'N','O','1996-05-03','1996-04-20','1996-05-22','COLLECT COD','REG AIR','ial deposits wake fluffily about th');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2951,722,55,4,21,34077.12,0.06,0.08,'N','O','1996-04-12','1996-04-27','1996-04-14','DELIVER IN PERSON','REG AIR','nt instructions toward the f');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2951,502,63,5,15,21037.50,0.07,0.00,'N','O','1996-03-25','1996-04-23','1996-03-27','COLLECT COD','REG AIR','inal account');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2951,1371,86,6,18,22902.66,0.06,0.00,'N','O','1996-04-04','1996-04-27','1996-04-06','COLLECT COD','FOB','ep about the final, even package');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2976,86,37,1,32,31554.56,0.06,0.00,'A','F','1994-01-26','1994-02-13','1994-02-10','NONE','MAIL','nding, ironic deposits sleep f');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2976,31,32,2,24,22344.72,0.00,0.03,'A','F','1994-03-19','1994-01-26','1994-04-18','COLLECT COD','TRUCK','ronic pinto beans. slyly bol');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2976,98,49,3,35,34933.15,0.10,0.07,'R','F','1993-12-19','1994-02-14','1994-01-11','NONE','RAIL','boost slyly about the regular, regular re');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2976,811,78,4,22,37659.82,0.00,0.04,'A','F','1994-02-08','1994-03-03','1994-02-12','TAKE BACK RETURN','FOB','ncies kindle furiously. carefull');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2976,1333,10,5,13,16046.29,0.00,0.06,'A','F','1994-02-06','1994-02-02','1994-02-19','NONE','FOB','furiously final courts boost');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2976,1084,20,6,30,29552.40,0.08,0.03,'R','F','1994-03-27','1994-02-01','1994-04-26','TAKE BACK RETURN','RAIL','c ideas! unusual');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2977,698,92,1,25,39967.25,0.03,0.07,'N','O','1996-09-21','1996-10-06','1996-10-13','TAKE BACK RETURN','RAIL','furiously pe');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2978,897,98,1,29,52138.81,0.00,0.08,'A','F','1995-06-03','1995-07-25','1995-06-06','NONE','SHIP','ecial ideas promise slyly');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2978,1270,8,2,42,49193.34,0.01,0.06,'N','O','1995-08-19','1995-07-18','1995-09-07','DELIVER IN PERSON','MAIL','ial requests nag blithely alongside of th');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2978,430,18,3,26,34591.18,0.07,0.05,'N','O','1995-07-29','1995-07-22','1995-08-20','COLLECT COD','REG AIR','as haggle against the carefully express dep');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2978,271,53,4,7,8198.89,0.00,0.00,'N','O','1995-07-18','1995-07-03','1995-07-23','NONE','FOB','. final ideas are blithe');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2978,285,67,5,33,39114.24,0.09,0.03,'R','F','1995-05-06','1995-07-23','1995-05-16','COLLECT COD','FOB','s. blithely unusual pack');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2978,1671,13,6,4,6290.68,0.08,0.04,'N','O','1995-07-06','1995-07-31','1995-07-19','COLLECT COD','AIR','ffily unusual');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2979,81,57,1,8,7848.64,0.00,0.08,'N','O','1996-06-18','1996-05-21','1996-07-06','COLLECT COD','REG AIR','st blithely; blithely regular gifts dazz');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2979,107,8,2,47,47333.70,0.05,0.00,'N','O','1996-03-25','1996-05-13','1996-04-04','TAKE BACK RETURN','SHIP','iously unusual dependencies wake across');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2979,1879,9,3,35,62330.45,0.04,0.03,'N','O','1996-05-25','1996-06-11','1996-06-24','DELIVER IN PERSON','MAIL','old ideas beneath the blit');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2979,1641,83,4,28,43193.92,0.05,0.08,'N','O','1996-06-04','1996-04-23','1996-06-24','DELIVER IN PERSON','FOB','ing, regular pinto beans. blithel');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2980,364,93,1,2,2528.72,0.09,0.03,'N','O','1996-11-18','1996-10-22','1996-11-27','TAKE BACK RETURN','SHIP','enly across the special, pending packag');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2980,96,72,2,48,47812.32,0.04,0.05,'N','O','1996-09-25','1996-12-09','1996-10-12','NONE','REG AIR','totes. regular pinto');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2980,1321,60,3,27,33002.64,0.08,0.08,'N','O','1996-12-08','1996-12-03','1996-12-14','NONE','REG AIR','theodolites cajole blithely sl');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2980,247,75,4,49,56214.76,0.03,0.02,'N','O','1996-10-04','1996-12-04','1996-10-06','NONE','RAIL','hy packages sleep quic');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2980,1861,62,5,24,42308.64,0.05,0.04,'N','O','1997-01-12','1996-10-27','1997-01-14','NONE','MAIL','elets. fluffily regular in');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2980,1087,58,6,43,42487.44,0.01,0.01,'N','O','1996-12-07','1996-11-10','1997-01-02','COLLECT COD','AIR','sts. slyly regu');
-INSERT 1
+INSERT 0 1
INSERT INTO lineitem VALUES (2981,136,15,1,17,17614.21,0.03,0.05,'N','O','1998-10-17','1998-10-02','1998-10-21','DELIVER IN PERSON','RAIL',', unusual packages x-ray. furious');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (1,'Supplier#000000001','N kD4on9OM Ipw3,gf0JBoQDd7tgrzrddZ',17,'27-918-335-1736',5755.94,'each slyly above the careful');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (2,'Supplier#000000002','89eJ5ksX3ImxJQBvxObC,',5,'15-679-861-2259',4032.68,'slyly bold instructions. idle dependen');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (3,'Supplier#000000003','q1,G3Pj6OjIuUYfUoH18BFTKP5aU9bEV3',1,'11-383-516-1199',4192.40,'blithely silent requests after the express dependencies are sl');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (4,'Supplier#000000004','Bk7ah4CK8SYQTepEmvMkkgMwg',15,'25-843-787-7479',4641.08,'riously even requests above the exp');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (5,'Supplier#000000005','Gcdm2rJRzl5qlTVzc',11,'21-151-690-3663',-283.84,'. slyly regular pinto bea');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (6,'Supplier#000000006','tQxuVm7s7CnK',14,'24-696-997-4969',1365.79,'final accounts. regular dolphins use against the furiously ironic decoys.');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (7,'Supplier#000000007','s,4TicNGB4uO6PaSqNBUq',23,'33-990-965-2201',6820.35,'s unwind silently furiously regular courts. final requests are deposits. requests wake quietly blit');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (8,'Supplier#000000008','9Sq4bBH2FQEmaFOocY45sRTxo6yuoG',17,'27-498-742-3860',7627.85,'al pinto beans. asymptotes haggl');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (9,'Supplier#000000009','1KhUgZegwM3ua7dsYmekYBsK',10,'20-403-398-8662',5302.37,'s. unusual, even requests along the furiously regular pac');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (10,'Supplier#000000010','Saygah3gYWMp72i PY',24,'34-852-489-8585',3891.91,'ing waters. regular requests ar');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (11,'Supplier#000000011','JfwTs,LZrV, M,9C',18,'28-613-996-1505',3393.08,'y ironic packages. slyly ironic accounts affix furiously; ironically unusual excuses across the flu');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (12,'Supplier#000000012','aLIW q0HYd',8,'18-179-925-7181',1432.69,'al packages nag alongside of the bold instructions. express, daring accounts');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (13,'Supplier#000000013','HK71HQyWoqRWOX8GI FpgAifW,2PoH',3,'13-727-620-7813',9107.22,'requests engage regularly instructions. furiously special requests ar');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (14,'Supplier#000000014','EXsnO5pTNj4iZRm',15,'25-656-247-5058',9189.82,'l accounts boost. fluffily bold warhorses wake');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (15,'Supplier#000000015','olXVbNBfVzRqgokr1T,Ie',8,'18-453-357-6394',308.56,'across the furiously regular platelets wake even deposits. quickly express she');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (16,'Supplier#000000016','YjP5C55zHDXL7LalK27zfQnwejdpin4AMpvh',22,'32-822-502-4215',2972.26,'ously express ideas haggle quickly dugouts? fu');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (17,'Supplier#000000017','c2d,ESHRSkK3WYnxpgw6aOqN0q',19,'29-601-884-9219',1687.81,'eep against the furiously bold ideas. fluffily bold packa');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (18,'Supplier#000000018','PGGVE5PWAMwKDZw',16,'26-729-551-1115',7040.82,'accounts snooze slyly furiously bold');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (19,'Supplier#000000019','edZT3es,nBFD8lBXTGeTl',24,'34-278-310-2731',6150.38,'refully final foxes across the dogged theodolites sleep slyly abou');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (20,'Supplier#000000020','iybAE,RmTymrZVYaFZva2SH,j',3,'13-715-945-6730',530.82,'n, ironic ideas would nag blithely about the slyly regular accounts. silent, expr');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (21,'Supplier#000000021','81CavellcrJ0PQ3CPBID0Z0JwyJm0ka5igEs',2,'12-253-590-5816',9365.80,'d. instructions integrate sometimes slyly pending instructions. accounts nag among the');
-INSERT 1
+INSERT 0 1
INSERT INTO supplier VALUES (22,'Supplier#000000022','okiiQFk 8lm6EVX6Q0,bEcO',4,'14-144-830-2814',-966.20,'ironically among the deposits. closely expre');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (1,370,'O',172799.49,'1996-01-02','5-LOW','Clerk#000000951',0,'nstructions sleep furiously among');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (2,781,'O',38426.09,'1996-12-01','1-URGENT','Clerk#000000880',0,'foxes. pending accounts at the pending, silent asymptot');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (3,1234,'F',205654.30,'1993-10-14','5-LOW','Clerk#000000955',0,'sly final accounts boost. carefully regular ideas cajole carefully. depos');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (4,1369,'O',56000.91,'1995-10-11','5-LOW','Clerk#000000124',0,'sits. slyly regular warthogs cajole. regular, regular theodolites acro');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (5,445,'F',105367.67,'1994-07-30','5-LOW','Clerk#000000925',0,'quickly. bold deposits sleep slyly. packages use slyly');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (6,557,'F',45523.10,'1992-02-21','4-NOT SPECIFIED','Clerk#000000058',0,'ggle. special, final requests are against the furiously specia');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (7,392,'O',271885.66,'1996-01-10','2-HIGH','Clerk#000000470',0,'ly special requests');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (32,1301,'O',198665.57,'1995-07-16','2-HIGH','Clerk#000000616',0,'ise blithely bold, regular requests. quickly unusual dep');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (33,670,'F',146567.24,'1993-10-27','3-MEDIUM','Clerk#000000409',0,'uriously. furiously final request');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (34,611,'O',73315.48,'1998-07-21','3-MEDIUM','Clerk#000000223',0,'ly final packages. fluffily final deposits wake blithely ideas. spe');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (35,1276,'O',194641.93,'1995-10-23','4-NOT SPECIFIED','Clerk#000000259',0,'zzle. carefully enticing deposits nag furio');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (36,1153,'O',42011.04,'1995-11-03','1-URGENT','Clerk#000000358',0,'quick packages are blithely. slyly silent accounts wake qu');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (37,862,'F',131896.49,'1992-06-03','3-MEDIUM','Clerk#000000456',0,'kly regular pinto beans. carefully unusual waters cajole never');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (38,1249,'O',71553.08,'1996-08-21','4-NOT SPECIFIED','Clerk#000000604',0,'haggle blithely. furiously express ideas haggle blithely furiously regular re');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (39,818,'O',326565.37,'1996-09-20','3-MEDIUM','Clerk#000000659',0,'ole express, ironic requests: ir');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (64,322,'F',35831.73,'1994-07-16','3-MEDIUM','Clerk#000000661',0,'wake fluffily. sometimes ironic pinto beans about the dolphin');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (65,163,'P',95469.44,'1995-03-18','1-URGENT','Clerk#000000632',0,'ular requests are blithely pending orbits-- even requests against the deposit');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (66,1292,'F',104190.66,'1994-01-20','5-LOW','Clerk#000000743',0,'y pending requests integrate');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (67,568,'O',182481.16,'1996-12-19','4-NOT SPECIFIED','Clerk#000000547',0,'symptotes haggle slyly around the furiously iron');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (68,286,'O',301968.79,'1998-04-18','3-MEDIUM','Clerk#000000440',0,'pinto beans sleep carefully. blithely ironic deposits haggle furiously acro');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (69,845,'F',204110.73,'1994-06-04','4-NOT SPECIFIED','Clerk#000000330',0,'depths atop the slyly thin deposits detect among the furiously silent accou');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (70,644,'F',125705.32,'1993-12-18','5-LOW','Clerk#000000322',0,'carefully ironic request');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (71,34,'O',260603.38,'1998-01-24','4-NOT SPECIFIED','Clerk#000000271',0,'express deposits along the blithely regul');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (96,1078,'F',64364.30,'1994-04-17','2-HIGH','Clerk#000000395',0,'oost furiously. pinto');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (97,211,'F',100572.55,'1993-01-29','3-MEDIUM','Clerk#000000547',0,'hang blithely along the regular accounts. furiously even ideas after the');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (98,1045,'F',71721.40,'1994-09-25','1-URGENT','Clerk#000000448',0,'c asymptotes. quickly regular packages should have to nag re');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (99,890,'F',108594.87,'1994-03-13','4-NOT SPECIFIED','Clerk#000000973',0,'e carefully ironic packages. pending');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (100,1471,'O',198978.27,'1998-02-28','4-NOT SPECIFIED','Clerk#000000577',0,'heodolites detect slyly alongside of the ent');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (101,280,'O',118448.39,'1996-03-17','3-MEDIUM','Clerk#000000419',0,'ding accounts above the slyly final asymptote');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (102,8,'O',184806.58,'1997-05-09','2-HIGH','Clerk#000000596',0,'slyly according to the asymptotes. carefully final packages integrate furious');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (103,292,'O',118745.16,'1996-06-20','4-NOT SPECIFIED','Clerk#000000090',0,'ges. carefully unusual instructions haggle quickly regular f');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (128,740,'F',34997.04,'1992-06-15','1-URGENT','Clerk#000000385',0,'ns integrate fluffily. ironic asymptotes after the regular excuses nag around');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (129,712,'F',254281.41,'1992-11-19','5-LOW','Clerk#000000859',0,'ing tithes. carefully pending deposits boost about the silently express');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (130,370,'F',140213.54,'1992-05-08','2-HIGH','Clerk#000000036',0,'le slyly unusual, regular packages? express deposits det');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (131,928,'F',140726.47,'1994-06-08','3-MEDIUM','Clerk#000000625',0,'after the fluffily special foxes integrate s');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (132,265,'F',133485.89,'1993-06-11','3-MEDIUM','Clerk#000000488',0,'sits are daringly accounts. carefully regular foxes sleep slyly about the');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (133,440,'O',95971.06,'1997-11-29','1-URGENT','Clerk#000000738',0,'usly final asymptotes');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (134,62,'F',208201.46,'1992-05-01','4-NOT SPECIFIED','Clerk#000000711',0,'lar theodolites boos');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (135,605,'O',230472.84,'1995-10-21','4-NOT SPECIFIED','Clerk#000000804',0,'l platelets use according t');
-INSERT 1
+INSERT 0 1
INSERT INTO orders VALUES (160,826,'O',114742.32,'1996-12-19','4-NOT SPECIFIED','Clerk#000000342',0,'thely special sauternes wake slyly of t');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (1,'Customer#000000001','IVhzIApeRb ot,c,E',15,'25-989-741-2988',711.56,'BUILDING','to the even, regular platelets. regular, ironic epitaphs nag e');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (2,'Customer#000000002','XSTf4,NCwDVaWNe6tEgvwfmRchLXak',13,'23-768-687-3665',121.65,'AUTOMOBILE','l accounts. blithely ironic theodolites integrate boldly: caref');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (3,'Customer#000000003','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (4,'Customer#000000004','XxVSJsLAGtn',4,'14-128-190-5944',2866.83,'MACHINERY','requests. final, regular ideas sleep final accou');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (5,'Customer#000000005','KvpyuHCplrB84WgAiGV6sYpZq7Tj',3,'13-750-942-6364',794.47,'HOUSEHOLD','n accounts will have to unwind. foxes cajole accor');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (6,'Customer#000000006','sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn',20,'30-114-968-4951',7638.57,'AUTOMOBILE','tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (7,'Customer#000000007','TcGe5gaZNgVePxU5kRrvXBfkasDTea',18,'28-190-982-9759',9561.95,'AUTOMOBILE','ainst the ironic, express theodolites. express, even pinto beans among the exp');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (8,'Customer#000000008','I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5',17,'27-147-574-9335',6819.74,'BUILDING','among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (9,'Customer#000000009','xKiAFTjUsCuxfeleNqefumTrjS',8,'18-338-906-3675',8324.07,'FURNITURE','r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (10,'Customer#000000010','6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2',5,'15-741-346-9870',2753.54,'HOUSEHOLD','es regular deposits haggle. fur');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (11,'Customer#000000011','PkWS 3HlXqwTuzrKg633BEi',23,'33-464-151-3439',-272.60,'BUILDING','ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans.');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (12,'Customer#000000012','9PWKuhzT4Zr1Q',13,'23-791-276-1263',3396.49,'HOUSEHOLD','to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (13,'Customer#000000013','nsXQu0oVjD7PM659uC3SRSp',3,'13-761-547-5974',3857.34,'BUILDING','ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. blithely');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (14,'Customer#000000014','KXkletMlL2JQEA',1,'11-845-129-3851',5266.30,'FURNITURE',', ironic packages across the unus');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (15,'Customer#000000015','YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn',23,'33-687-542-7601',2788.52,'HOUSEHOLD','platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (16,'Customer#000000016','cYiaeMLZSMAOQ2 d0W,',10,'20-781-609-3107',4681.03,'FURNITURE','kly silent courts. thinly regular theodolites sleep fluffily after');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (17,'Customer#000000017','izrh 6jdqtp2eqdtbkswDD8SG4SzXruMfIXyR7',2,'12-970-682-3487',6.34,'AUTOMOBILE','packages wake! blithely even pint');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (18,'Customer#000000018','3txGO AiuFux3zT0Z9NYaFRnZt',6,'16-155-215-1315',5494.43,'BUILDING','s sleep. carefully even instructions nag furiously alongside of t');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (19,'Customer#000000019','uc,3bHIx84H,wdrmLOjVsiqXCq2tr',18,'28-396-526-5053',8914.71,'HOUSEHOLD','nag. furiously careful packages are slyly at the accounts. furiously regular in');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (20,'Customer#000000020','JrPk8Pqplj4Ne',22,'32-957-234-8742',7603.40,'FURNITURE','g alongside of the special excuses-- fluffily enticing packages wake');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (21,'Customer#000000021','XYmVpr9yAHDEn',8,'18-902-614-8344',1428.25,'MACHINERY','quickly final accounts integrate blithely furiously u');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (22,'Customer#000000022','QI6p41,FNs5k7RZoCCVPUTkUdYpB',3,'13-806-545-9701',591.98,'MACHINERY','s nod furiously above the furiously ironic ideas.');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (23,'Customer#000000023','OdY W13N7Be3OC5MpgfmcYss0Wn6TKT',3,'13-312-472-8245',3332.02,'HOUSEHOLD','deposits. special deposits cajole slyly. fluffily special deposits about the furiously');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (24,'Customer#000000024','HXAFgIAyjxtdqwimt13Y3OZO 4xeLe7U8PqG',13,'23-127-851-8031',9255.67,'MACHINERY','into beans. fluffily final ideas haggle fluffily');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (25,'Customer#000000025','Hp8GyFQgGHFYSilH5tBfe',12,'22-603-468-3533',7133.70,'FURNITURE','y. accounts sleep ruthlessly according to the regular theodolites. unusual instructions sleep. ironic, final');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (26,'Customer#000000026','8ljrc5ZeMl7UciP',22,'32-363-455-4837',5182.05,'AUTOMOBILE','c requests use furiously ironic requests. slyly ironic dependencies us');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (27,'Customer#000000027','IS8GIyxpBrLpMT0u7',3,'13-137-193-2709',5679.84,'BUILDING','about the carefully ironic pinto beans. accoun');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (28,'Customer#000000028','iVyg0daQ,Tha8x2WPWA9m2529m',8,'18-774-241-1462',1007.18,'FURNITURE','along the regular deposits. furiously final pac');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (29,'Customer#000000029','sJ5adtfyAkCK63df2,vF25zyQMVYE34uh',0,'10-773-203-7342',7618.27,'FURNITURE','its after the carefully final platelets x-ray against');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (30,'Customer#000000030','nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY',1,'11-764-165-5076',9321.01,'BUILDING','lithely final requests. furiously unusual account');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (31,'Customer#000000031','LUACbO0viaAv6eXOAebryDB xjVst',23,'33-197-837-7094',5236.89,'HOUSEHOLD','s use among the blithely pending depo');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (32,'Customer#000000032','jD2xZzi UmId,DCtNBLXKj9q0Tlp2iQ6ZcO3J',15,'25-430-914-2194',3471.53,'BUILDING','cial ideas. final, furious requests across the e');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (33,'Customer#000000033','qFSlMuLucBmx9xnn5ib2csWUweg D',17,'27-375-391-1280',-78.56,'AUTOMOBILE','s. slyly regular accounts are furiously. carefully pending requests');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (34,'Customer#000000034','Q6G9wZ6dnczmtOx509xgE,M2KV',15,'25-344-968-5422',8589.70,'HOUSEHOLD','nder against the even, pending accounts. even');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (35,'Customer#000000035','TEjWGE4nBzJL2',17,'27-566-888-7431',1228.24,'HOUSEHOLD','requests. special, express requests nag slyly furiousl');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (36,'Customer#000000036','3TvCzjuPzpJ0,DdJ8kW5U',21,'31-704-669-5769',4987.27,'BUILDING','haggle. enticing, quiet platelets grow quickly bold sheaves. carefully regular acc');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (37,'Customer#000000037','7EV4Pwh,3SboctTWt',8,'18-385-235-7162',-917.75,'FURNITURE','ilent packages are carefully among the deposits. furiousl');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (38,'Customer#000000038','a5Ee5e9568R8RLP 2ap7',12,'22-306-880-7212',6345.11,'HOUSEHOLD','lar excuses. closely even asymptotes cajole blithely excuses. carefully silent pinto beans sleep carefully fin');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (39,'Customer#000000039','nnbRg,Pvy33dfkorYE FdeZ60',2,'12-387-467-6509',6264.31,'AUTOMOBILE','tions. slyly silent excuses slee');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (40,'Customer#000000040','gOnGWAyhSV1ofv',3,'13-652-915-8939',1335.30,'BUILDING','rges impress after the slyly ironic courts. foxes are. blithely');
-INSERT 1
+INSERT 0 1
INSERT INTO customer VALUES (41,'Customer#000000041','IM9mzmyoxeBmvNw8lA7G3Ydska2nkZF',10,'20-917-711-4011',270.95,'HOUSEHOLD','ly regular accounts hang bold, silent packages. unusual foxes haggle slyly above the special, final depo');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (0,'ALGERIA',0,'haggle. carefully final deposits detect slyly agai');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (1,'ARGENTINA',1,'al foxes promise slyly according to the regular accounts. bold requests alon');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (2,'BRAZIL',1,'y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (3,'CANADA',1,'eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (4,'EGYPT',4,'y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (5,'ETHIOPIA',0,'ven packages wake quickly. regu');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (6,'FRANCE',3,'refully final requests. regular, ironi');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (7,'GERMANY',3,'l platelets. regular accounts x-ray: unusual, regular acco');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (8,'INDIA',2,'ss excuses cajole slyly across the packages. deposits print aroun');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (9,'INDONESIA',2,'slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (10,'IRAN',4,'efully alongside of the slyly final dependencies.');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (11,'IRAQ',4,'nic deposits boost atop the quickly final requests? quickly regula');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (12,'JAPAN',2,'ously. final, express gifts cajole a');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (13,'JORDAN',4,'ic deposits are blithely about the carefully regular pa');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (14,'KENYA',0,'pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (15,'MOROCCO',0,'rns. blithely bold courts among the closely regular packages use furiously bold platelets?');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (16,'MOZAMBIQUE',0,'s. ironic, unusual asymptotes wake blithely r');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (17,'PERU',1,'platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (18,'CHINA',2,'c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (19,'ROMANIA',3,'ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (20,'SAUDI ARABIA',4,'ts. silent requests haggle. closely express packages sleep across the blithely');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (21,'VIETNAM',2,'hely enticingly express accounts. even, final');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (22,'RUSSIA',3,'requests against the platelets use never according to the quickly regular pint');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (23,'UNITED KINGDOM',3,'eans boost carefully special requests. accounts are. carefull');
-INSERT 1
+INSERT 0 1
INSERT INTO nation VALUES (24,'UNITED STATES',1,'y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (1,2,3325,771.64,', even theodolites. regular, final theodolites eat after the carefully pending foxes. furiously regular deposits sleep slyly. carefully bold realms above the ironic dependencies haggle careful');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (1,27,8076,993.49,'ven ideas. quickly even packages print. pending multipliers must have to are fluff');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (1,52,3956,337.09,'after the fluffily ironic deposits? blithely special dependencies integrate furiously even excuses. blithely silent theodolites could have to haggle pending, express requests; fu');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (1,77,4069,357.84,'al, regular dependencies serve carefully after the quickly final pinto beans. furiously even deposits sleep quickly final, silent pinto beans. fluffily reg');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (2,3,8895,378.49,'nic accounts. final accounts sleep furiously about the ironic, bold packages. regular, regular accounts');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (2,28,4969,915.27,'ptotes. quickly pending dependencies integrate furiously. fluffily ironic ideas impress blithely above the express accounts. furiously even epitaphs need to wak');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (2,53,8539,438.37,'blithely bold ideas. furiously stealthy packages sleep fluffily. slyly special deposits snooze furiously carefully regular accounts. regular deposits according to the accounts nag carefully slyl');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (2,78,3025,306.39,'olites. deposits wake carefully. even, express requests cajole. carefully regular ex');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (3,4,4651,920.92,'ilent foxes affix furiously quickly unusual requests. even packages across the carefully even theodolites nag above the sp');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (3,29,4093,498.13,'ending dependencies haggle fluffily. regular deposits boost quickly carefully regular requests. deposits affix furiously around the pinto beans. ironic, unusual platelets across the p');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (3,54,3917,645.40,'of the blithely regular theodolites. final theodolites haggle blithely carefully unusual ideas. blithely even f');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (3,79,9942,191.92,'unusual, ironic foxes according to the ideas detect furiously alongside of the even, express requests. blithely regular the');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (4,5,1339,113.97,'carefully unusual ideas. packages use slyly. blithely final pinto beans cajole along the furiously express requests. regular orbits haggle carefully. care');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (4,30,6377,591.18,'ly final courts haggle carefully regular accounts. carefully regular accounts could integrate slyly. slyly express packages about the accounts wake slyly');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (4,55,2694,51.37,'g, regular deposits: quick instructions run across the carefully ironic theodolites-- final dependencies haggle into the dependencies. f');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (4,80,2480,444.37,'requests sleep quickly regular accounts. theodolites detect. carefully final depths w');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (5,6,3735,255.88,'arefully even requests. ironic requests cajole carefully even dolphin');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (5,31,9653,50.52,'y stealthy deposits. furiously final pinto beans wake furiou');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (5,56,1329,219.83,'iously regular deposits wake deposits. pending pinto beans promise ironic dependencies. even, regular pinto beans integrate');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (5,81,6925,537.98,'sits. quickly fluffy packages wake quickly beyond the blithely regular requests. pending requests cajole among the final pinto beans. carefully busy theodolites affix quickly stealthily');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (6,7,8851,130.72,'usly final packages. slyly ironic accounts poach across the even, sly requests. carefully pending request');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (6,32,1627,424.25,'quick packages. ironic deposits print. furiously silent platelets across the carefully final requests are slyly along the furiously even instructi');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (6,57,3336,642.13,'final instructions. courts wake packages. blithely unusual realms along the multipliers nag');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (6,82,6451,175.32,'accounts alongside of the slyly even accounts wake carefully final instructions-- ruthless platelets wake carefully ideas. even deposits are quickly final,');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (7,8,7454,763.98,'y express tithes haggle furiously even foxes. furiously ironic deposits sleep toward the furiously unusual');
-INSERT 1
+INSERT 0 1
INSERT INTO partsupp VALUES (7,33,2770,149.66,'hould have to nag after the blithely final asymptotes. fluffily spe');
-INSERT 1
+INSERT 0 1
diff --git a/src/test/isolation2/expected/deadlock_under_entry_db_singleton.out b/src/test/isolation2/expected/deadlock_under_entry_db_singleton.out
index d097b84db6e..8fa6bb422b9 100644
--- a/src/test/isolation2/expected/deadlock_under_entry_db_singleton.out
+++ b/src/test/isolation2/expected/deadlock_under_entry_db_singleton.out
@@ -22,9 +22,9 @@
-- the table to ao table here.
CREATE TABLE deadlock_entry_db_singleton_table (c int, d int) WITH (appendonly=true);
-CREATE
+CREATE TABLE
INSERT INTO deadlock_entry_db_singleton_table select i, i+1 from generate_series(1,10) i;
-INSERT 10
+INSERT 0 10
-- Function that needs ExclusiveLock on a table. Use a non-SQL
-- language for this function so that parser cannot understand its
@@ -32,7 +32,7 @@ INSERT 10
-- of the function. If the lock is acquired during plan generation of
-- the calling SQL statement, we don't get the deadlock.
CREATE FUNCTION function_volatile(x int) RETURNS int AS $$ /*in func*/ BEGIN /*in func*/ UPDATE deadlock_entry_db_singleton_table SET d = d + 1 WHERE c = $1; /*in func*/ RETURN $1 + 1; /*in func*/ END $$ /*in func*/ LANGUAGE plpgsql VOLATILE MODIFIES SQL DATA;
-CREATE
+CREATE FUNCTION
-- inject fault on QD
select gp_inject_fault('transaction_start_under_entry_db_singleton', 'reset', 1);
diff --git a/src/test/isolation2/expected/disable_autovacuum.out b/src/test/isolation2/expected/disable_autovacuum.out
index 409e40bbd2b..382fe3cac90 100644
--- a/src/test/isolation2/expected/disable_autovacuum.out
+++ b/src/test/isolation2/expected/disable_autovacuum.out
@@ -1,5 +1,5 @@
alter system set autovacuum = off;
-ALTER
+ALTER SYSTEM
select gp_segment_id, pg_reload_conf() from gp_id union select gp_segment_id, pg_reload_conf() from gp_dist_random('gp_id');
gp_segment_id | pg_reload_conf
---------------+----------------
diff --git a/src/test/isolation2/expected/distributed_transactions.out b/src/test/isolation2/expected/distributed_transactions.out
index 1135a126d13..d2278385273 100644
--- a/src/test/isolation2/expected/distributed_transactions.out
+++ b/src/test/isolation2/expected/distributed_transactions.out
@@ -9,7 +9,7 @@ SELECT gp_inject_fault( 'abort_after_procarray_end', 'error', 1);
BEGIN;
BEGIN
CREATE TABLE test_xact_abort_failure(a int);
-CREATE
+CREATE TABLE
ABORT;
ERROR: fault triggered, fault name:'abort_after_procarray_end' fault type:'error'
SELECT gp_inject_fault( 'abort_after_procarray_end', 'reset', 1);
@@ -27,9 +27,9 @@ SELECT gp_inject_fault( 'abort_after_procarray_end', 'error', dbid) from gp_segm
BEGIN;
BEGIN
CREATE TABLE test_xact_abort_failure(a int);
-CREATE
+CREATE TABLE
ABORT;
-ABORT
+ROLLBACK
SELECT gp_inject_fault( 'abort_after_procarray_end', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = 0;
gp_inject_fault
-----------------
@@ -45,7 +45,7 @@ SELECT gp_inject_fault( 'abort_after_procarray_end', 'error', dbid) from gp_segm
0U: BEGIN;
BEGIN
0U: CREATE TABLE test_xact_abort_failure(a int);
-CREATE
+CREATE TABLE
0U: ABORT;
ERROR: fault triggered, fault name:'abort_after_procarray_end' fault type:'error'
SELECT gp_inject_fault( 'abort_after_procarray_end', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = 0;
diff --git a/src/test/isolation2/expected/distributedlog-bug.out b/src/test/isolation2/expected/distributedlog-bug.out
index 829c96b3dc5..6b716726a92 100644
--- a/src/test/isolation2/expected/distributedlog-bug.out
+++ b/src/test/isolation2/expected/distributedlog-bug.out
@@ -6,7 +6,7 @@
-- in-progress.
--
CREATE TABLE distributed_snapshot_test ( id INTEGER, f FLOAT);
-CREATE
+CREATE TABLE
1: BEGIN;
BEGIN
@@ -27,7 +27,7 @@ SET
-- Drop table in a transaction
1: drop table distributed_snapshot_test;
-DROP
+DROP TABLE
3: vacuum pg_class;
VACUUM
diff --git a/src/test/isolation2/expected/drop_rename.out b/src/test/isolation2/expected/drop_rename.out
index af0916cb293..96cdb5bb428 100644
--- a/src/test/isolation2/expected/drop_rename.out
+++ b/src/test/isolation2/expected/drop_rename.out
@@ -5,17 +5,17 @@
-- relation does not exist error.
1:drop table if exists t1;
-DROP
+DROP TABLE
1:drop table if exists newt1;
-DROP
+DROP TABLE
1:create table t1 (a int, b text) distributed by (a);
-CREATE
+CREATE TABLE
1:insert into t1 select i, 'abc '||i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
1:begin;
BEGIN
1:alter table t1 rename to newt1;
-ALTER
+ALTER TABLE
1:analyze newt1;
ANALYZE
-- this drop should block to acquire AccessExclusive lock on t1's OID.
@@ -32,17 +32,17 @@ ERROR: table "t1" does not exist
-- DROP is executed concurrently with ALTER RENAME but not ANALYZE.
1:drop table if exists t2;
-DROP
+DROP TABLE
1:drop table if exists newt2;
-DROP
+DROP TABLE
1:create table t2 (a int, b text) distributed by (a);
-CREATE
+CREATE TABLE
1:insert into t2 select i, 'pqr '||i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
1:begin;
BEGIN
1:alter table t2 rename to newt2;
-ALTER
+ALTER TABLE
2&:drop table t2;
1:commit;
COMMIT
@@ -57,20 +57,20 @@ ERROR: table "t2" does not exist
-- The same, but with DROP IF EXISTS. (We used to have a bug, where the DROP
-- command found and drop the relation in the segments, but not in master.)
1:drop table if exists t3;
-DROP
+DROP TABLE
1:create table t3 (a int, b text) distributed by (a);
-CREATE
+CREATE TABLE
1:insert into t3 select i, '123 '||i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
1:begin;
BEGIN
1:alter table t3 rename to t3_new;
-ALTER
+ALTER TABLE
2&:drop table if exists t3;
1:commit;
COMMIT
2<: <... completed>
-DROP
+DROP TABLE
2:select count(*) from t3;
ERROR: relation "t3" does not exist
LINE 1: select count(*) from t3;
@@ -89,15 +89,15 @@ LINE 1: select count(*) from t3;
(3 rows)
1:drop table if exists t3;
-DROP
+DROP TABLE
1:create table t3 (a int, b text) distributed by (a);
-CREATE
+CREATE TABLE
1:insert into t3 select i, '123 '||i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
1:begin;
BEGIN
1:drop table t3;
-DROP
+DROP TABLE
2&:drop table if exists t3;
3&:drop table t3;
1:commit;
@@ -105,7 +105,7 @@ COMMIT
3<: <... completed>
ERROR: table "t3" does not exist
2<: <... completed>
-DROP
+DROP TABLE
2:select count(*) from t3;
ERROR: relation "t3" does not exist
LINE 1: select count(*) from t3;
diff --git a/src/test/isolation2/expected/enable_autovacuum.out b/src/test/isolation2/expected/enable_autovacuum.out
index 90c866cb6dc..c8a4aec3dc4 100644
--- a/src/test/isolation2/expected/enable_autovacuum.out
+++ b/src/test/isolation2/expected/enable_autovacuum.out
@@ -1,5 +1,5 @@
alter system set autovacuum = on;
-ALTER
+ALTER SYSTEM
select gp_segment_id, pg_reload_conf() from gp_id union select gp_segment_id, pg_reload_conf() from gp_dist_random('gp_id');
gp_segment_id | pg_reload_conf
---------------+----------------
diff --git a/src/test/isolation2/expected/execute_on_utilitymode.out b/src/test/isolation2/expected/execute_on_utilitymode.out
index 84c397c7776..8d6d24d823f 100644
--- a/src/test/isolation2/expected/execute_on_utilitymode.out
+++ b/src/test/isolation2/expected/execute_on_utilitymode.out
@@ -4,17 +4,17 @@
-- First, create test functions with different EXECUTE ON options
-create function srf_on_master () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON COORDINATOR;
-CREATE
+create function srf_on_coordinator () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON COORDINATOR;
+CREATE FUNCTION
create function srf_on_all_segments () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON ALL SEGMENTS;
-CREATE
+CREATE FUNCTION
create function srf_on_any () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON ANY IMMUTABLE;
-CREATE
+CREATE FUNCTION
create function srf_on_initplan () returns setof text as $$ begin /* in func */ return next 'foo ' || current_setting('gp_contentid'); /* in func */ return next 'bar ' || current_setting('gp_contentid'); /* in func */ end; /* in func */ $$ language plpgsql EXECUTE ON INITPLAN;
-CREATE
+CREATE FUNCTION
-- Now try executing them in utility mode, in the master node and on a
-- segment. The expected behavior is that the function runs on the node
@@ -23,9 +23,9 @@ CREATE
-- Join with a table, to give the planner something more exciting to do
-- than just create the FunctionScan plan.
create table fewrows (t text) distributed by (t);
-CREATE
+CREATE TABLE
insert into fewrows select g from generate_series(1, 10) g;
-INSERT 10
+INSERT 0 10
-1U: select * from srf_on_master() as srf (x) left join fewrows on x = t;
x | t
diff --git a/src/test/isolation2/expected/export_distributed_snapshot.out b/src/test/isolation2/expected/export_distributed_snapshot.out
index d9270f20e30..fcd8dd58442 100644
--- a/src/test/isolation2/expected/export_distributed_snapshot.out
+++ b/src/test/isolation2/expected/export_distributed_snapshot.out
@@ -10,31 +10,31 @@
-- start_ignore
DROP FUNCTION IF EXISTS corrupt_snapshot_file(text, text);
-DROP
+DROP FUNCTION
DROP FUNCTION IF EXISTS snapshot_file_ds_fields_exist(text);
-DROP
-DROP LANGUAGE IF EXISTS plpython3u;
-DROP
+DROP FUNCTION
+DROP LANGUAGE IF EXISTS plpython3u cascade;
+DROP LANGUAGE
DROP TABLE IF EXISTS export_distributed_snapshot_test1;
-DROP
+DROP TABLE
+-- end_ignore
CREATE LANGUAGE plpython3u;
-CREATE
--- end_ignore
+CREATE LANGUAGE
-- Corrupt field entry for given snapshot file
CREATE OR REPLACE FUNCTION corrupt_snapshot_file(token text, field text) RETURNS integer as $$ import os content = bytearray() query = "select (select datadir from gp_segment_configuration where role='p' and content=-1) || '/pg_snapshots/' as path" rv = plpy.execute(query) abs_path = rv[0]['path'] snapshot_file = abs_path + token if not os.path.isfile(snapshot_file): plpy.info('skipping non-existent file %s' % (snapshot_file)) else: plpy.info('corrupting file %s for field %s' % (snapshot_file, field)) with open(snapshot_file , "rb+") as f: for line in f: l = line.decode() id = l.split(":")[0] if field == id: corrupt = l[:-2] + '*' + l[len(l)-1:] content.extend(corrupt.encode()) else: content.extend(line) f.seek(0) f.truncate f.write(content) f.close() return 0 $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- Determine if field exists for given snapshot file
CREATE OR REPLACE FUNCTION snapshot_file_ds_fields_exist(token text) RETURNS boolean as $$ import os content = bytearray() query = "select (select datadir from gp_segment_configuration where role='p' and content=-1) || '/pg_snapshots/' as path" rv = plpy.execute(query) abs_path = rv[0]['path'] snapshot_file = abs_path + token if not os.path.isfile(snapshot_file): plpy.info('snapshot file %s does not exist' % (snapshot_file)) return -1 else: plpy.info('checking file %s for ds fields' % (snapshot_file)) with open(snapshot_file , "rb+") as f: for line in f: l = line.decode() if "ds" in l: return True return False $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- INSERT test
CREATE TABLE export_distributed_snapshot_test1 (a int);
-CREATE
+CREATE TABLE
INSERT INTO export_distributed_snapshot_test1 values(1);
-INSERT 1
+INSERT 0 1
1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN
@@ -45,7 +45,7 @@ BEGIN
(1 row)
INSERT INTO export_distributed_snapshot_test1 values(2);
-INSERT 1
+INSERT 0 1
SELECT * FROM export_distributed_snapshot_test1;
a
---
@@ -81,9 +81,9 @@ COMMIT
-- DELETE test
CREATE TABLE export_distributed_snapshot_test2 (a int);
-CREATE
+CREATE TABLE
INSERT INTO export_distributed_snapshot_test2 SELECT a FROM generate_series(1,3) a;
-INSERT 3
+INSERT 0 3
1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN
@@ -134,9 +134,9 @@ COMMIT
-- UPDATE test
CREATE TABLE export_distributed_snapshot_test3 (a int);
-CREATE
+CREATE TABLE
INSERT INTO export_distributed_snapshot_test3 SELECT a FROM generate_series(1,5) a;
-INSERT 5
+INSERT 0 5
1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN
@@ -187,7 +187,7 @@ COMMIT
-- DROP test
CREATE TABLE export_distributed_snapshot_test4 (a int);
-CREATE
+CREATE TABLE
1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
BEGIN
@@ -209,7 +209,7 @@ BEGIN
-- Drop table in transaction
2: DROP TABLE export_distributed_snapshot_test4;
-DROP
+DROP TABLE
2: COMMIT;
COMMIT
@@ -263,9 +263,9 @@ ERROR: could not import the requested snapshot
DETAIL: The source process with PID 651456 is not running anymore.
1: END;
-END
+COMMIT
2: END;
-END
+ROLLBACK
-- dsxminall
@@ -292,9 +292,9 @@ BEGIN
SET
1: END;
-END
+COMMIT
2: END;
-END
+COMMIT
-- dsxmin
1: BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
@@ -320,9 +320,9 @@ BEGIN
ERROR: invalid snapshot data in file "pg_snapshots/00000007-00000006-1"
1: END;
-END
+COMMIT
2: END;
-END
+ROLLBACK
-- Test export snapshot in utility mode does not export distributed snapshot fields
@@ -345,7 +345,7 @@ BEGIN
-1Uq: ...
1: END;
-END
+COMMIT
-- Test import snapshot in utility mode fails if distributed snapshot fields exist
1: BEGIN;
@@ -367,7 +367,7 @@ HINT: export the snapshot in utility mode
-1Uq: ...
1: END;
-END
+COMMIT
-- Test export snapshot in utility mode and import snapshot in utility mode succeeds
-1U: @db_name postgres: BEGIN;
diff --git a/src/test/isolation2/expected/frozen_insert_crash.out b/src/test/isolation2/expected/frozen_insert_crash.out
index bf41eebf750..334f2dbf389 100644
--- a/src/test/isolation2/expected/frozen_insert_crash.out
+++ b/src/test/isolation2/expected/frozen_insert_crash.out
@@ -13,7 +13,7 @@
-- the WAL record responsible for updating it to frozen.
-- After crash recovery, the insert will follow regular MVCC and not be seen.
1: create table tab_fi(a int) with (appendoptimized=true) distributed replicated;
-CREATE
+CREATE TABLE
-- switch WAL on seg0 to reduce flakiness
1: select gp_segment_id, pg_switch_wal() is not null from gp_dist_random('gp_id') where gp_segment_id = 0;
@@ -100,12 +100,12 @@ RESET
RESET
1: drop table tab_fi;
-DROP
+DROP TABLE
-- Case 2. crash after we have flushed the WAL that updates the row to be frozen.
-- After crash recovery, the insert should be seen.
1: create table tab_fi(a int) with (appendoptimized=true) distributed replicated;
-CREATE
+CREATE TABLE
-- switch WAL on seg0 to reduce flakiness
1: select gp_segment_id, pg_switch_wal() is not null from gp_dist_random('gp_id') where gp_segment_id = 0;
@@ -245,20 +245,22 @@ END
(2 rows)
-- Same set of tests for bitmap LOV insert.
+create extension if not exists pageinspect;
+CREATE EXTENSION
-- Function to check the bitmap lov content regarding the column 'b'
-- which is the table column that we will have bitmap created on.
-- Basically, we want to see if "SELECT b FROM pg_bitmapindex.pg_bm_xxx"
-- returns the same result in seqscan and indexscan.
CREATE OR REPLACE FUNCTION insert_bm_lov_res() RETURNS void AS $$ DECLARE lov_table text; /* in func */ sql text; /* in func */ BEGIN /* in func */ drop table if exists bm_lov_res; /* in func */ create temp table bm_lov_res(b int); /* in func */ SELECT c.relname INTO lov_table /* in func */ FROM bm_metap('tab_fi_idx') b /* in func */ JOIN pg_class c ON b.auxrelid = c.oid; /* in func */ sql := format('INSERT INTO bm_lov_res SELECT b FROM pg_bitmapindex.%I', lov_table); /* in func */ EXECUTE sql; /* in func */ END; /* in func */ $$ LANGUAGE plpgsql;
-CREATE
+CREATE FUNCTION
1: create table tab_fi(a int, b int) with (appendoptimized=true) distributed replicated;
-CREATE
+CREATE TABLE
1: create index tab_fi_idx on tab_fi using bitmap(b);
-CREATE
+CREATE INDEX
1: insert into tab_fi values(1, 1);
-INSERT 1
+INSERT 0 1
-- switch WAL on seg0 to reduce flakiness
1: select gp_segment_id, pg_switch_wal() is not null from gp_dist_random('gp_id') where gp_segment_id = 0;
gp_segment_id | ?column?
@@ -345,16 +347,16 @@ SET
(1 row)
0Uq: ...
1: drop table tab_fi;
-DROP
+DROP TABLE
-- case 2: suspend and flush WAL after freezing the tuple
1: create table tab_fi(a int, b int) with (appendoptimized=true) distributed replicated;
-CREATE
+CREATE TABLE
1: create index tab_fi_idx on tab_fi using bitmap(b);
-CREATE
+CREATE INDEX
1: insert into tab_fi values(1, 1);
-INSERT 1
+INSERT 0 1
-- switch WAL on seg0 to reduce flakiness
1: select gp_segment_id, pg_switch_wal() is not null from gp_dist_random('gp_id') where gp_segment_id = 0;
gp_segment_id | ?column?
@@ -441,6 +443,50 @@ SET
2
(2 rows)
+1: drop extension pageinspect;
+DROP EXTENSION
+
+-- Test for aoseg: suspend the insert into aoseg table before we mark the row frozen.
+-- Another session should still be able to choose a different segno.
+1: create table tab_aoseg(a int) using ao_row;
+CREATE TABLE
+1: select gp_inject_fault('insert_aoseg_before_freeze', 'suspend', dbid) from gp_segment_configuration where role = 'p' and content = 0;
+ gp_inject_fault
+-----------------
+ Success:
+(1 row)
+1: begin;
+BEGIN
+1>: insert into tab_aoseg select * from generate_series(1,10);
+-- wait until the aoseg record is inserted but not yet frozen
+2: select gp_wait_until_triggered_fault('insert_aoseg_before_freeze', 1, dbid) from gp_segment_configuration where role = 'p' and content = 0;
+ gp_wait_until_triggered_fault
+-------------------------------
+ Success:
+(1 row)
+2: begin;
+BEGIN
+2>: insert into tab_aoseg select * from generate_series(1,10);
+3: select gp_inject_fault('insert_aoseg_before_freeze', 'reset', dbid) from gp_segment_configuration where role = 'p' and content = 0;
+ gp_inject_fault
+-----------------
+ Success:
+(1 row)
+1<: <... completed>
+INSERT 0 10
+2<: <... completed>
+INSERT 0 10
+1: end;
+COMMIT
+2: end;
+COMMIT
+3: select segment_id, segno, eof from gp_toolkit.__gp_aoseg('tab_aoseg') where segment_id = 0;
+ segment_id | segno | eof
+------------+-------+-----
+ 0 | 1 | 88
+ 0 | 2 | 88
+(2 rows)
+
-- validate that we've actually tested desired scan method
-- for some reason this disrupts the output of subsequent queries so
-- validating at the end here
diff --git a/src/test/isolation2/expected/fsync_ao.out b/src/test/isolation2/expected/fsync_ao.out
index 4f2a466507a..c4dbaae74ae 100644
--- a/src/test/isolation2/expected/fsync_ao.out
+++ b/src/test/isolation2/expected/fsync_ao.out
@@ -21,17 +21,17 @@
(exited with code 0)
create table fsync_ao(a int, b int) with (appendoptimized = true) distributed by (a);
-CREATE
+CREATE TABLE
create table fsync_co(a int, b int) with (appendoptimized = true, orientation = column) distributed by (a);
-CREATE
+CREATE TABLE
-- no fsync requests should ever be registered for unlogged tables
create unlogged table ul_fsync_co(a int, b int, c int) using ao_column distributed by (a);
-CREATE
+CREATE TABLE
insert into fsync_ao select i, i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
insert into fsync_co select i, i from generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
-- Fault to check that mirror has flushed pending fsync requests.
select gp_inject_fault_infinite('restartpoint_guts', 'skip', dbid) from gp_segment_configuration where role = 'm' and content = 0;
@@ -66,11 +66,11 @@ select gp_inject_fault_infinite('ao_fsync_counter', 'skip', dbid) from gp_segmen
-- Write ao and co data files including aoseg & gp_fastsequence.
-- These should be fsync-ed by checkpoint & restartpoint.
insert into fsync_ao select i, i from generate_series(1,20)i;
-INSERT 20
+INSERT 0 20
insert into fsync_co select i, i from generate_series(1,20)i;
-INSERT 20
+INSERT 0 20
insert into ul_fsync_co select i, i, i from generate_series(1,20)i;
-INSERT 20
+INSERT 0 20
checkpoint;
CHECKPOINT
@@ -103,19 +103,19 @@ select gp_inject_fault('ao_fsync_counter', 'status', dbid) from gp_segment_confi
1: begin;
BEGIN
1: insert into fsync_ao select i, i from generate_series(1,20)i;
-INSERT 20
+INSERT 0 20
1: insert into fsync_co select i, i from generate_series(1,20)i;
-INSERT 20
+INSERT 0 20
1: insert into ul_fsync_co select i, i, i from generate_series(1,20)i;
-INSERT 20
+INSERT 0 20
insert into fsync_ao select i, i from generate_series(21,40)i;
-INSERT 20
+INSERT 0 20
insert into fsync_co select i, i from generate_series(21,40)i;
-INSERT 20
+INSERT 0 20
insert into ul_fsync_co select i, i, i from generate_series(1,40)i;
-INSERT 40
+INSERT 0 40
1: end;
-END
+COMMIT
-- Generate some invisible tuples in both the tables so as to trigger
-- compaction during vacuum.
delete from fsync_ao where a > 20;
@@ -207,23 +207,23 @@ select gp_inject_fault('ao_fsync_counter', 'status', dbid) from gp_segment_confi
update fsync_co set b = -a;
UPDATE 70
drop table fsync_co;
-DROP
+DROP TABLE
update ul_fsync_co set c = -a;
UPDATE 23
drop table ul_fsync_co;
-DROP
+DROP TABLE
-- Drop but don't commit the transaction.
begin;
BEGIN
update fsync_ao set b = -a;
UPDATE 50
drop table fsync_ao;
-DROP
+DROP TABLE
abort;
-ABORT
+ROLLBACK
-- Fsync request for the following insert should not be forgotten.
insert into fsync_ao select * from generate_series(41,60)i;
-INSERT 20
+INSERT 0 20
checkpoint;
CHECKPOINT
@@ -244,8 +244,8 @@ select gp_wait_until_triggered_fault('ao_fsync_counter', 13, dbid) from gp_segme
select gp_inject_fault('ao_fsync_counter', 'status', dbid) from gp_segment_configuration where content=0 and role='m';
gp_inject_fault
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-
Success: fault name:'ao_fsync_counter' fault type:'skip' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'-1' extra arg:'0' fault injection state:'triggered' num times hit:'13'
+
(1 row)
-- Reset all faults.
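
A note on the pattern behind most hunks in this and the surrounding files: the expected output now carries full PostgreSQL command tags (what psql prints) rather than abbreviated ones, so CREATE becomes CREATE TABLE, DROP becomes DROP TABLE, and INSERT gains its count prefix. A small sketch with a hypothetical table name, tags shown as comments:

    begin;                                   -- BEGIN
    create table t_tag_demo(a int);          -- CREATE TABLE, not just "CREATE"
    insert into t_tag_demo values (1), (2);  -- INSERT 0 2: the tag is
                                             -- "INSERT <oid> <count>"; the
                                             -- historical oid field is always 0
    commit;                                  -- COMMIT
    truncate t_tag_demo;                     -- TRUNCATE TABLE
    drop table t_tag_demo;                   -- DROP TABLE
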
diff --git a/src/test/isolation2/expected/fts_errors.out b/src/test/isolation2/expected/fts_errors.out
index 69cb7a2a38c..fca1ecaf5d8 100644
--- a/src/test/isolation2/expected/fts_errors.out
+++ b/src/test/isolation2/expected/fts_errors.out
@@ -27,8 +27,8 @@
(exited with code 0)
-- Helper function
-CREATE or REPLACE FUNCTION wait_until_segments_are_down(num_segs int) RETURNS bool AS $$ declare retries int; /* in func */ begin /* in func */ retries := 120; /* in func */ loop /* in func */ if (select count(*) = num_segs from gp_segment_configuration where status = 'd') then /* in func */ return true; /* in func */ end if; /* in func */ if retries <= 0 then /* in func */ return false; /* in func */ end if; /* in func */ perform pg_sleep(1); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql;
-CREATE
+CREATE or REPLACE FUNCTION wait_until_segments_are_down(num_segs int) RETURNS bool AS $$ declare retries int; /* in func */ begin /* in func */ retries := 1200; /* in func */ loop /* in func */ if (select count(*) = num_segs from gp_segment_configuration where status = 'd') then /* in func */ return true; /* in func */ end if; /* in func */ if retries <= 0 then /* in func */ return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql;
+CREATE FUNCTION
-- no segment down.
select count(*) from gp_segment_configuration where status = 'd';
@@ -38,34 +38,34 @@ select count(*) from gp_segment_configuration where status = 'd';
(1 row)
drop table if exists fts_errors_test;
-DROP
+DROP TABLE
create table fts_errors_test(a int);
-CREATE
+CREATE TABLE
1:BEGIN;
BEGIN
1:END;
-END
+COMMIT
2:BEGIN;
BEGIN
2:INSERT INTO fts_errors_test SELECT * FROM generate_series(1,100);
-INSERT 100
+INSERT 0 100
3:BEGIN;
BEGIN
3:CREATE TEMP TABLE tmp3 (c1 int, c2 int);
-CREATE
+CREATE TABLE
3:DECLARE c1 CURSOR for select * from tmp3;
-DECLARE
+DECLARE CURSOR
4:CREATE TEMP TABLE tmp4 (c1 int, c2 int);
-CREATE
+CREATE TABLE
5:BEGIN;
BEGIN
5:CREATE TEMP TABLE tmp5 (c1 int, c2 int);
-CREATE
+CREATE TABLE
5:SAVEPOINT s1;
SAVEPOINT
5:CREATE TEMP TABLE tmp51 (c1 int, c2 int);
-CREATE
+CREATE TABLE
-- probe to make sure that when we call gp_request_fts_probe_scan() next
-- time below, it doesn't overlap with auto-triggered FTS scans by the FTS
@@ -144,7 +144,7 @@ select gp_inject_fault('get_dns_cached_address', 'reset', 1);
1:BEGIN;
BEGIN
1:END;
-END
+COMMIT
-- session 2: in transaction, gxid is dispatched to writer gang, can't
-- update cdb_component_dbs, the following query should fail
-- start_ignore
@@ -177,7 +177,7 @@ ERROR: gang was lost due to cluster reconfiguration (cdbgang_async.c:98)
5:ROLLBACK TO SAVEPOINT s1;
ERROR: Could not rollback to savepoint (ROLLBACK TO SAVEPOINT s1)
5:END;
-END
+ROLLBACK
1q: ...
2q: ...
3q: ...
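
The wait_until_segments_are_down helper above is necessarily written on one line (isolation2 specs keep function bodies on a single line, hence the /* in func */ markers). Unfolded for readability, the updated version polls 1200 times at 0.1 s, keeping the same two-minute budget as the old 120 x 1 s loop while reacting roughly ten times faster:

    CREATE OR REPLACE FUNCTION wait_until_segments_are_down(num_segs int)
    RETURNS bool AS $$
    declare
        retries int;
    begin
        retries := 1200;                 -- 1200 polls * 0.1 s = 2 minutes
        loop
            if (select count(*) = num_segs
                  from gp_segment_configuration
                 where status = 'd') then
                return true;             -- the expected segments are down
            end if;
            if retries <= 0 then
                return false;            -- timed out
            end if;
            perform pg_sleep(0.1);
            retries := retries - 1;
        end loop;
    end;
    $$ language plpgsql;
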
diff --git a/src/test/isolation2/expected/fts_errors_1.out b/src/test/isolation2/expected/fts_errors_1.out
index 945b669d7f6..6b99fc282c8 100644
--- a/src/test/isolation2/expected/fts_errors_1.out
+++ b/src/test/isolation2/expected/fts_errors_1.out
@@ -34,7 +34,7 @@
-- Helper function
CREATE or REPLACE FUNCTION wait_until_segments_are_down(num_segs int) RETURNS bool AS $$ declare retries int; /* in func */ begin /* in func */ retries := 1200; /* in func */ loop /* in func */ if (select count(*) = num_segs from gp_segment_configuration where status = 'd') then /* in func */ return true; /* in func */ end if; /* in func */ if retries <= 0 then /* in func */ return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql;
-CREATE
+CREATE FUNCTION
-- no segment down.
select count(*) from gp_segment_configuration where status = 'd';
@@ -44,34 +44,34 @@ select count(*) from gp_segment_configuration where status = 'd';
(1 row)
drop table if exists fts_errors_test;
-DROP
+DROP TABLE
create table fts_errors_test(a int);
-CREATE
+CREATE TABLE
1:BEGIN;
BEGIN
1:END;
-END
+COMMIT
2:BEGIN;
BEGIN
2:INSERT INTO fts_errors_test SELECT * FROM generate_series(1,100);
-INSERT 100
+INSERT 0 100
3:BEGIN;
BEGIN
3:CREATE TEMP TABLE tmp3 (c1 int, c2 int);
-CREATE
+CREATE TABLE
3:DECLARE c1 CURSOR for select * from tmp3;
-DECLARE
+DECLARE CURSOR
4:CREATE TEMP TABLE tmp4 (c1 int, c2 int);
-CREATE
+CREATE TABLE
5:BEGIN;
BEGIN
5:CREATE TEMP TABLE tmp5 (c1 int, c2 int);
-CREATE
+CREATE TABLE
5:SAVEPOINT s1;
SAVEPOINT
5:CREATE TEMP TABLE tmp51 (c1 int, c2 int);
-CREATE
+CREATE TABLE
-- probe to make sure that when we call gp_request_fts_probe_scan() next
-- time below, it doesn't overlap with auto-triggered FTS scans by the FTS
@@ -130,7 +130,7 @@ select gp_inject_fault('get_dns_cached_address', 'reset', 1);
1:BEGIN;
BEGIN
1:END;
-END
+COMMIT
-- session 2: in transaction, gxid is dispatched to writer gang, can't
-- update cdb_component_dbs, the following query should fail
2:END;
@@ -161,7 +161,7 @@ ERROR: gang was lost due to cluster reconfiguration (cdbgang_async.c:98)
5:ROLLBACK TO SAVEPOINT s1;
ERROR: Could not rollback to savepoint (ROLLBACK TO SAVEPOINT s1)
5:END;
-END
+ROLLBACK
1q: ...
2q: ...
3q: ...
diff --git a/src/test/isolation2/expected/fts_segment_reset.out b/src/test/isolation2/expected/fts_segment_reset.out
index 141c104f772..2f346219da9 100644
--- a/src/test/isolation2/expected/fts_segment_reset.out
+++ b/src/test/isolation2/expected/fts_segment_reset.out
@@ -58,7 +58,7 @@ select gp_inject_fault_infinite('postmaster_server_loop_no_sigkill', 'skip', dbi
3:set gp_gang_creation_retry_timer = 10000;
SET
3:create table fts_reset_t3(a int);
-CREATE
+CREATE TABLE
1<: <... completed>
ERROR: fault triggered, fault name:'start_prepare' fault type:'panic'
@@ -105,7 +105,7 @@ select pg_reload_conf();
-- The only table that should have been created successfully
drop table fts_reset_t3;
-DROP
+DROP TABLE
-- In case anything goes wrong, we don't want to affect other tests. So rebalance the cluster anyway.
-!\retcode gprecoverseg -aF
+!\retcode gprecoverseg -ar
\ No newline at end of file
diff --git a/src/test/isolation2/expected/fts_session_reset.out b/src/test/isolation2/expected/fts_session_reset.out
index afeef05ed74..e667f4c546d 100644
--- a/src/test/isolation2/expected/fts_session_reset.out
+++ b/src/test/isolation2/expected/fts_session_reset.out
@@ -6,15 +6,24 @@
-- set these values purely to cut down test time, as by default the FTS
-- probe triggers every minute with 5 retries
+alter system set gp_fts_probe_interval to 10;
+ALTER SYSTEM
+alter system set gp_fts_probe_retries to 0;
+ALTER SYSTEM
+select pg_reload_conf();
+ pg_reload_conf
+----------------
+ t
+(1 row)
create table test_fts_session_reset(c1 int);
-CREATE
+CREATE TABLE
1:BEGIN;
BEGIN
-- let the dispatcher create a gang
1:insert into test_fts_session_reset select * from generate_series(1,20);
-INSERT 20
+INSERT 0 20
-- this injected fault can make dispatcher think the primary is down
2:select gp_inject_fault_infinite('fts_conn_startup_packet', 'error', dbid) from gp_segment_configuration where role='p' and content=0;
gp_inject_fault_infinite
@@ -44,12 +53,7 @@ ERROR: gang was lost due to cluster reconfiguration (cdbgang_async.c:98)
1:select count(*) from test_fts_session_reset;
ERROR: current transaction is aborted, commands ignored until end of transaction block
1:END;
-END
-1:select pg_sleep(30);
- pg_sleep
-----------
-
-(1 row)
+ROLLBACK
1:select count(*) from test_fts_session_reset;
count
-------
@@ -117,3 +121,14 @@ select count(*) from gp_segment_configuration where status = 'd';
-------
0
(1 row)
+
+alter system reset gp_fts_probe_interval;
+ALTER SYSTEM
+alter system reset gp_fts_probe_retries;
+ALTER SYSTEM
+select pg_reload_conf();
+ pg_reload_conf
+----------------
+ t
+(1 row)
+
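
The fix above replaces a fixed pg_sleep(30) with explicit probe tuning: the test shortens the FTS settings up front and restores them at the end. The pattern, assuming superuser access (pg_reload_conf() applies the change without a restart, and ALTER SYSTEM now reports its full tag):

    alter system set gp_fts_probe_interval to 10;  -- probe every 10 s
    alter system set gp_fts_probe_retries to 0;    -- fail fast, no retries
    select pg_reload_conf();
    -- ... run the timing-sensitive part of the test ...
    alter system reset gp_fts_probe_interval;
    alter system reset gp_fts_probe_retries;
    select pg_reload_conf();
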
diff --git a/src/test/isolation2/expected/gdd/avoid-qd-deadlock.out b/src/test/isolation2/expected/gdd/avoid-qd-deadlock.out
index 81825036cbc..6d064de62d2 100644
--- a/src/test/isolation2/expected/gdd/avoid-qd-deadlock.out
+++ b/src/test/isolation2/expected/gdd/avoid-qd-deadlock.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS tsudf;
-DROP
+DROP TABLE
CREATE TABLE tsudf (c int, d int);
-CREATE
+CREATE TABLE
CREATE OR REPLACE FUNCTION func1(int) RETURNS int AS $$ BEGIN UPDATE tsudf SET d = d+1 WHERE c = $1; /* in func */ RETURN $1; /* in func */ END; /* in func */ $$ LANGUAGE plpgsql;
-CREATE
+CREATE FUNCTION
INSERT INTO tsudf select i, i+1 from generate_series(1,10) i;
-INSERT 10
+INSERT 0 10
SELECT gp_inject_fault('upgrade_row_lock', 'reset', 1);
gp_inject_fault
diff --git a/src/test/isolation2/expected/gdd/concurrent_update.out b/src/test/isolation2/expected/gdd/concurrent_update.out
index 761e3f681a1..a109c0b6a0e 100644
--- a/src/test/isolation2/expected/gdd/concurrent_update.out
+++ b/src/test/isolation2/expected/gdd/concurrent_update.out
@@ -1,8 +1,8 @@
-- Test concurrent update a table with a varying length type
CREATE TABLE t_concurrent_update(a int, b int, c char(84));
-CREATE
+CREATE TABLE
INSERT INTO t_concurrent_update VALUES(1,1,'test');
-INSERT 1
+INSERT 0 1
1: BEGIN;
BEGIN
@@ -14,7 +14,7 @@ UPDATE 1
SET
2&: UPDATE t_concurrent_update SET b=b+10 WHERE a=1;
1: END;
-END
+COMMIT
2<: <... completed>
UPDATE 1
1: SELECT * FROM t_concurrent_update;
@@ -26,13 +26,13 @@ UPDATE 1
2q: ...
DROP TABLE t_concurrent_update;
-DROP
+DROP TABLE
-- Test the concurrent update transaction order on the segment is reflected on master
1: CREATE TABLE t_concurrent_update(a int, b int);
-CREATE
+CREATE TABLE
1: INSERT INTO t_concurrent_update VALUES(1,1);
-INSERT 1
+INSERT 0 1
2: BEGIN;
BEGIN
@@ -76,9 +76,9 @@ UPDATE 1
Success:
(1 row)
2<: <... completed>
-END
+COMMIT
3<: <... completed>
-END
+COMMIT
2q: ...
3q: ...
@@ -117,7 +117,7 @@ SET
Success:
(1 row)
4: END;
-END
+COMMIT
4: SELECT gp_inject_fault('before_get_distributed_xid', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=1;
gp_inject_fault
-----------------
@@ -127,14 +127,14 @@ END
5<: <... completed>
UPDATE 1
5: END;
-END
+COMMIT
6: SELECT * FROM t_concurrent_update;
a | b
---+----
1 | 41
(1 row)
6: DROP TABLE t_concurrent_update;
-DROP
+DROP TABLE
4q: ...
5q: ...
6q: ...
@@ -152,9 +152,9 @@ DROP
-- distribution keys is updated.
0: create table tab_update_hashcol (c1 int, c2 int) distributed by(c1);
-CREATE
+CREATE TABLE
0: insert into tab_update_hashcol values(1,1);
-INSERT 1
+INSERT 0 1
0: select * from tab_update_hashcol;
c1 | c2
----+----
@@ -169,18 +169,18 @@ BEGIN
UPDATE 1
2&: update tab_update_hashcol set c1 = c1 + 1 where c1 = 1;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: EvalPlanQual can not handle subPlan with Motion node (seg1 127.0.1.1:7003 pid=34629)
2: end;
-END
+ROLLBACK
0: select * from tab_update_hashcol;
c1 | c2
----+----
2 | 1
(1 row)
0: drop table tab_update_hashcol;
-DROP
+DROP TABLE
-- Test EvalPlanQual
-- If we enable the GDD, then the lock may be downgraded to
@@ -192,13 +192,13 @@ DROP
-- GDD is enabled and EvalPlanQual is triggered.
0: create table tab_update_epq1 (c1 int, c2 int) distributed randomly;
-CREATE
+CREATE TABLE
0: create table tab_update_epq2 (c1 int, c2 int) distributed randomly;
-CREATE
+CREATE TABLE
0: insert into tab_update_epq1 values(1,1);
-INSERT 1
+INSERT 0 1
0: insert into tab_update_epq2 values(1,1);
-INSERT 1
+INSERT 0 1
0: select * from tab_update_epq1;
c1 | c2
----+----
@@ -223,11 +223,11 @@ BEGIN
UPDATE 1
2&: update tab_update_epq1 set c1 = tab_update_epq1.c1 + 1 from tab_update_epq2 where tab_update_epq1.c2 = tab_update_epq2.c2;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: EvalPlanQual can not handle subPlan with Motion node (seg0 127.0.1.1:7002 pid=108407)
2: end;
-END
+ROLLBACK
0: select * from tab_update_epq1;
c1 | c2
@@ -235,16 +235,16 @@ END
2 | 1
(1 row)
0: drop table tab_update_epq1;
-DROP
+DROP TABLE
0: drop table tab_update_epq2;
-DROP
+DROP TABLE
0q: ...
1q: ...
2q: ...
-- check that orca concurrent delete transaction won't delete tuple, updated in other transaction (which doesn't match predicate anymore)
create table test as select 0 as i distributed randomly;
-CREATE 1
+SELECT 1
1: begin;
BEGIN
1: update test set i = i + 1;
@@ -253,17 +253,17 @@ UPDATE 1
-- the following SQL will hang due to XID lock
2&: delete from test where i = 0;
1: end;
-END
+COMMIT
2<: <... completed>
DELETE 0
drop table test;
-DROP
+DROP TABLE
1q: ...
2q: ...
-- check that orca concurrent delete transaction will delete tuple, updated in other transaction (which still matches predicate)
create table test as select 0 as i distributed randomly;
-CREATE 1
+SELECT 1
1: begin;
BEGIN
1: update test set i = i;
@@ -272,19 +272,19 @@ UPDATE 1
-- the following SQL will hang due to XID lock
2&: delete from test where i = 0;
1: end;
-END
+COMMIT
2<: <... completed>
DELETE 1
drop table test;
-DROP
+DROP TABLE
1q: ...
2q: ...
-- test ORCA partition table
create table test(a int, b int, c int) partition by range(b) (start (1) end (7) every (3));
-CREATE
+CREATE TABLE
insert into test values (1, 1, 1);
-INSERT 1
+INSERT 0 1
1: begin;
BEGIN
1: delete from test where b = 1;
@@ -293,7 +293,7 @@ DELETE 1
-- the following SQL will hang due to XID lock
2&: update test set b = 1;
1: end;
-END
+COMMIT
2<: <... completed>
UPDATE 0
@@ -302,7 +302,7 @@ UPDATE 0
---+---+---
(0 rows)
0: drop table test;
-DROP
+DROP TABLE
0q: ...
1q: ...
2q: ...
@@ -310,9 +310,9 @@ DROP
-- test ORCA partition table
-- related github issue https://github.com/greenplum-db/gpdb/issues/14935
create table test(a int, b int, c int) partition by range(b) (start (1) end (7) every (3));
-CREATE
+CREATE TABLE
insert into test values (1, 1, 1), (1, 2, 1);
-INSERT 2
+INSERT 0 2
1: begin;
BEGIN
1: update test set c = 1;
@@ -321,7 +321,7 @@ UPDATE 2
-- the following SQL will hang due to XID lock
2&: update test set c = 1;
1: end;
-END
+COMMIT
2<: <... completed>
UPDATE 2
@@ -332,7 +332,7 @@ UPDATE 2
1 | 2 | 1
(2 rows)
0: drop table test;
-DROP
+DROP TABLE
0q: ...
1q: ...
2q: ...
@@ -346,9 +346,9 @@ DROP
-- See github issue: https://github.com/greenplum-db/gpdb/issues/8919
0:create table t_splitupdate_raise_error (a int, b int) distributed by (a);
-CREATE
+CREATE TABLE
0:insert into t_splitupdate_raise_error values (1, 1);
-INSERT 1
+INSERT 0 1
-- test delete will throw error
1: begin;
@@ -361,12 +361,12 @@ BEGIN
2&: delete from t_splitupdate_raise_error;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg1 127.0.1.1:7003 pid=34629)
2: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
@@ -381,12 +381,12 @@ BEGIN
2&: update t_splitupdate_raise_error set b = 999;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg0 127.0.1.1:7002 pid=43842)
2: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
@@ -409,14 +409,14 @@ SET
2&: select * from t_splitupdate_raise_error for update;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg0 slice1 127.0.1.1:7002 pid=43866)
2: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
0:drop table t_splitupdate_raise_error;
-DROP
+DROP TABLE
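
Two tag changes in this file deserve a note: CREATE TABLE ... AS executes the underlying query, so the reported tag is the SELECT's, with a row count; and END on an already-aborted transaction reports ROLLBACK rather than COMMIT. A sketch with a hypothetical table name:

    create table ctas_demo as select 0 as i distributed randomly;  -- SELECT 1
    begin;                                   -- BEGIN
    select 1/0;                              -- ERROR: division by zero
    end;                                     -- ROLLBACK: the transaction was
                                             -- already aborted, so END rolls back
    drop table ctas_demo;                    -- DROP TABLE
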
diff --git a/src/test/isolation2/expected/gdd/concurrent_update_optimizer.out b/src/test/isolation2/expected/gdd/concurrent_update_optimizer.out
index 4a04cd86a21..2642b507f7d 100644
--- a/src/test/isolation2/expected/gdd/concurrent_update_optimizer.out
+++ b/src/test/isolation2/expected/gdd/concurrent_update_optimizer.out
@@ -1,8 +1,8 @@
-- Test concurrent update a table with a varying length type
CREATE TABLE t_concurrent_update(a int, b int, c char(84));
-CREATE
+CREATE TABLE
INSERT INTO t_concurrent_update VALUES(1,1,'test');
-INSERT 1
+INSERT 0 1
1: BEGIN;
BEGIN
@@ -14,7 +14,7 @@ UPDATE 1
SET
2&: UPDATE t_concurrent_update SET b=b+10 WHERE a=1;
1: END;
-END
+COMMIT
2<: <... completed>
UPDATE 1
1: SELECT * FROM t_concurrent_update;
@@ -26,13 +26,13 @@ UPDATE 1
2q: ...
DROP TABLE t_concurrent_update;
-DROP
+DROP TABLE
-- Test the concurrent update transaction order on the segment is reflected on master
1: CREATE TABLE t_concurrent_update(a int, b int);
-CREATE
+CREATE TABLE
1: INSERT INTO t_concurrent_update VALUES(1,1);
-INSERT 1
+INSERT 0 1
2: BEGIN;
BEGIN
@@ -76,9 +76,9 @@ UPDATE 1
Success:
(1 row)
2<: <... completed>
-END
+COMMIT
3<: <... completed>
-END
+COMMIT
2q: ...
3q: ...
@@ -117,7 +117,7 @@ SET
Success:
(1 row)
4: END;
-END
+COMMIT
4: SELECT gp_inject_fault('before_get_distributed_xid', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=1;
gp_inject_fault
-----------------
@@ -127,14 +127,14 @@ END
5<: <... completed>
UPDATE 1
5: END;
-END
+COMMIT
6: SELECT * FROM t_concurrent_update;
a | b
---+----
1 | 41
(1 row)
6: DROP TABLE t_concurrent_update;
-DROP
+DROP TABLE
4q: ...
5q: ...
6q: ...
@@ -152,9 +152,9 @@ DROP
-- distribution keys is updated.
0: create table tab_update_hashcol (c1 int, c2 int) distributed by(c1);
-CREATE
+CREATE TABLE
0: insert into tab_update_hashcol values(1,1);
-INSERT 1
+INSERT 0 1
0: select * from tab_update_hashcol;
c1 | c2
----+----
@@ -169,18 +169,18 @@ BEGIN
UPDATE 1
2&: update tab_update_hashcol set c1 = c1 + 1 where c1 = 1;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: EvalPlanQual can not handle subPlan with Motion node
2: end;
-END
+ROLLBACK
0: select * from tab_update_hashcol;
c1 | c2
----+----
2 | 1
(1 row)
0: drop table tab_update_hashcol;
-DROP
+DROP TABLE
-- Test EvalPlanQual
-- If we enable the GDD, then the lock may be downgraded to
@@ -192,13 +192,13 @@ DROP
-- GDD is enabled and EvalPlanQual is triggered.
0: create table tab_update_epq1 (c1 int, c2 int) distributed randomly;
-CREATE
+CREATE TABLE
0: create table tab_update_epq2 (c1 int, c2 int) distributed randomly;
-CREATE
+CREATE TABLE
0: insert into tab_update_epq1 values(1,1);
-INSERT 1
+INSERT 0 1
0: insert into tab_update_epq2 values(1,1);
-INSERT 1
+INSERT 0 1
0: select * from tab_update_epq1;
c1 | c2
----+----
@@ -223,11 +223,11 @@ BEGIN
UPDATE 1
2&: update tab_update_epq1 set c1 = tab_update_epq1.c1 + 1 from tab_update_epq2 where tab_update_epq1.c2 = tab_update_epq2.c2;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: EvalPlanQual can not handle subPlan with Motion node (seg1 127.0.1.1:6003 pid=76275)
2: end;
-END
+ROLLBACK
0: select * from tab_update_epq1;
c1 | c2
@@ -235,16 +235,16 @@ END
2 | 1
(1 row)
0: drop table tab_update_epq1;
-DROP
+DROP TABLE
0: drop table tab_update_epq2;
-DROP
+DROP TABLE
0q: ...
1q: ...
2q: ...
-- check that orca concurrent delete transaction won't delete tuple, updated in other transaction (which doesn't match predicate anymore)
create table test as select 0 as i distributed randomly;
-CREATE 1
+SELECT 1
1: begin;
BEGIN
1: update test set i = i + 1;
@@ -253,17 +253,17 @@ UPDATE 1
-- the following SQL will hang due to XID lock
2&: delete from test where i = 0;
1: end;
-END
+COMMIT
2<: <... completed>
DELETE 0
drop table test;
-DROP
+DROP TABLE
1q: ...
2q: ...
-- check that orca concurrent delete transaction will delete tuple, updated in other transaction (which still matches predicate)
create table test as select 0 as i distributed randomly;
-CREATE 1
+SELECT 1
1: begin;
BEGIN
1: update test set i = i;
@@ -272,19 +272,19 @@ UPDATE 1
-- the following SQL will hang due to XID lock
2&: delete from test where i = 0;
1: end;
-END
+COMMIT
2<: <... completed>
DELETE 1
drop table test;
-DROP
+DROP TABLE
1q: ...
2q: ...
-- test ORCA partition table
create table test(a int, b int, c int) partition by range(b) (start (1) end (7) every (3));
-CREATE
+CREATE TABLE
insert into test values (1, 1, 1);
-INSERT 1
+INSERT 0 1
1: begin;
BEGIN
1: delete from test where b = 1;
@@ -293,7 +293,7 @@ DELETE 1
-- the following SQL will hang due to XID lock
2&: update test set b = 1;
1: end;
-END
+COMMIT
2<: <... completed>
UPDATE 0
@@ -302,7 +302,7 @@ UPDATE 0
---+---+---
(0 rows)
0: drop table test;
-DROP
+DROP TABLE
0q: ...
1q: ...
2q: ...
@@ -310,9 +310,9 @@ DROP
-- test ORCA partition table
-- related github issue https://github.com/greenplum-db/gpdb/issues/14935
create table test(a int, b int, c int) partition by range(b) (start (1) end (7) every (3));
-CREATE
+CREATE TABLE
insert into test values (1, 1, 1), (1, 2, 1);
-INSERT 2
+INSERT 0 2
1: begin;
BEGIN
1: update test set c = 1;
@@ -321,7 +321,7 @@ UPDATE 2
-- the following SQL will hang due to XID lock
2&: update test set c = 1;
1: end;
-END
+COMMIT
2<: <... completed>
UPDATE 2
@@ -332,7 +332,7 @@ UPDATE 2
1 | 2 | 1
(2 rows)
0: drop table test;
-DROP
+DROP TABLE
0q: ...
1q: ...
2q: ...
@@ -346,9 +346,9 @@ DROP
-- See github issue: https://github.com/greenplum-db/gpdb/issues/8919
0:create table t_splitupdate_raise_error (a int, b int) distributed by (a);
-CREATE
+CREATE TABLE
0:insert into t_splitupdate_raise_error values (1, 1);
-INSERT 1
+INSERT 0 1
-- test delete will throw error
1: begin;
@@ -361,12 +361,12 @@ BEGIN
2&: delete from t_splitupdate_raise_error;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg1 127.0.1.1:6003 pid=76275)
2: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
@@ -381,12 +381,12 @@ BEGIN
2&: update t_splitupdate_raise_error set b = 999;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg0 127.0.1.1:6002 pid=76337)
2: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
@@ -409,14 +409,14 @@ SET
2&: select * from t_splitupdate_raise_error for update;
1: end;
-END
+COMMIT
2<: <... completed>
ERROR: tuple to be locked was already moved to another partition or segment due to concurrent update (seg0 slice1 127.0.1.1:7002 pid=43866)
2: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
0:drop table t_splitupdate_raise_error;
-DROP
+DROP TABLE
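
For readers new to the isolation2 spec syntax that pervades these files, a hedged reading of the notation as it is used here: N: runs a statement synchronously in session N, N&: launches it in the background (the expected output later shows "N<: <... completed>" when it is joined), and Nq: quits the session. A sketch against a hypothetical table t:

    1: begin;                 -- session 1, run synchronously
    1: update t set a = 1;    -- holds the xid lock on the updated row
    2&: update t set a = 2;   -- session 2 starts in the background and blocks
    1: end;                   -- committing releases the lock ...
    2<:                       -- ... so joining session 2 now completes
    1q:                       -- quit both sessions
    2q:
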
diff --git a/src/test/isolation2/expected/gdd/delete-deadlock-root-leaf-concurrent-op.out b/src/test/isolation2/expected/gdd/delete-deadlock-root-leaf-concurrent-op.out
index 1b4dd43bf0a..d21de9145b0 100644
--- a/src/test/isolation2/expected/gdd/delete-deadlock-root-leaf-concurrent-op.out
+++ b/src/test/isolation2/expected/gdd/delete-deadlock-root-leaf-concurrent-op.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS part_tbl;
-DROP
+DROP TABLE
CREATE TABLE part_tbl (a int, b int, c int) PARTITION BY RANGE(b) (START(1) END(2) EVERY(1));
-CREATE
+CREATE TABLE
INSERT INTO part_tbl SELECT i, 1, i FROM generate_series(1,100)i;
-INSERT 100
+INSERT 0 100
-- check gdd is enabled
show gp_enable_global_deadlock_detector;
@@ -44,4 +44,4 @@ ROLLBACK
2:ROLLBACK;
ROLLBACK
DROP TABLE IF EXISTS part_tbl;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-01.out b/src/test/isolation2/expected/gdd/dist-deadlock-01.out
index 3cedc30f509..b8b46b715db 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-01.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-01.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS t01;
-DROP
+DROP TABLE
CREATE TABLE t01 (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t01 (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-04.out b/src/test/isolation2/expected/gdd/dist-deadlock-04.out
index c393a69b54b..f497faa08f3 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-04.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-04.out
@@ -1,12 +1,12 @@
DROP TABLE IF EXISTS t04a;
-DROP
+DROP TABLE
CREATE TABLE t04a (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t04a (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
DROP TABLE IF EXISTS t04b;
-DROP
+DROP TABLE
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
@@ -22,7 +22,7 @@ BEGIN
10: UPDATE t04a SET val=val WHERE id=segid(0,1);
UPDATE 1
10: CREATE TABLE t04b (id int);
-CREATE
+CREATE TABLE
20: UPDATE t04a SET val=val WHERE id=segid(1,1);
UPDATE 1
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-05.out b/src/test/isolation2/expected/gdd/dist-deadlock-05.out
index 57a8dced788..0611482c312 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-05.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-05.out
@@ -1,7 +1,7 @@
DROP TABLE IF EXISTS t05;
-DROP
+DROP TABLE
CREATE TABLE t05 (id int primary key);
-CREATE
+CREATE TABLE
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
@@ -15,10 +15,10 @@ BEGIN
BEGIN
10: INSERT INTO t05 VALUES(segid(0,1));
-INSERT 1
+INSERT 0 1
20: INSERT INTO t05 VALUES(segid(1,1));
-INSERT 1
+INSERT 0 1
-- seg 0: con20 ==> con10, xid lock
20&: INSERT INTO t05 VALUES(segid(0,1));
@@ -32,5 +32,5 @@ ERROR: canceling statement due to user request: "cancelled by global deadlock d
-- no more deadlock
10<: <... completed>
-INSERT 1
+INSERT 0 1
10q: ...
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-06.out b/src/test/isolation2/expected/gdd/dist-deadlock-06.out
index 08202fcc758..45a116ce075 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-06.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-06.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS t06;
-DROP
+DROP TABLE
CREATE TABLE t06 (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t06 (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-07.out b/src/test/isolation2/expected/gdd/dist-deadlock-07.out
index 40e2fa8253d..37cde3b837c 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-07.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-07.out
@@ -1,15 +1,15 @@
DROP TABLE IF EXISTS t07a;
-DROP
+DROP TABLE
DROP TABLE IF EXISTS t07b;
-DROP
+DROP TABLE
CREATE TABLE t07a (c1 int, c2 int);
-CREATE
+CREATE TABLE
CREATE TABLE t07b (c1 int, c2 int);
-CREATE
+CREATE TABLE
INSERT INTO t07a (c1, c2) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
INSERT INTO t07b (c1, c2) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
@@ -37,7 +37,7 @@ UPDATE 1
UPDATE 1
30: LOCK t07a;
-LOCK
+LOCK TABLE
-- seg 0: con30 ==> con10, xid lock
30&: UPDATE t07b SET c2 = 21 WHERE c1 = segid(0,1);
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-102.out b/src/test/isolation2/expected/gdd/dist-deadlock-102.out
index c7ecda8664e..fc1603bada2 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-102.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-102.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS t102;
-DROP
+DROP TABLE
CREATE TABLE t102 (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t102 (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-103.out b/src/test/isolation2/expected/gdd/dist-deadlock-103.out
index befd5eaab5d..fe7c5eb2dbf 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-103.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-103.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS t103;
-DROP
+DROP TABLE
CREATE TABLE t103 (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t103 (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-104.out b/src/test/isolation2/expected/gdd/dist-deadlock-104.out
index 7feeb022c89..c8bd4aa6880 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-104.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-104.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS t104;
-DROP
+DROP TABLE
CREATE TABLE t104 (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t104 (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-106.out b/src/test/isolation2/expected/gdd/dist-deadlock-106.out
index 4441f8d94ef..1640d6bc857 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-106.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-106.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS t106;
-DROP
+DROP TABLE
CREATE TABLE t106 (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t106 (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
diff --git a/src/test/isolation2/expected/gdd/dist-deadlock-upsert.out b/src/test/isolation2/expected/gdd/dist-deadlock-upsert.out
index db209932428..7a900b6ab96 100644
--- a/src/test/isolation2/expected/gdd/dist-deadlock-upsert.out
+++ b/src/test/isolation2/expected/gdd/dist-deadlock-upsert.out
@@ -4,13 +4,13 @@
-- global deadlock when GDD is enabled.
DROP TABLE IF EXISTS t_upsert;
-DROP
+DROP TABLE
CREATE TABLE t_upsert (id int, val int) distributed by (id);
-CREATE
+CREATE TABLE
CREATE UNIQUE INDEX uidx_t_upsert on t_upsert(id, val);
-CREATE
+CREATE INDEX
INSERT INTO t_upsert (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
@@ -24,10 +24,10 @@ BEGIN
BEGIN
10: INSERT INTO t_upsert VALUES (segid(0,1), segid(0,1)) on conflict (id, val) do update set val = 999;
-INSERT 1
+INSERT 0 1
20: INSERT INTO t_upsert VALUES (segid(1,1), segid(1,1)) on conflict (id, val) do update set val = 888;
-INSERT 1
+INSERT 0 1
select gp_inject_fault('gdd_probe', 'suspend', dbid) from gp_segment_configuration where content=-1 and role='p';
gp_inject_fault
@@ -57,5 +57,5 @@ ERROR: canceling statement due to user request: "cancelled by global deadlock d
-- no more deadlock
10<: <... completed>
-INSERT 1
+INSERT 0 1
10q: ...
diff --git a/src/test/isolation2/expected/gdd/end.out b/src/test/isolation2/expected/gdd/end.out
index 01a10b8a47a..70986307742 100644
--- a/src/test/isolation2/expected/gdd/end.out
+++ b/src/test/isolation2/expected/gdd/end.out
@@ -1,7 +1,7 @@
ALTER SYSTEM RESET gp_enable_global_deadlock_detector;
-ALTER
+ALTER SYSTEM
ALTER SYSTEM RESET gp_global_deadlock_detector_period;
-ALTER
+ALTER SYSTEM
-- Use a utility session on seg 0 to restart the master. This avoids the
-- situation where the session issuing the restart doesn't disappear
diff --git a/src/test/isolation2/expected/gdd/insert_root_partition_truncate_deadlock.out b/src/test/isolation2/expected/gdd/insert_root_partition_truncate_deadlock.out
index bf78325fe93..3392152eb54 100644
--- a/src/test/isolation2/expected/gdd/insert_root_partition_truncate_deadlock.out
+++ b/src/test/isolation2/expected/gdd/insert_root_partition_truncate_deadlock.out
@@ -11,7 +11,7 @@
-- without GDD it is running to show that no deadlock happens.
create table rank_13652 (id int, year int) partition by range (year) (start (2006) end (2009) every (1));
-CREATE
+CREATE TABLE
1: select gp_inject_fault('func_init_plan_end', 'suspend', dbid, current_setting('gp_session_id')::int) from gp_segment_configuration where content = 0 and role = 'p';
gp_inject_fault
@@ -35,7 +35,7 @@ select gp_inject_fault('func_init_plan_end', 'reset', dbid) from gp_segment_conf
(1 row)
1<: <... completed>
-INSERT 30
+INSERT 0 30
2<: <... completed>
ERROR: canceling statement due to user request: "cancelled by global deadlock detector"
@@ -43,4 +43,4 @@ ERROR: canceling statement due to user request: "cancelled by global deadlock d
2q: ...
drop table rank_13652;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/gdd/local-deadlock-03.out b/src/test/isolation2/expected/gdd/local-deadlock-03.out
index 0a0fc49e086..6aa7a02a2a6 100644
--- a/src/test/isolation2/expected/gdd/local-deadlock-03.out
+++ b/src/test/isolation2/expected/gdd/local-deadlock-03.out
@@ -4,7 +4,7 @@
-- deadlock testcases stable we reset the gdd period to 2min so should
-- not be triggered during the local deadlock tests.
ALTER SYSTEM SET gp_global_deadlock_detector_period to '2min';
-ALTER
+ALTER SYSTEM
SELECT pg_reload_conf();
pg_reload_conf
----------------
@@ -18,11 +18,11 @@ SELECT pg_reload_conf();
(1 row)
DROP TABLE IF EXISTS t03;
-DROP
+DROP TABLE
CREATE TABLE t03 (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t03 (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
diff --git a/src/test/isolation2/expected/gdd/non-lock-105.out b/src/test/isolation2/expected/gdd/non-lock-105.out
index 781e6d9929d..537f7225ddc 100644
--- a/src/test/isolation2/expected/gdd/non-lock-105.out
+++ b/src/test/isolation2/expected/gdd/non-lock-105.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS t105;
-DROP
+DROP TABLE
CREATE TABLE t105 (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t105 (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
-- gang creation order is important, reset any guc to force the creation
10: RESET optimizer;
diff --git a/src/test/isolation2/expected/gdd/prepare.out b/src/test/isolation2/expected/gdd/prepare.out
index d8940c2d3cf..73af33a0268 100644
--- a/src/test/isolation2/expected/gdd/prepare.out
+++ b/src/test/isolation2/expected/gdd/prepare.out
@@ -1,17 +1,17 @@
-- t0r is the reference table to provide the data distribution info.
DROP TABLE IF EXISTS t0p;
-DROP
+DROP TABLE
CREATE TABLE t0p (id int, val int);
-CREATE
+CREATE TABLE
INSERT INTO t0p (id, val) SELECT i, i FROM generate_series(1, 100) i;
-INSERT 100
+INSERT 0 100
DROP TABLE IF EXISTS t0r;
-DROP
+DROP TABLE
CREATE TABLE t0r (id int, val int, segid int) DISTRIBUTED REPLICATED;
-CREATE
+CREATE TABLE
INSERT INTO t0r (id, val, segid) SELECT id, val, gp_segment_id from t0p;
-INSERT 100
+INSERT 0 100
-- GDD tests rely on the data distribution, but depending on the number of
-- segments the distribution might be different.
@@ -19,7 +19,7 @@ INSERT 100
-- * `seg` is the segment id, starts from 0;
-- * `idx` is the index on the segment, starts from 1;
CREATE OR REPLACE FUNCTION segid(seg int, idx int) RETURNS int AS $$ SELECT id FROM t0r WHERE segid=$1 ORDER BY id LIMIT 1 OFFSET ($2-1) $$ LANGUAGE sql;
-CREATE
+CREATE FUNCTION
-- In some of the testcases the execution order of two background queries
-- must be enforced not only on master but also on segments, for example
@@ -38,7 +38,7 @@ CREATE
-- So we provide this barrier function to ensure the execution order.
-- It's implemented with sleep now, but should at least work.
CREATE OR REPLACE FUNCTION barrier() RETURNS void AS $$ SELECT pg_sleep(4) $$ LANGUAGE sql;
-CREATE
+CREATE FUNCTION
-- verify the function
-- Data distribution is sensitive to the underlying hash algorithm, we need each
@@ -63,14 +63,14 @@ SELECT segid(2,10) is not null;
-- table to just store the master's data directory path on segment.
CREATE TABLE datadir(a int, dir text);
-CREATE
+CREATE TABLE
INSERT INTO datadir select 1,datadir from gp_segment_configuration where role='p' and content=-1;
-INSERT 1
+INSERT 0 1
ALTER SYSTEM SET gp_enable_global_deadlock_detector TO on;
-ALTER
+ALTER SYSTEM
ALTER SYSTEM SET gp_global_deadlock_detector_period TO 5;
-ALTER
+ALTER SYSTEM
-- Use a utility session on seg 0 to restart the master. This avoids the
-- situation where the session issuing the restart doesn't disappear
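
The one-line segid() helper defined above is the backbone of these deadlock tests: it returns the idx-th id stored on segment seg (read from the replicated snapshot t0r), so a test can deterministically touch a row on a chosen segment whatever the hash algorithm. Unfolded, with a usage sketch:

    CREATE OR REPLACE FUNCTION segid(seg int, idx int) RETURNS int AS $$
        SELECT id FROM t0r
         WHERE segid = $1
         ORDER BY id
         LIMIT 1 OFFSET ($2 - 1)
    $$ LANGUAGE sql;

    -- e.g. lock the first row that lives on segment 0:
    -- UPDATE t01 SET val = val WHERE id = segid(0, 1);
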
diff --git a/src/test/isolation2/expected/gdd/update-deadlock-root-leaf-concurrent-op.out b/src/test/isolation2/expected/gdd/update-deadlock-root-leaf-concurrent-op.out
index c88976980b4..a8954eae80f 100644
--- a/src/test/isolation2/expected/gdd/update-deadlock-root-leaf-concurrent-op.out
+++ b/src/test/isolation2/expected/gdd/update-deadlock-root-leaf-concurrent-op.out
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS part_tbl;
-DROP
+DROP TABLE
CREATE TABLE part_tbl (a int, b int, c int) PARTITION BY RANGE(b) (START(1) END(2) EVERY(1));
-CREATE
+CREATE TABLE
INSERT INTO part_tbl SELECT i, 1, i FROM generate_series(1,100)i;
-INSERT 100
+INSERT 0 100
-- check gdd is enabled
show gp_enable_global_deadlock_detector;
@@ -44,4 +44,4 @@ ROLLBACK
2:ROLLBACK;
ROLLBACK
DROP TABLE IF EXISTS part_tbl;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/gp_terminate_mpp_backends.out b/src/test/isolation2/expected/gp_terminate_mpp_backends.out
index 941ccef8ddc..6c8ba84db6a 100644
--- a/src/test/isolation2/expected/gp_terminate_mpp_backends.out
+++ b/src/test/isolation2/expected/gp_terminate_mpp_backends.out
@@ -1,6 +1,6 @@
-- test gp_terminate_mpp_backends
1:create table gp_terminate_mpp_backends_t (a int);
-CREATE
+CREATE TABLE
select gp_terminate_mpp_backends() from gp_dist_random('gp_id');
gp_terminate_mpp_backends
diff --git a/src/test/isolation2/expected/gpdispatch.out b/src/test/isolation2/expected/gpdispatch.out
index 67f569f65f4..8f6ca9e71a6 100644
--- a/src/test/isolation2/expected/gpdispatch.out
+++ b/src/test/isolation2/expected/gpdispatch.out
@@ -2,7 +2,7 @@
-- Report on https://github.com/greenplum-db/gpdb/issues/12399
create extension if not exists gp_inject_fault;
-CREATE
+CREATE EXTENSION
1: select gp_inject_fault('make_dispatch_result_error', 'skip', dbid) from gp_segment_configuration where role = 'p' and content = -1;
gp_inject_fault
@@ -41,9 +41,9 @@ select gp_inject_fault('make_dispatch_result_error', 'reset', dbid) from gp_segm
--
create table test_waitevent(i int);
-CREATE
+CREATE TABLE
insert into test_waitevent select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
1: set optimizer = off;
SET
@@ -59,7 +59,7 @@ SET
1&: WITH a1 as (select * from test_waitevent), a2 as (select * from test_waitevent) SELECT sum(a1.i) FROM a1 INNER JOIN a2 ON a2.i = a1.i UNION ALL SELECT count(a1.i) FROM a1 INNER JOIN a2 ON a2.i = a1.i;
-- start_ignore
2: copy (select pg_stat_get_activity(NULL) from gp_dist_random('gp_id') where gp_segment_id=0) to '/tmp/_gpdb_test_output.txt';
-COPY 9
+COPY 10
-- end_ignore
2: select gp_wait_until_triggered_fault('shareinput_writer_notifyready', 1, 2);
gp_wait_until_triggered_fault
@@ -98,13 +98,13 @@ COPY 9
-- Case for cdbgang_createGang_async
1: create table t_12703(a int);
-CREATE
+CREATE TABLE
1:begin;
BEGIN
-- make a cursor so that we have a named portal
1: declare cur12703 cursor for select * from t_12703;
-DECLARE
+DECLARE CURSOR
2: select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=1), 'stop');
pg_ctl
@@ -135,7 +135,7 @@ DECLARE
1: select * from t_12703;
ERROR: gang was lost due to cluster reconfiguration (cdbgang_async.c:98)
1: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
@@ -143,7 +143,7 @@ ABORT
-- Case for cdbCopyEndInternal
-- Provide some data to copy in
4: insert into t_12703 select * from generate_series(1, 10)i;
-INSERT 10
+INSERT 0 10
4: copy t_12703 to '/tmp/t_12703';
COPY 10
-- make copy in statement hang at the entry point of cdbCopyEndInternal
@@ -193,7 +193,7 @@ ERROR: MPP detected 1 segment failures, system is reconnected
1
(1 row)
2: end;
-END
+COMMIT
2q: ...
!\retcode gprecoverseg -aF --no-progress;
diff --git a/src/test/isolation2/expected/gpdispatch_1.out b/src/test/isolation2/expected/gpdispatch_1.out
index edd307d6d14..d54e376acd7 100644
--- a/src/test/isolation2/expected/gpdispatch_1.out
+++ b/src/test/isolation2/expected/gpdispatch_1.out
@@ -2,7 +2,7 @@
-- Report on https://github.com/greenplum-db/gpdb/issues/12399
create extension if not exists gp_inject_fault;
-CREATE
+CREATE EXTENSION
1: select gp_inject_fault('make_dispatch_result_error', 'skip', dbid) from gp_segment_configuration where role = 'p' and content = -1;
gp_inject_fault
@@ -41,9 +41,9 @@ select gp_inject_fault('make_dispatch_result_error', 'reset', dbid) from gp_segm
--
create table test_waitevent(i int);
-CREATE
+CREATE TABLE
insert into test_waitevent select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
1: set optimizer = off;
SET
@@ -98,13 +98,13 @@ COPY 9
-- Case for cdbgang_createGang_async
1: create table t_12703(a int);
-CREATE
+CREATE TABLE
1:begin;
BEGIN
-- make a cursor so that we have a named portal
1: declare cur12703 cursor for select * from t_12703;
-DECLARE
+DECLARE CURSOR
2: select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=1), 'stop');
pg_ctl
@@ -127,7 +127,7 @@ ERROR: Error on receive from seg1 slice1 127.0.1.1:7003 pid=58391: server close
This probably means the server terminated abnormally
before or while processing the request.
1: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
@@ -135,7 +135,7 @@ ABORT
-- Case for cdbCopyEndInternal
-- Provide some data to copy in
4: insert into t_12703 select * from generate_series(1, 10)i;
-INSERT 10
+INSERT 0 10
4: copy t_12703 to '/tmp/t_12703';
COPY 10
-- make copy in statement hang at the entry point of cdbCopyEndInternal
@@ -180,7 +180,7 @@ ERROR: MPP detected 1 segment failures, system is reconnected
1
(1 row)
2: end;
-END
+COMMIT
2q: ...
!\retcode gprecoverseg -aF --no-progress;
diff --git a/src/test/isolation2/expected/gpexpand_catalog_lock.out b/src/test/isolation2/expected/gpexpand_catalog_lock.out
index b060a23cfcc..580eaaf0e7d 100644
--- a/src/test/isolation2/expected/gpexpand_catalog_lock.out
+++ b/src/test/isolation2/expected/gpexpand_catalog_lock.out
@@ -1,12 +1,12 @@
drop table if exists t20;
-DROP
+DROP TABLE
drop table if exists t30;
-DROP
+DROP TABLE
create table t20 (c1 int, c2 int) distributed by (c1);
-CREATE
+CREATE TABLE
create table t30 (c1 int, c2 int) distributed by (c1);
-CREATE
+CREATE TABLE
-- c10, c11 simulate gpexpand's catalog lock protection
-- they will acquire the catalog lock in exclusive mode
@@ -34,7 +34,7 @@ BEGIN
11&: select gp_expand_lock_catalog();
10: end;
-END
+COMMIT
-- c10 released the lock, c11 acquired it now
11<: <... completed>
@@ -43,7 +43,7 @@ END
(1 row)
11: end;
-END
+COMMIT
--
-- client sessions do not block each other on catalog changes
@@ -56,14 +56,14 @@ BEGIN
-- c20 and c30 both acquired the catalog lock in shared mode
20: create table t21 (c1 int, c2 int) distributed by (c1);
-CREATE
+CREATE TABLE
30: create table t31 (c1 int, c2 int) distributed by (c1);
-CREATE
+CREATE TABLE
20: insert into t21 values (1,1);
-INSERT 1
+INSERT 0 1
30: insert into t31 values (1,1);
-INSERT 1
+INSERT 0 1
20: rollback;
ROLLBACK
@@ -83,17 +83,17 @@ BEGIN
-- c20 and c30 both acquired the catalog lock in shared mode
20: create table t21 (c1 int, c2 int) distributed by (c1);
-CREATE
+CREATE TABLE
30: create table t31 (c1 int, c2 int) distributed by (c1);
-CREATE
+CREATE TABLE
-- c10 can not acquire the lock in exclusive mode ...
10&: select gp_expand_lock_catalog();
20: insert into t21 values (1,1);
-INSERT 1
+INSERT 0 1
30: insert into t31 values (1,1);
-INSERT 1
+INSERT 0 1
20: rollback;
ROLLBACK
@@ -107,7 +107,7 @@ ROLLBACK
(1 row)
10: end;
-END
+COMMIT
--
-- the catalog lock can be acquired in order
@@ -122,7 +122,7 @@ BEGIN
-- c20 acquired the catalog lock in shared mode
20: create table t21 (c1 int, c2 int) distributed by (c1);
-CREATE
+CREATE TABLE
-- c10 has to wait for c20
10&: select gp_expand_lock_catalog();
@@ -135,7 +135,7 @@ ROLLBACK
-- c20 can still make catalog changes
20: drop table t21;
-DROP
+DROP TABLE
20: rollback;
ROLLBACK
@@ -147,7 +147,7 @@ ROLLBACK
(1 row)
10: end;
-END
+COMMIT
--
-- gpexpand does not block DMLs or readonly queries to catalogs
@@ -169,7 +169,7 @@ BEGIN
-- c20 and c30 can still run DMLs
20: insert into t20 values (1,1);
-INSERT 1
+INSERT 0 1
20: select * from t20;
c1 | c2
----+----
@@ -181,7 +181,7 @@ UPDATE 1
DELETE 1
30: insert into t30 values (1,1);
-INSERT 1
+INSERT 0 1
30: select * from t30;
c1 | c2
----+----
@@ -210,7 +210,7 @@ ROLLBACK
ROLLBACK
10: end;
-END
+COMMIT
--
-- catalog changes are disallowed when gpexpand is in progress
@@ -246,4 +246,4 @@ ROLLBACK
ROLLBACK
10: end;
-END
+COMMIT
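
The interaction this test exercises, reduced to its core: ordinary DDL takes gpexpand's catalog lock in shared mode, while gp_expand_lock_catalog() requests it exclusively, so a client transaction holding uncommitted DDL makes gpexpand wait. A two-session sketch in the same spec syntax, with session numbers as in the test:

    20: begin;
    20: create table t21 (c1 int, c2 int);  -- shared catalog lock
    10&: select gp_expand_lock_catalog();   -- exclusive mode: must wait
    20: rollback;                           -- releases the shared lock ...
    10<:                                    -- ... and gpexpand acquires it
    10: end;
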
diff --git a/src/test/isolation2/expected/insert_root_partition_truncate_deadlock_without_gdd.out b/src/test/isolation2/expected/insert_root_partition_truncate_deadlock_without_gdd.out
index 4724a9cf7ef..25834116f2f 100644
--- a/src/test/isolation2/expected/insert_root_partition_truncate_deadlock_without_gdd.out
+++ b/src/test/isolation2/expected/insert_root_partition_truncate_deadlock_without_gdd.out
@@ -11,7 +11,7 @@
-- without GDD it is running to show that no deadlock happens.
create table rank_13652 (id int, year int) partition by range (year) (start (2006) end (2009) every (1));
-CREATE
+CREATE TABLE
1: select gp_inject_fault('func_init_plan_end', 'suspend', dbid, current_setting('gp_session_id')::int) from gp_segment_configuration where content = 0 and role = 'p';
gp_inject_fault
@@ -35,12 +35,12 @@ select gp_inject_fault('func_init_plan_end', 'reset', dbid) from gp_segment_conf
(1 row)
1<: <... completed>
-INSERT 30
+INSERT 0 30
2<: <... completed>
-TRUNCATE
+TRUNCATE TABLE
1q: ...
2q: ...
drop table rank_13652;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/instr_in_shmem_terminate.out b/src/test/isolation2/expected/instr_in_shmem_terminate.out
index 2bb0a86a6c9..338f5fd9b9a 100644
--- a/src/test/isolation2/expected/instr_in_shmem_terminate.out
+++ b/src/test/isolation2/expected/instr_in_shmem_terminate.out
@@ -5,34 +5,34 @@
-- slots in shmem should be recycled correctly.
DROP SCHEMA IF EXISTS QUERY_METRICS CASCADE;
-DROP
+DROP SCHEMA
CREATE SCHEMA QUERY_METRICS;
-CREATE
+CREATE SCHEMA
SET SEARCH_PATH=QUERY_METRICS;
SET
CREATE EXTERNAL WEB TABLE __gp_localid ( localid int ) EXECUTE E'echo $GP_SEGMENT_ID' FORMAT 'TEXT';
-CREATE
+CREATE EXTERNAL TABLE
GRANT SELECT ON TABLE __gp_localid TO public;
GRANT
-CREATE EXTERNAL WEB TABLE __gp_masterid ( masterid int ) EXECUTE E'echo $GP_SEGMENT_ID' ON COORDINATOR FORMAT 'TEXT';
-CREATE
-GRANT SELECT ON TABLE __gp_masterid TO public;
+CREATE EXTERNAL WEB TABLE __gp_coordinatorid ( coordinatorid int ) EXECUTE E'echo $GP_SEGMENT_ID' ON COORDINATOR FORMAT 'TEXT';
+CREATE EXTERNAL TABLE
+GRANT SELECT ON TABLE __gp_coordinatorid TO public;
GRANT
CREATE FUNCTION gp_instrument_shmem_detail_f() RETURNS SETOF RECORD AS '$libdir/gp_instrument_shmem', 'gp_instrument_shmem_detail' LANGUAGE C IMMUTABLE;
-CREATE
+CREATE FUNCTION
GRANT EXECUTE ON FUNCTION gp_instrument_shmem_detail_f() TO public;
GRANT
-CREATE VIEW gp_instrument_shmem_detail AS WITH all_entries AS ( SELECT C.* FROM __gp_localid, gp_instrument_shmem_detail_f() as C ( tmid int4,ssid int4,ccnt int2,segid int2,pid int4 ,nid int2,tuplecount int8,nloops int8,ntuples int8 ) UNION ALL SELECT C.* FROM __gp_masterid, gp_instrument_shmem_detail_f() as C ( tmid int4,ssid int4,ccnt int2,segid int2,pid int4 ,nid int2,tuplecount int8,nloops int8,ntuples int8 )) SELECT tmid, ssid, ccnt,segid, pid, nid, tuplecount, nloops, ntuples FROM all_entries ORDER BY segid;
-CREATE
+CREATE VIEW gp_instrument_shmem_detail AS WITH all_entries AS ( SELECT C.* FROM __gp_localid, gp_instrument_shmem_detail_f() as C ( tmid int4,ssid int4,ccnt int2,segid int2,pid int4 ,nid int2,tuplecount int8,nloops int8,ntuples int8 ) UNION ALL SELECT C.* FROM __gp_coordinatorid, gp_instrument_shmem_detail_f() as C ( tmid int4,ssid int4,ccnt int2,segid int2,pid int4 ,nid int2,tuplecount int8,nloops int8,ntuples int8 )) SELECT tmid, ssid, ccnt,segid, pid, nid, tuplecount, nloops, ntuples FROM all_entries ORDER BY segid;
+CREATE VIEW
CREATE TABLE a (id int, c char) DISTRIBUTED BY (id);
-CREATE
+CREATE TABLE
INSERT INTO a SELECT *, 'a' FROM generate_series(1, 50);
-INSERT 50
+INSERT 0 50
SET OPTIMIZER=OFF;
SET
ANALYZE a;
@@ -48,7 +48,7 @@ SELECT count(*) FROM (SELECT 1 FROM gp_instrument_shmem_detail GROUP BY ssid, cc
(1 row)
CREATE TABLE foo AS SELECT i a, i b FROM generate_series(1, 10) i;
-CREATE 10
+SELECT 10
-- this query will be terminated by 'test pg_terminate_backend'
1&:EXPLAIN ANALYZE CREATE TEMP TABLE t1 AS SELECT count(*) FROM QUERY_METRICS.foo WHERE pg_sleep(200) IS NULL;
@@ -156,9 +156,9 @@ SELECT count(*) FROM foo, pg_sleep(2);
-- test 4: Merge Append should expose plan_node_id for whole plan tree
CREATE TABLE QUERY_METRICS.mergeappend_test (a int, b int, x int) DISTRIBUTED BY (a,b);
-CREATE
+CREATE TABLE
INSERT INTO QUERY_METRICS.mergeappend_test SELECT g/100, g/100, g FROM generate_series(1, 500) g;
-INSERT 500
+INSERT 0 500
ANALYZE QUERY_METRICS.mergeappend_test;
ANALYZE
@@ -270,5 +270,5 @@ SELECT count(*) FROM (SELECT 1 FROM gp_instrument_shmem_detail GROUP BY ssid, cc
-- start_ignore
DROP SCHEMA IF EXISTS QUERY_METRICS CASCADE;
-DROP
+DROP SCHEMA
-- end_ignore
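
The __gp_localid / __gp_coordinatorid pair above (the latter renamed from __gp_masterid) is a common fan-out idiom: an external web table runs its command once per segment, or once on the coordinator with ON COORDINATOR, so joining a UDF against it executes the function cluster-wide. The definitions, restated outside the one-line spec format:

    CREATE EXTERNAL WEB TABLE __gp_localid (localid int)
        EXECUTE E'echo $GP_SEGMENT_ID' FORMAT 'TEXT';
    CREATE EXTERNAL WEB TABLE __gp_coordinatorid (coordinatorid int)
        EXECUTE E'echo $GP_SEGMENT_ID' ON COORDINATOR FORMAT 'TEXT';

    -- one row per segment from the first, a single -1 from the second:
    SELECT localid FROM __gp_localid
    UNION ALL
    SELECT coordinatorid FROM __gp_coordinatorid;
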
diff --git a/src/test/isolation2/expected/invalidated_toast_index.out b/src/test/isolation2/expected/invalidated_toast_index.out
index 2c7e974813e..fe1351a4d75 100644
--- a/src/test/isolation2/expected/invalidated_toast_index.out
+++ b/src/test/isolation2/expected/invalidated_toast_index.out
@@ -5,19 +5,19 @@
--
CREATE TABLE toastable_heap(a text, b varchar, c int);
-CREATE
+CREATE TABLE
-- Force external storage for toasted columns.
ALTER TABLE toastable_heap ALTER COLUMN a SET STORAGE EXTERNAL;
-ALTER
+ALTER TABLE
ALTER TABLE toastable_heap ALTER COLUMN b SET STORAGE EXTERNAL;
-ALTER
+ALTER TABLE
-- Insert two values that we know will be toasted.
INSERT INTO toastable_heap VALUES(repeat('a',100000), repeat('b',100001), 1);
-INSERT 1
+INSERT 0 1
INSERT INTO toastable_heap VALUES(repeat('A',100000), repeat('B',100001), 2);
-INSERT 1
+INSERT 0 1
-- start_ignore
--
@@ -34,11 +34,7 @@ SET
SET
SET
-*U: UPDATE pg_index
- SET indisvalid = false
- FROM pg_class heap
- WHERE indrelid = heap.reltoastrelid
- AND heap.oid = 'toastable_heap'::regclass;
+*U: UPDATE pg_index SET indisvalid = false FROM pg_class heap WHERE indrelid = heap.reltoastrelid AND heap.oid = 'toastable_heap'::regclass;
UPDATE 1
UPDATE 1
@@ -60,4 +56,4 @@ ERROR: no valid index found for toast relation with Oid 107484 (tuptoaster.c:10
-- Don't leave an unusable table in the DB for others to trip over.
DROP TABLE toastable_heap;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/lockmodes.out b/src/test/isolation2/expected/lockmodes.out
index 16635f9618b..01ce04b0582 100644
--- a/src/test/isolation2/expected/lockmodes.out
+++ b/src/test/isolation2/expected/lockmodes.out
@@ -1,14 +1,14 @@
-- table to just store the master's data directory path on segment.
CREATE TABLE lockmodes_datadir(a int, dir text);
-CREATE
+CREATE TABLE
INSERT INTO lockmodes_datadir select 1,datadir from gp_segment_configuration where role='p' and content=-1;
-INSERT 1
+INSERT 0 1
1: set optimizer = off;
SET
create or replace view show_locks_lockmodes as select locktype, mode, granted, relation::regclass from pg_locks where gp_segment_id = -1 and locktype = 'relation' and relation::regclass::text like 't_lockmods%';
-CREATE
+CREATE VIEW
show gp_enable_global_deadlock_detector;
gp_enable_global_deadlock_detector
@@ -22,25 +22,25 @@ show gp_enable_global_deadlock_detector;
-- 1.1 test for heap tables
create table t_lockmods (c int) distributed randomly;
-CREATE
+CREATE TABLE
insert into t_lockmods select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
analyze t_lockmods;
ANALYZE
create table t_lockmods1 (c int) distributed randomly;
-CREATE
+CREATE TABLE
create table t_lockmods_rep(c int) distributed replicated;
-CREATE
+CREATE TABLE
-- See github issue: https://github.com/greenplum-db/gpdb/issues/9449
-- upsert may lock tuples on segment, so we should upgrade lock level
-- on QD if GDD is disabled.
create table t_lockmods_upsert(a int, b int) distributed by (a);
-CREATE
+CREATE TABLE
create unique index uidx_t_lockmodes_upsert on t_lockmods_upsert(a, b);
-CREATE
+CREATE INDEX
-- add analyze to avoid auto vacuum when executing first insert
analyze t_lockmods_upsert;
ANALYZE
@@ -71,7 +71,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -98,7 +98,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -125,7 +125,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -152,7 +152,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -180,7 +180,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -208,7 +208,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -236,7 +236,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -264,7 +264,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.1.2 update | delete should hold ExclusiveLock on result relations
1: begin;
@@ -277,7 +277,7 @@ UPDATE 5
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -289,33 +289,33 @@ DELETE 5
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.1.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.1.4 upsert should hold ExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_upsert values (1, 1) on conflict(a, b) do update set b = 99;
-INSERT 1
+INSERT 0 1
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+-------------------
relation | ExclusiveLock | t | t_lockmods_upsert
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.1.5 use cached plan should be consistent with no cached plan
1: prepare select_for_update as select * from t_lockmods for update;
@@ -352,7 +352,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -371,7 +371,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -390,7 +390,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
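
The hunk below reflects one more tag change: executing a prepared statement now reports the tag of the underlying statement rather than a generic EXECUTE count, as the expected output shows (UPDATE 5, DELETE 5, INSERT 0 5). A sketch with a hypothetical statement name:

    prepare insert_demo as
        insert into t_lockmods select * from generate_series(1, 5);
    execute insert_demo;   -- tag: INSERT 0 5 (previously shown as EXECUTE 5)
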
@@ -409,65 +409,65 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute update_tlockmods;
-EXECUTE 5
+UPDATE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute delete_tlockmods;
-EXECUTE 5
+DELETE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute insert_tlockmods;
-EXECUTE 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute upsert_tlockmods;
-EXECUTE 1
+INSERT 0 1
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+-------------------
relation | ExclusiveLock | t | t_lockmods_upsert
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.2 test for AO table
create table t_lockmods_ao (c int) with (appendonly=true) distributed randomly;
-CREATE
+CREATE TABLE
insert into t_lockmods_ao select * from generate_series(1, 8);
-INSERT 8
+INSERT 0 8
analyze t_lockmods_ao;
ANALYZE
create table t_lockmods_ao1 (c int) with (appendonly=true) distributed randomly;
-CREATE
+CREATE TABLE
-- 1.2.1 select for (update|share|key share|no key update) should hold ExclusiveLock on range tables
1: begin;
@@ -498,7 +498,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -528,7 +528,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -558,7 +558,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -588,7 +588,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -616,7 +616,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -644,7 +644,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -672,7 +672,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -700,7 +700,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.2.2 update | delete should hold ExclusiveLock on result relations
1: begin;
@@ -713,7 +713,7 @@ UPDATE 8
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -725,20 +725,20 @@ DELETE 8
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.2.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_ao select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.2.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update_ao as select * from t_lockmods_ao for update;
@@ -776,7 +776,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -798,7 +798,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -820,7 +820,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -842,43 +842,43 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute update_tlockmods_ao;
-EXECUTE 8
+UPDATE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute delete_tlockmods_ao;
-EXECUTE 8
+DELETE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute insert_tlockmods_ao;
-EXECUTE 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.3 With limit clause, such case should
-- acquire ExclusiveLock on the whole table and do not generate lockrows node
@@ -908,7 +908,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.4 For replicated table, we should lock the entire table on ExclusiveLock
1: begin;
@@ -931,7 +931,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_rep
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.5 test order-by's plan
1: begin;
@@ -962,7 +962,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.6 select for update NOWAIT/SKIP LOCKED
-- NOWAIT/SKIP LOCKED should not affect the table-level lock
@@ -979,7 +979,7 @@ BEGIN
(5 rows)
2&: select * from t_lockmods for update nowait;
1: abort;
-ABORT
+ROLLBACK
2<: <... completed>
c
---
@@ -1003,7 +1003,7 @@ BEGIN
(5 rows)
2&: select * from t_lockmods for update skip locked;
1: abort;
-ABORT
+ROLLBACK
2<: <... completed>
c
---
@@ -1026,12 +1026,12 @@ ABORT
-- Details: https://groups.google.com/a/greenplum.org/g/gpdb-dev/c/wAPKpJzhbpM
-- Issue: https://github.com/greenplum-db/gpdb/issues/13652
1:DROP TABLE IF EXISTS t_lockmods_part_tbl_dml;
-DROP
+DROP TABLE
1:CREATE TABLE t_lockmods_part_tbl_dml (a int, b int, c int) PARTITION BY RANGE(b) (START(1) END(3) EVERY(1));
-CREATE
+CREATE TABLE
1:INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
--
1: BEGIN;
@@ -1052,7 +1052,7 @@ ROLLBACK
1: BEGIN;
BEGIN
1: INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
-- without GDD, it will lock all leaf partitions on QD
1: select * from show_locks_lockmodes;
locktype | mode | granted | relation
@@ -1134,8 +1134,8 @@ ROLLBACK
-- enable gdd
ALTER SYSTEM SET gp_enable_global_deadlock_detector TO on;
-ALTER
--- Use utility session on seg 0 to restart master. This way avoids the
+ALTER SYSTEM
+-- Use a utility session on seg 0 to restart the coordinator. This way avoids the
-- situation where session issuing the restart doesn't disappear
-- itself.
1U:SELECT pg_ctl(dir, 'restart') from lockmodes_datadir;
@@ -1191,7 +1191,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1219,7 +1219,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1247,7 +1247,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1275,7 +1275,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1303,7 +1303,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1331,7 +1331,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1359,7 +1359,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1387,7 +1387,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.1.2 update | delete should hold RowExclusiveLock on result relations
@@ -1401,7 +1401,7 @@ UPDATE 5
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1413,33 +1413,33 @@ DELETE 5
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.1.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.1.4 upsert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_upsert values (1, 1) on conflict(a, b) do update set b = 99;
-INSERT 1
+INSERT 0 1
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+-------------------
relation | RowExclusiveLock | t | t_lockmods_upsert
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.1.5 use cached plan should be consistent with no cached plan
1: prepare select_for_update as select * from t_lockmods for update;
@@ -1476,7 +1476,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1495,7 +1495,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1514,7 +1514,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1533,55 +1533,55 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute update_tlockmods;
-EXECUTE 5
+UPDATE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute delete_tlockmods;
-EXECUTE 5
+DELETE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute insert_tlockmods;
-EXECUTE 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute upsert_tlockmods;
-EXECUTE 1
+INSERT 0 1
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+-------------------
relation | RowExclusiveLock | t | t_lockmods_upsert
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.2 test for AO table
@@ -1614,7 +1614,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1644,7 +1644,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1674,7 +1674,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1704,7 +1704,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1732,7 +1732,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1760,7 +1760,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1788,7 +1788,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1816,7 +1816,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.2.2 update | delete should hold ExclusiveLock on result relations
1: begin;
@@ -1829,7 +1829,7 @@ UPDATE 8
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1841,20 +1841,20 @@ DELETE 8
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.2.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_ao select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.2.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update_ao as select * from t_lockmods_ao for update;
@@ -1892,7 +1892,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1914,7 +1914,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1936,7 +1936,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1958,43 +1958,43 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute update_tlockmods_ao;
-EXECUTE 8
+UPDATE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute delete_tlockmods_ao;
-EXECUTE 8
+DELETE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute insert_tlockmods_ao;
-EXECUTE 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.3 With limit clause, such case should
-- acquire ExclusiveLock on the whole table and do not generate lockrows node
@@ -2026,7 +2026,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.4 For replicated table, we should lock the entire table on ExclusiveLock
1: begin;
@@ -2049,7 +2049,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_rep
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.5 test order-by's plan
1: begin;
@@ -2081,7 +2081,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.6 select for update NOWAIT/SKIP LOCKED
-- with GDD, select for update could be optimized to not upgrade lock.
@@ -2123,7 +2123,7 @@ BEGIN
2: select * from t_lockmods for update nowait;
ERROR: could not obtain lock on row in relation "t_lockmods" (seg1 slice1 10.140.0.3:7003 pid=15182)
1: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
@@ -2199,7 +2199,7 @@ ROLLBACK
1: BEGIN;
BEGIN
1: INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
-- With GDD enabled, QD will only hold lock on root for insert
1: select * from show_locks_lockmodes;
locktype | mode | granted | relation
@@ -2210,14 +2210,34 @@ INSERT 10
ROLLBACK
1q: ...
+1: CREATE TABLE t_lockmods_aopart(i int, t text) USING ao_row PARTITION BY RANGE(i) (START(1) END(5) EVERY(1));
+CREATE TABLE
+1: BEGIN;
+BEGIN
+1: DELETE FROM t_lockmods_aopart WHERE i = 4;
+DELETE 0
+-- With GDD enabled, QD will only hold lock on root for delete
+1: select * from show_locks_lockmodes;
+ locktype | mode | granted | relation
+----------+-----------------+---------+---------------------------
+ relation | ExclusiveLock | t | t_lockmods_aopart_1_prt_4
+ relation | AccessShareLock | t | t_lockmods_aopart
+ relation | ExclusiveLock | t | t_lockmods_aopart
+(3 rows)
+1: COMMIT;
+COMMIT
+1: DROP TABLE t_lockmods_aopart;
+DROP TABLE
+1q: ...
+
-- 2.8 Verify behaviors of select with locking clause (i.e. select for update)
-- when running concurrently with index creation, for Heap tables.
-- For AO/CO tables, refer to create_index_allows_readonly.source.
1: CREATE TABLE create_index_select_for_update_tbl(a int, b int);
-CREATE
+CREATE TABLE
1: INSERT INTO create_index_select_for_update_tbl SELECT i,i FROM generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
1: set optimizer = off;
SET
@@ -2243,7 +2263,7 @@ SET
BEGIN
-- expect no blocking
2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a);
-CREATE
+CREATE INDEX
2: COMMIT;
COMMIT
@@ -2251,12 +2271,12 @@ COMMIT
COMMIT
2: DROP INDEX create_index_select_for_update_idx;
-DROP
+DROP INDEX
2: BEGIN;
BEGIN
2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a);
-CREATE
+CREATE INDEX
1: BEGIN;
BEGIN
@@ -2275,12 +2295,12 @@ COMMIT
COMMIT
2: DROP INDEX create_index_select_for_update_idx;
-DROP
+DROP INDEX
-- 2.8.2 with GDD disabled, expect blocking
-- reset gdd
2: ALTER SYSTEM RESET gp_enable_global_deadlock_detector;
-ALTER
+ALTER SYSTEM
-- close session to avoid renew session failure after restart
2q: ...
1U:SELECT pg_ctl(dir, 'restart') from lockmodes_datadir;
@@ -2317,17 +2337,17 @@ BEGIN
COMMIT
2<: <... completed>
-CREATE
+CREATE INDEX
2: COMMIT;
COMMIT
2: DROP INDEX create_index_select_for_update_idx;
-DROP
+DROP INDEX
2: BEGIN;
BEGIN
2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a);
-CREATE
+CREATE INDEX
1: BEGIN;
BEGIN
@@ -2346,7 +2366,7 @@ COMMIT
COMMIT
1: drop table lockmodes_datadir;
-DROP
+DROP TABLE
1q: ...
2q: ...
@@ -2355,13 +2375,13 @@ DROP
-- require an AccessExclusiveLock.
-- Case 1. The analyze result is expected when there's concurrent drop on child.
1:create table analyzedrop(a int) partition by range(a);
-CREATE
+CREATE TABLE
1:create table analyzedrop_1 partition of analyzedrop for values from (0) to (10);
-CREATE
+CREATE TABLE
1:create table analyzedrop_2 partition of analyzedrop for values from (10) to (20);
-CREATE
+CREATE TABLE
1:insert into analyzedrop select * from generate_series(0,19);
-INSERT 20
+INSERT 0 20
1:select gp_inject_fault_infinite('merge_leaf_stats_after_find_children', 'suspend', dbid) from gp_segment_configuration where content = -1 and role = 'p';
gp_inject_fault_infinite
--------------------------
@@ -2377,7 +2397,7 @@ INSERT 20
1<: <... completed>
ANALYZE
2<: <... completed>
-DROP
+DROP TABLE
3:select * from pg_stats where tablename like 'analyzedrop%';
schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram
------------+---------------+---------+-----------+-----------+-----------+------------+------------------+-------------------+--------------------------------------------------+-------------+-------------------+------------------------+----------------------
@@ -2401,9 +2421,9 @@ DROP
1<: <... completed>
ANALYZE
2<: <... completed>
-DROP
+DROP TABLE
3<: <... completed>
-DROP
+DROP TABLE
--empty as table is dropped
4:select * from pg_stats where tablename like 'analyzedrop%';
schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram
diff --git a/src/test/isolation2/expected/lockmodes_optimizer.out b/src/test/isolation2/expected/lockmodes_optimizer.out
index 9077e116cf2..420b205ad64 100644
--- a/src/test/isolation2/expected/lockmodes_optimizer.out
+++ b/src/test/isolation2/expected/lockmodes_optimizer.out
@@ -1,14 +1,14 @@
-- table to just store the master's data directory path on segment.
CREATE TABLE lockmodes_datadir(a int, dir text);
-CREATE
+CREATE TABLE
INSERT INTO lockmodes_datadir select 1,datadir from gp_segment_configuration where role='p' and content=-1;
-INSERT 1
+INSERT 0 1
1: set optimizer = off;
SET
create or replace view show_locks_lockmodes as select locktype, mode, granted, relation::regclass from pg_locks where gp_segment_id = -1 and locktype = 'relation' and relation::regclass::text like 't_lockmods%';
-CREATE
+CREATE VIEW
show gp_enable_global_deadlock_detector;
gp_enable_global_deadlock_detector
@@ -22,25 +22,25 @@ show gp_enable_global_deadlock_detector;
-- 1.1 test for heap tables
create table t_lockmods (c int) distributed randomly;
-CREATE
+CREATE TABLE
insert into t_lockmods select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
analyze t_lockmods;
ANALYZE
create table t_lockmods1 (c int) distributed randomly;
-CREATE
+CREATE TABLE
create table t_lockmods_rep(c int) distributed replicated;
-CREATE
+CREATE TABLE
-- See github issue: https://github.com/greenplum-db/gpdb/issues/9449
-- upsert may lock tuples on segment, so we should upgrade lock level
-- on QD if GDD is disabled.
create table t_lockmods_upsert(a int, b int) distributed by (a);
-CREATE
+CREATE TABLE
create unique index uidx_t_lockmodes_upsert on t_lockmods_upsert(a, b);
-CREATE
+CREATE INDEX
-- add analyze to avoid auto vacuum when executing first insert
analyze t_lockmods_upsert;
ANALYZE
@@ -71,7 +71,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -98,7 +98,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -125,7 +125,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -152,7 +152,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -180,7 +180,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -208,7 +208,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -236,7 +236,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -264,7 +264,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.1.2 update | delete should hold ExclusiveLock on result relations
1: begin;
@@ -277,7 +277,7 @@ UPDATE 5
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -289,33 +289,33 @@ DELETE 5
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.1.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.1.4 upsert should hold ExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_upsert values (1, 1) on conflict(a, b) do update set b = 99;
-INSERT 1
+INSERT 0 1
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+-------------------
relation | ExclusiveLock | t | t_lockmods_upsert
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.1.5 use cached plan should be consistent with no cached plan
1: prepare select_for_update as select * from t_lockmods for update;
@@ -352,7 +352,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -371,7 +371,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -390,7 +390,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -409,65 +409,65 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute update_tlockmods;
-EXECUTE 5
+UPDATE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute delete_tlockmods;
-EXECUTE 5
+DELETE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+------------
relation | ExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute insert_tlockmods;
-EXECUTE 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute upsert_tlockmods;
-EXECUTE 1
+INSERT 0 1
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+-------------------
relation | ExclusiveLock | t | t_lockmods_upsert
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.2 test for AO table
create table t_lockmods_ao (c int) with (appendonly=true) distributed randomly;
-CREATE
+CREATE TABLE
insert into t_lockmods_ao select * from generate_series(1, 8);
-INSERT 8
+INSERT 0 8
analyze t_lockmods_ao;
ANALYZE
create table t_lockmods_ao1 (c int) with (appendonly=true) distributed randomly;
-CREATE
+CREATE TABLE
-- 1.2.1 select for (update|share|key share|no key update) should hold ExclusiveLock on range tables
1: begin;
@@ -498,7 +498,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -528,7 +528,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -558,7 +558,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -588,7 +588,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -616,7 +616,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -644,7 +644,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -672,7 +672,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -700,7 +700,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.2.2 update | delete should hold ExclusiveLock on result relations
1: begin;
@@ -713,7 +713,7 @@ UPDATE 8
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -725,20 +725,20 @@ DELETE 8
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.2.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_ao select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.2.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update_ao as select * from t_lockmods_ao for update;
@@ -776,7 +776,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -798,7 +798,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -820,7 +820,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -842,43 +842,43 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute update_tlockmods_ao;
-EXECUTE 8
+UPDATE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute delete_tlockmods_ao;
-EXECUTE 8
+DELETE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute insert_tlockmods_ao;
-EXECUTE 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 1.3 With limit clause, such case should
-- acquire ExclusiveLock on the whole table and do not generate lockrows node
@@ -908,7 +908,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.4 For replicated table, we should lock the entire table on ExclusiveLock
1: begin;
@@ -931,7 +931,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_rep
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.5 test order-by's plan
1: begin;
@@ -962,7 +962,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 1.6 select for update NOWAIT/SKIP LOCKED
-- NOWAIT/SKIP LOCKED should not affect the table-level lock
@@ -979,7 +979,7 @@ BEGIN
(5 rows)
2&: select * from t_lockmods for update nowait;
1: abort;
-ABORT
+ROLLBACK
2<: <... completed>
c
---
@@ -1003,7 +1003,7 @@ BEGIN
(5 rows)
2&: select * from t_lockmods for update skip locked;
1: abort;
-ABORT
+ROLLBACK
2<: <... completed>
c
---
@@ -1026,12 +1026,12 @@ ABORT
-- Details: https://groups.google.com/a/greenplum.org/g/gpdb-dev/c/wAPKpJzhbpM
-- Issue: https://github.com/greenplum-db/gpdb/issues/13652
1:DROP TABLE IF EXISTS t_lockmods_part_tbl_dml;
-DROP
+DROP TABLE
1:CREATE TABLE t_lockmods_part_tbl_dml (a int, b int, c int) PARTITION BY RANGE(b) (START(1) END(3) EVERY(1));
-CREATE
+CREATE TABLE
1:INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
--
1: BEGIN;
@@ -1052,7 +1052,7 @@ ROLLBACK
1: BEGIN;
BEGIN
1: INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
-- without GDD, it will lock all leaf partitions on QD
1: select * from show_locks_lockmodes;
locktype | mode | granted | relation
@@ -1135,8 +1135,8 @@ ROLLBACK
-- enable gdd
ALTER SYSTEM SET gp_enable_global_deadlock_detector TO on;
-ALTER
--- Use utility session on seg 0 to restart master. This way avoids the
+ALTER SYSTEM
+-- Use a utility session on seg 0 to restart the coordinator. This way avoids the
-- situation where session issuing the restart doesn't disappear
-- itself.
1U:SELECT pg_ctl(dir, 'restart') from lockmodes_datadir;
@@ -1192,7 +1192,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1220,7 +1220,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1248,7 +1248,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1276,7 +1276,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1304,7 +1304,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1332,7 +1332,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1360,7 +1360,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1388,7 +1388,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods
(4 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.1.2 update | delete should hold RowExclusiveLock on result relations
@@ -1402,7 +1402,7 @@ UPDATE 5
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1414,33 +1414,33 @@ DELETE 5
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.1.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.1.4 upsert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_upsert values (1, 1) on conflict(a, b) do update set b = 99;
-INSERT 1
+INSERT 0 1
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+-------------------
relation | RowExclusiveLock | t | t_lockmods_upsert
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.1.5 use cached plan should be consistent with no cached plan
1: prepare select_for_update as select * from t_lockmods for update;
@@ -1477,7 +1477,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1496,7 +1496,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1515,7 +1515,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1534,55 +1534,55 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute update_tlockmods;
-EXECUTE 5
+UPDATE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute delete_tlockmods;
-EXECUTE 5
+DELETE 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute insert_tlockmods;
-EXECUTE 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+------------
relation | RowExclusiveLock | t | t_lockmods
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute upsert_tlockmods;
-EXECUTE 1
+INSERT 0 1
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+-------------------
relation | RowExclusiveLock | t | t_lockmods_upsert
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.2 test for AO table
@@ -1615,7 +1615,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1645,7 +1645,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1675,7 +1675,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1705,7 +1705,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(2 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1733,7 +1733,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1761,7 +1761,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1789,7 +1789,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1817,7 +1817,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(4 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.2.2 update | delete should hold ExclusiveLock on result relations
1: begin;
@@ -1830,7 +1830,7 @@ UPDATE 8
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1842,20 +1842,20 @@ DELETE 8
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.2.3 insert should hold RowExclusiveLock on result relations
1: begin;
BEGIN
1: insert into t_lockmods_ao select * from generate_series(1, 5);
-INSERT 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.2.4 use cached plan should be consistent with no cached plan
1: prepare select_for_update_ao as select * from t_lockmods_ao for update;
@@ -1893,7 +1893,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1915,7 +1915,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1937,7 +1937,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
@@ -1959,43 +1959,43 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute update_tlockmods_ao;
-EXECUTE 8
+UPDATE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute delete_tlockmods_ao;
-EXECUTE 8
+DELETE 8
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+---------------+---------+---------------
relation | ExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
1: begin;
BEGIN
1: execute insert_tlockmods_ao;
-EXECUTE 5
+INSERT 0 5
2: select * from show_locks_lockmodes;
locktype | mode | granted | relation
----------+------------------+---------+---------------
relation | RowExclusiveLock | t | t_lockmods_ao
(1 row)
1: abort;
-ABORT
+ROLLBACK
-- 2.3 With limit clause, such case should
-- acquire ExclusiveLock on the whole table and do not generate lockrows node
@@ -2028,7 +2028,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.4 For replicated table, we should lock the entire table on ExclusiveLock
1: begin;
@@ -2051,7 +2051,7 @@ BEGIN
relation | ExclusiveLock | t | t_lockmods_rep
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.5 test order-by's plan
1: begin;
@@ -2083,7 +2083,7 @@ BEGIN
relation | RowShareLock | t | t_lockmods
(2 rows)
1: abort;
-ABORT
+ROLLBACK
-- 2.6 select for update NOWAIT/SKIP LOCKED
-- with GDD, select for update could be optimized to not upgrade lock.
@@ -2125,7 +2125,7 @@ BEGIN
2: select * from t_lockmods for update nowait;
ERROR: could not obtain lock on row in relation "t_lockmods" (seg1 slice1 10.140.0.3:7003 pid=15182)
1: abort;
-ABORT
+ROLLBACK
1q: ...
2q: ...
@@ -2202,7 +2202,7 @@ ROLLBACK
1: BEGIN;
BEGIN
1: INSERT INTO t_lockmods_part_tbl_dml SELECT i, 1, i FROM generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
-- With GDD enabled, QD will only hold lock on root for insert
1: select * from show_locks_lockmodes;
locktype | mode | granted | relation
@@ -2214,14 +2214,37 @@ INSERT 10
ROLLBACK
1q: ...
+1: CREATE TABLE t_lockmods_aopart(i int, t text) USING ao_row PARTITION BY RANGE(i) (START(1) END(5) EVERY(1));
+CREATE TABLE
+1: BEGIN;
+BEGIN
+1: DELETE FROM t_lockmods_aopart WHERE i = 4;
+DELETE 0
+-- With GDD enabled, QD will only hold lock on root for delete
+1: select * from show_locks_lockmodes;
+ locktype | mode | granted | relation
+----------+-----------------+---------+---------------------------
+ relation | AccessShareLock | t | t_lockmods_aopart_1_prt_4
+ relation | AccessShareLock | t | t_lockmods_aopart_1_prt_3
+ relation | AccessShareLock | t | t_lockmods_aopart_1_prt_2
+ relation | AccessShareLock | t | t_lockmods_aopart_1_prt_1
+ relation | AccessShareLock | t | t_lockmods_aopart
+ relation | ExclusiveLock | t | t_lockmods_aopart
+(6 rows)
+1: COMMIT;
+COMMIT
+1: DROP TABLE t_lockmods_aopart;
+DROP TABLE
+1q: ...
+
-- 2.8 Verify behaviors of select with locking clause (i.e. select for update)
-- when running concurrently with index creation, for Heap tables.
-- For AO/CO tables, refer to create_index_allows_readonly.source.
1: CREATE TABLE create_index_select_for_update_tbl(a int, b int);
-CREATE
+CREATE TABLE
1: INSERT INTO create_index_select_for_update_tbl SELECT i,i FROM generate_series(1,10)i;
-INSERT 10
+INSERT 0 10
1: set optimizer = off;
SET
@@ -2247,7 +2270,7 @@ SET
BEGIN
-- expect no blocking
2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a);
-CREATE
+CREATE INDEX
2: COMMIT;
COMMIT
@@ -2255,12 +2278,12 @@ COMMIT
COMMIT
2: DROP INDEX create_index_select_for_update_idx;
-DROP
+DROP INDEX
2: BEGIN;
BEGIN
2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a);
-CREATE
+CREATE INDEX
1: BEGIN;
BEGIN
@@ -2279,12 +2302,12 @@ COMMIT
COMMIT
2: DROP INDEX create_index_select_for_update_idx;
-DROP
+DROP INDEX
-- 2.8.2 with GDD disabled, expect blocking
-- reset gdd
2: ALTER SYSTEM RESET gp_enable_global_deadlock_detector;
-ALTER
+ALTER SYSTEM
-- close session to avoid renew session failure after restart
2q: ...
1U:SELECT pg_ctl(dir, 'restart') from lockmodes_datadir;
@@ -2321,17 +2344,17 @@ BEGIN
COMMIT
2<: <... completed>
-CREATE
+CREATE INDEX
2: COMMIT;
COMMIT
2: DROP INDEX create_index_select_for_update_idx;
-DROP
+DROP INDEX
2: BEGIN;
BEGIN
2: CREATE INDEX create_index_select_for_update_idx ON create_index_select_for_update_tbl(a);
-CREATE
+CREATE INDEX
1: BEGIN;
BEGIN
@@ -2350,7 +2373,7 @@ COMMIT
COMMIT
1: drop table lockmodes_datadir;
-DROP
+DROP TABLE
1q: ...
2q: ...
@@ -2359,13 +2382,13 @@ DROP
-- require an AccessExclusiveLock.
-- Case 1. The analyze result is expected when there's concurrent drop on child.
1:create table analyzedrop(a int) partition by range(a);
-CREATE
+CREATE TABLE
1:create table analyzedrop_1 partition of analyzedrop for values from (0) to (10);
-CREATE
+CREATE TABLE
1:create table analyzedrop_2 partition of analyzedrop for values from (10) to (20);
-CREATE
+CREATE TABLE
1:insert into analyzedrop select * from generate_series(0,19);
-INSERT 20
+INSERT 0 20
1:select gp_inject_fault_infinite('merge_leaf_stats_after_find_children', 'suspend', dbid) from gp_segment_configuration where content = -1 and role = 'p';
gp_inject_fault_infinite
--------------------------
@@ -2381,7 +2404,7 @@ INSERT 20
1<: <... completed>
ANALYZE
2<: <... completed>
-DROP
+DROP TABLE
3:select * from pg_stats where tablename like 'analyzedrop%';
schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram
------------+---------------+---------+-----------+-----------+-----------+------------+------------------+-------------------+--------------------------------------------------+-------------+-------------------+------------------------+----------------------
@@ -2405,9 +2428,9 @@ DROP
1<: <... completed>
ANALYZE
2<: <... completed>
-DROP
+DROP TABLE
3<: <... completed>
-DROP
+DROP TABLE
--empty as table is dropped
4:select * from pg_stats where tablename like 'analyzedrop%';
schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram
diff --git a/src/test/isolation2/expected/mark_all_aoseg_await_drop.out b/src/test/isolation2/expected/mark_all_aoseg_await_drop.out
index 2bdc65236f8..77c1b98655c 100644
--- a/src/test/isolation2/expected/mark_all_aoseg_await_drop.out
+++ b/src/test/isolation2/expected/mark_all_aoseg_await_drop.out
@@ -3,7 +3,7 @@
-- AOSEG_STATE_AWAITING_DROP.
CREATE TABLE mark_all_aoseg_await_drop (a int) WITH (appendonly=true);
-CREATE
+CREATE TABLE
-- Create 3 aoseg entries
0: BEGIN;
@@ -13,11 +13,11 @@ BEGIN
2: BEGIN;
BEGIN
0: INSERT INTO mark_all_aoseg_await_drop SELECT i FROM generate_series(1, 10)i;
-INSERT 10
+INSERT 0 10
1: INSERT INTO mark_all_aoseg_await_drop SELECT i FROM generate_series(1, 10)i;
-INSERT 10
+INSERT 0 10
2: INSERT INTO mark_all_aoseg_await_drop SELECT i FROM generate_series(1, 10)i;
-INSERT 10
+INSERT 0 10
0: COMMIT;
COMMIT
1: COMMIT;
@@ -46,11 +46,11 @@ BEGIN
0
(1 row)
1: INSERT INTO mark_all_aoseg_await_drop SELECT i FROM generate_series(1, 10)i;
-INSERT 10
+INSERT 0 10
2: VACUUM mark_all_aoseg_await_drop;
VACUUM
1: END;
-END
+COMMIT
-- We should see segno 2 and 3 in state 2 (AOSEG_STATE_AWAITING_DROP)
-- and segno 1 and 4 in state 1 (AOSEG_STATE_DEFAULT). Segno 1 is not
diff --git a/src/test/isolation2/expected/misc.out b/src/test/isolation2/expected/misc.out
index c342e1ad5cf..bf65cf90c4a 100644
--- a/src/test/isolation2/expected/misc.out
+++ b/src/test/isolation2/expected/misc.out
@@ -5,11 +5,11 @@
-- preassigned in QD, if we create a table in utility mode in QE, the oid might
-- conflict with preassigned oid.
-1U: create table utilitymode_primary_key_tab (c1 int);
-CREATE
+CREATE TABLE
-1U: create unique index idx_utilitymode_c1 on utilitymode_primary_key_tab (c1);
-CREATE
+CREATE INDEX
-1U: drop table utilitymode_primary_key_tab;
-DROP
+DROP TABLE
-- Try a few queries in utility mode. (Once upon a time, there was a bug that
-- caused a crash on EXPLAIN ANALYZE on a Sort node in utility mode.)
@@ -62,7 +62,7 @@ ROLLBACK
--
-- We have changed the name to pg_temp_0 in utility mode.
0U: CREATE TEMP TABLE utilitymode_tmp_tab (c1 int) DISTRIBUTED BY (c1);
-CREATE
+CREATE TABLE
0U: SELECT substring(n.nspname FROM 1 FOR 9) FROM pg_namespace n JOIN pg_class c ON n.oid = c.relnamespace WHERE c.relname = 'utilitymode_tmp_tab';
substring
-----------
@@ -78,7 +78,7 @@ CREATE
-- gp_dist_random('') should not crash in utility mode
--
create or replace view misc_v as select 1;
-CREATE
+CREATE VIEW
0U: select 1 from gp_dist_random('misc_v') union select 1 from misc_v;
?column?
----------
@@ -91,12 +91,12 @@ CREATE
(1 row)
-- But views created in utility mode should not throw away gp_dist_random
0U: create or replace view misc_v2 as select 1 from gp_dist_random('pg_class');
-CREATE
+CREATE VIEW
0U: select definition from pg_views where viewname = 'misc_v2';
definition
-------------------------------------------------------------
- FROM gp_dist_random('pg_class');
SELECT 1 AS "?column?"
+ FROM gp_dist_random('pg_class');
(1 row)
0U: select count(*) > 0 from gp_dist_random('misc_v2');
?column?
@@ -104,7 +104,7 @@ CREATE
t
(1 row)
0U: drop view misc_v2;
-DROP
+DROP VIEW
drop view misc_v;
DROP
diff --git a/src/test/isolation2/expected/modify_table_data_corrupt.out b/src/test/isolation2/expected/modify_table_data_corrupt.out
index e5fc04f36f0..3af5476081e 100644
--- a/src/test/isolation2/expected/modify_table_data_corrupt.out
+++ b/src/test/isolation2/expected/modify_table_data_corrupt.out
@@ -24,18 +24,18 @@ DROP
-- and see if it is motioned to other segments.
create table tab1(a int, b int) distributed by (b);
-CREATE
+CREATE TABLE
create table tab2(a int, b int) distributed by (a);
-CREATE
+CREATE TABLE
create table tab3 (a int, b int) distributed by (b);
-CREATE
+CREATE TABLE
insert into tab1 values (1, 1);
-INSERT 1
+INSERT 0 1
insert into tab2 values (1, 1);
-INSERT 1
+INSERT 0 1
insert into tab3 values (1, 1);
-INSERT 1
+INSERT 0 1
analyze tab1;
ANALYZE
@@ -56,7 +56,7 @@ update pg_class set reltuples = 100000 where relname='tab3';
UPDATE 1
0U: insert into tab1 values (1, 1);
-INSERT 1
+INSERT 0 1
select gp_segment_id, * from tab1;
gp_segment_id | a | b
@@ -90,7 +90,7 @@ BEGIN
delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b;
ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:735) (seg1 127.0.1.1:7003 pid=89508) (nodeModifyTable.c:735)
abort;
-ABORT
+ROLLBACK
-- For planner, this will error out
explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b;
@@ -117,7 +117,7 @@ BEGIN
update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b;
ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:1276) (seg1 127.0.1.1:7003 pid=89508) (nodeModifyTable.c:1276)
abort;
-ABORT
+ROLLBACK
-- For orca, this will error out
explain (costs off) delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a;
@@ -144,7 +144,7 @@ BEGIN
delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a;
DELETE 2
abort;
-ABORT
+ROLLBACK
-- For orca, this will error out
explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a;
@@ -171,7 +171,7 @@ BEGIN
update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a;
UPDATE 2
abort;
-ABORT
+ROLLBACK
-- test splitupdate.
-- For orca, the plan contains a redistribute motion, so that
@@ -193,11 +193,11 @@ BEGIN
update tab1 set b = b + 1;
UPDATE 2
abort;
-ABORT
+ROLLBACK
drop table tab1;
-DROP
+DROP TABLE
drop table tab2;
-DROP
+DROP TABLE
drop table tab3;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/modify_table_data_corrupt_optimizer.out b/src/test/isolation2/expected/modify_table_data_corrupt_optimizer.out
index 2a647497cb0..b60bb24315b 100644
--- a/src/test/isolation2/expected/modify_table_data_corrupt_optimizer.out
+++ b/src/test/isolation2/expected/modify_table_data_corrupt_optimizer.out
@@ -24,18 +24,18 @@ ERROR: table "tab3" does not exist
-- and see if it is motioned to other segments.
create table tab1(a int, b int) distributed by (b);
-CREATE
+CREATE TABLE
create table tab2(a int, b int) distributed by (a);
-CREATE
+CREATE TABLE
create table tab3 (a int, b int) distributed by (b);
-CREATE
+CREATE TABLE
insert into tab1 values (1, 1);
-INSERT 1
+INSERT 0 1
insert into tab2 values (1, 1);
-INSERT 1
+INSERT 0 1
insert into tab3 values (1, 1);
-INSERT 1
+INSERT 0 1
analyze tab1;
ANALYZE
@@ -56,7 +56,7 @@ update pg_class set reltuples = 100000 where relname='tab3';
UPDATE 1
0U: insert into tab1 values (1, 1);
-INSERT 1
+INSERT 0 1
select gp_segment_id, * from tab1;
gp_segment_id | a | b
@@ -90,7 +90,7 @@ BEGIN
delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b;
DELETE 1
abort;
-ABORT
+ROLLBACK
-- For planner, this will error out
explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b;
@@ -110,14 +110,14 @@ explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.
-> Seq Scan on tab3
-> Hash
-> Seq Scan on tab1
- Optimizer: Pivotal Optimizer (GPORCA)
+ Optimizer: Pivotal Optimizer (GPORCA) version 3.86.0
(15 rows)
begin;
BEGIN
update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.b;
UPDATE 1
abort;
-ABORT
+ROLLBACK
-- For orca, this will error out
explain (costs off) delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a;
@@ -145,7 +145,7 @@ BEGIN
delete from tab1 using tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a;
ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:1156) (seg1 172.17.0.2:7003 pid=30251) (nodeModifyTable.c:1156)
abort;
-ABORT
+ROLLBACK
-- For orca, this will error out
explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a;
@@ -166,14 +166,14 @@ explain (costs off) update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.
-> Hash
-> Broadcast Motion 3:3 (slice3; segments: 3)
-> Seq Scan on tab1
- Optimizer: Pivotal Optimizer (GPORCA)
+ Optimizer: Pivotal Optimizer (GPORCA) version 3.86.0
(16 rows)
begin;
BEGIN
update tab1 set a = 999 from tab2, tab3 where tab1.a = tab2.a and tab1.b = tab3.a;
ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:1156) (seg1 172.17.0.2:7003 pid=30251) (nodeModifyTable.c:1156)
abort;
-ABORT
+ROLLBACK
-- test splitupdate.
-- For orca, the plan contains a redistribute motion, so that
@@ -199,11 +199,11 @@ BEGIN
update tab1 set b = b + 1;
ERROR: distribution key of the tuple (0, 1) doesn't belong to current segment (actually from seg0) (nodeModifyTable.c:1156) (seg1 172.17.0.2:7003 pid=30251) (nodeModifyTable.c:1156)
abort;
-ABORT
+ROLLBACK
drop table tab1;
-DROP
+DROP TABLE
drop table tab2;
-DROP
+DROP TABLE
drop table tab3;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/orphan_temp_table.out b/src/test/isolation2/expected/orphan_temp_table.out
index 85cbe165008..a47a7642748 100644
--- a/src/test/isolation2/expected/orphan_temp_table.out
+++ b/src/test/isolation2/expected/orphan_temp_table.out
@@ -3,7 +3,7 @@
-- case 1: Before the fix, when backend process panic on the segment, the temp table will be left on the coordinator.
-- create a temp table
1: CREATE TEMP TABLE test_temp_table_cleanup(a int);
-CREATE
+CREATE TABLE
-- panic on segment 0
1: SELECT gp_inject_fault('before_exec_scan', 'panic', dbid) FROM gp_segment_configuration WHERE role='p' AND content = 0;
diff --git a/src/test/isolation2/expected/pg_rewind_fail_missing_xlog.out b/src/test/isolation2/expected/pg_rewind_fail_missing_xlog.out
index 845dc6be0f7..9520545d094 100644
--- a/src/test/isolation2/expected/pg_rewind_fail_missing_xlog.out
+++ b/src/test/isolation2/expected/pg_rewind_fail_missing_xlog.out
@@ -9,9 +9,9 @@ CREATE OR REPLACE FUNCTION connectSeg(n int, port int, hostname text) RETURNS bo
CREATE
CREATE TABLE tst_missing_tbl (a int);
-CREATE
+CREATE TABLE
INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Make the test faster by not preserving any extra wal segment files
!\retcode gpconfig -c wal_keep_size -v 0;
@@ -42,7 +42,7 @@ CHECKPOINT
t
(1 row)
1: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Should be not needed mostly but let's 100% ensure since pg_switch_wal()
-- won't switch if it has been on the boundary (seldom though).
0U: SELECT pg_switch_wal is not null FROM pg_switch_wal();
@@ -51,7 +51,7 @@ INSERT 3
t
(1 row)
1: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
0Uq: ...
-- Make sure primary/mirror pair is in sync, otherwise FTS can't promote mirror
@@ -87,9 +87,9 @@ CHECKPOINT
0M: BEGIN;
BEGIN
0M: DROP TABLE tst_missing_tbl;
-DROP
+DROP TABLE
0M: ABORT;
-ABORT
+ROLLBACK
0M: CHECKPOINT;
CHECKPOINT
0Mq: ...
@@ -99,7 +99,7 @@ CHECKPOINT
-- know that a wal divergence is explicitly triggered and 100% completed. Also
-- sanity check the tuple distribution (assumption of the test).
2: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
2: SELECT gp_segment_id, count(*) from tst_missing_tbl group by gp_segment_id;
gp_segment_id | count
---------------+-------
@@ -147,21 +147,21 @@ CHECKPOINT
t
(1 row)
3: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
1U: SELECT pg_switch_wal is not null FROM pg_switch_wal();
?column?
----------
t
(1 row)
3: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
1U: SELECT pg_switch_wal is not null FROM pg_switch_wal();
?column?
----------
t
(1 row)
3: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Should be not needed mostly but let's 100% ensure since pg_switch_wal()
-- won't switch if it is on the boundary already (seldom though).
1U: SELECT pg_switch_wal is not null FROM pg_switch_wal();
@@ -170,7 +170,7 @@ INSERT 3
t
(1 row)
3: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Hang at checkpointer before writing checkpoint xlog.
3: SELECT gp_inject_fault('checkpoint_after_redo_calculated', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content = 1;
@@ -226,7 +226,7 @@ INSERT 3
(2 rows)
4: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
4: SELECT gp_segment_id, count(*) from tst_missing_tbl group by gp_segment_id;
gp_segment_id | count
---------------+-------
@@ -284,7 +284,7 @@ CHECKPOINT
t
(1 row)
1: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Should be not needed mostly but let's 100% ensure since pg_switch_wal()
-- won't switch if it has been on the boundary (seldom though).
0U: SELECT pg_switch_wal is not null FROM pg_switch_wal();
@@ -293,7 +293,7 @@ INSERT 3
t
(1 row)
1: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
0Uq: ...
-- Make sure primary/mirror pair is in sync, otherwise FTS can't promote mirror
@@ -327,17 +327,17 @@ INSERT 3
0M: BEGIN;
BEGIN
0M: DROP TABLE tst_missing_tbl;
-DROP
+DROP TABLE
0M: ABORT;
-ABORT
+ROLLBACK
0M: CHECKPOINT;
CHECKPOINT
0M: BEGIN;
BEGIN
0M: DROP TABLE tst_missing_tbl;
-DROP
+DROP TABLE
0M: ABORT;
-ABORT
+ROLLBACK
0M: CHECKPOINT;
CHECKPOINT
@@ -368,7 +368,7 @@ CHECKPOINT
-- know that a wal divergence is explicitly triggered and 100% completed. Also
-- sanity check the tuple distribution (assumption of the test).
2: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
2: SELECT gp_segment_id, count(*) from tst_missing_tbl group by gp_segment_id;
gp_segment_id | count
---------------+-------
@@ -416,7 +416,7 @@ CHECKPOINT
t
(1 row)
3: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Should be not needed mostly but let's 100% ensure since pg_switch_wal()
-- won't switch if it is on the boundary already (seldom though).
1U: SELECT pg_switch_wal is not null FROM pg_switch_wal();
@@ -425,7 +425,7 @@ INSERT 3
t
(1 row)
3: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Have primary/mirror pair in sync before suspending the wal sender.
3: SELECT wait_until_all_segments_synchronized();
@@ -497,7 +497,7 @@ INSERT 3
-- Write something on the current primary
4: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
4: SELECT gp_segment_id, count(*) from tst_missing_tbl group by gp_segment_id;
gp_segment_id | count
---------------+-------
@@ -550,9 +550,9 @@ server closed the connection unexpectedly
-- Create an unlogged table on the primary that remembers replication slot's last restart_lsn and number of WAL files.
1U: CREATE UNLOGGED TABLE unlogged_wal_retention_test(restart_lsn_before pg_lsn, wal_count_before int);
-CREATE
+CREATE TABLE
1U: INSERT INTO unlogged_wal_retention_test SELECT (select restart_lsn FROM pg_replication_slots WHERE slot_name = 'internal_wal_replication_slot') as restart_lsn_before, (select count(*) from pg_ls_waldir()) as wal_count_before;
-INSERT 1
+INSERT 0 1
5: CHECKPOINT;
CHECKPOINT
-- Replication slot's restart_lsn should advance to the checkpoint's redo location.
@@ -582,7 +582,7 @@ UPDATE 1
t
(1 row)
5: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Replication slot's restart_lsn should NOT change regardless mirror has received more wals.
1U: select pg_wal_lsn_diff(restart_lsn, restart_lsn_before) = 0 FROM pg_replication_slots, unlogged_wal_retention_test WHERE slot_name = 'internal_wal_replication_slot';
?column?
@@ -624,7 +624,7 @@ CHECKPOINT
(1 row)
-- Perform transaction to make sure wals are in sync.
5: INSERT INTO tst_missing_tbl values(2),(1),(5);
-INSERT 3
+INSERT 0 3
-- Replication slot's restart_lsn should now advance to the checkpoint's redo location.
1U: SELECT pg_wal_lsn_diff(restart_lsn, restart_lsn_before) > 0 from pg_replication_slots, unlogged_wal_retention_test WHERE slot_name = 'internal_wal_replication_slot';
?column?
@@ -648,13 +648,11 @@ CHECKPOINT
-- Cleanup
1U: DROP TABLE unlogged_wal_retention_test;
-DROP
+DROP TABLE
1Uq: ...
5: DROP TABLE tst_missing_tbl;
-DROP
-5: DROP FUNCTION connectSeg;
-DROP
+DROP TABLE
!\retcode gpconfig -r wal_keep_size;
(exited with code 0)
!\retcode gpconfig -r wal_recycle;
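
Note: the hunks above (and most of what follows) make one mechanical substitution: the expected files now carry the server's full libpq command tags instead of the abbreviated tags the old harness printed. A minimal sketch of the new expectations, using an invented table name `demo`:

    CREATE TABLE demo (a int);              -- tag: CREATE TABLE
    INSERT INTO demo VALUES (1), (2), (3);  -- tag: INSERT 0 3 (the leading 0 is the legacy OID field, always 0 on modern servers)
    BEGIN;                                  -- tag: BEGIN
    DROP TABLE demo;                        -- tag: DROP TABLE
    ABORT;                                  -- tag: ROLLBACK (the server tags ABORT as ROLLBACK)
    CREATE TABLE demo_copy AS SELECT 1;     -- tag: SELECT 1 (CTAS reports its row count under a SELECT tag)
    DROP TABLE demo_copy;                   -- tag: DROP TABLE
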
diff --git a/src/test/isolation2/expected/pg_terminate_backend.out b/src/test/isolation2/expected/pg_terminate_backend.out
index d3bb0b2ad2b..fc1e6f1d42c 100644
--- a/src/test/isolation2/expected/pg_terminate_backend.out
+++ b/src/test/isolation2/expected/pg_terminate_backend.out
@@ -1,5 +1,5 @@
1:create table terminate_backend_t (a int) distributed by (a);
-CREATE
+CREATE TABLE
-- fault on seg1 to block insert command into terminate_backend_t table
select gp_inject_fault('heap_insert', 'infinite_loop', '', '', 'terminate_backend_t', 1, 1, 0, dbid) from gp_segment_configuration where content = 1 and role = 'p';
diff --git a/src/test/isolation2/expected/pg_views_concurrent_drop.out b/src/test/isolation2/expected/pg_views_concurrent_drop.out
index aa3c86fc664..a2187664711 100644
--- a/src/test/isolation2/expected/pg_views_concurrent_drop.out
+++ b/src/test/isolation2/expected/pg_views_concurrent_drop.out
@@ -1,7 +1,7 @@
1:drop view if exists concurrent_drop_view cascade;
-DROP
+DROP VIEW
1:create view concurrent_drop_view as select * from pg_class;
-CREATE
+CREATE VIEW
1:select viewname from pg_views where viewname = 'concurrent_drop_view';
viewname
----------------------
@@ -19,7 +19,7 @@ CREATE
1:begin;
BEGIN
1:drop view concurrent_drop_view;
-DROP
+DROP VIEW
2&:select viewname, definition from pg_views where viewname = 'concurrent_drop_view';
-- wait till halts for AccessShareLock on QD
3: SELECT wait_until_waiting_for_required_lock('concurrent_drop_view', 'AccessShareLock', -1);
diff --git a/src/test/isolation2/expected/prepare_limit.out b/src/test/isolation2/expected/prepare_limit.out
index c9479617b27..8e188a57252 100644
--- a/src/test/isolation2/expected/prepare_limit.out
+++ b/src/test/isolation2/expected/prepare_limit.out
@@ -18,13 +18,13 @@
(exited with code 0)
5: create table prepare_limit1 (a int);
-CREATE
+CREATE TABLE
5: create table prepare_limit2 (a int);
-CREATE
+CREATE TABLE
5: create table prepare_limit3 (a int);
-CREATE
+CREATE TABLE
5: create table prepare_limit4 (a int);
-CREATE
+CREATE TABLE
5: select gp_inject_fault_infinite('dtm_before_insert_forget_comitted', 'suspend', 1);
gp_inject_fault_infinite
@@ -64,13 +64,13 @@ CREATE
Success:
(1 row)
1<: <... completed>
-INSERT 1
+INSERT 0 1
2<: <... completed>
-INSERT 1
+INSERT 0 1
3<: <... completed>
-INSERT 1
+INSERT 0 1
4<: <... completed>
-INSERT 1
+INSERT 0 1
-- verify that standby is correctly wal streaming.
5: select state from pg_stat_replication;
@@ -104,13 +104,13 @@ INSERT 1
-- cleanup
5: drop table prepare_limit1;
-DROP
+DROP TABLE
5: drop table prepare_limit2;
-DROP
+DROP TABLE
5: drop table prepare_limit3;
-DROP
+DROP TABLE
5: drop table prepare_limit4;
-DROP
+DROP TABLE
-- Not using gpconfig -r, else it makes max_prepared_transactions be default
-- (50) and some isolation2 tests will fail due to "too many clients". Hardcode
diff --git a/src/test/isolation2/expected/prepared_xact_deadlock_pg_rewind.out b/src/test/isolation2/expected/prepared_xact_deadlock_pg_rewind.out
index cb59647769e..56838af0657 100644
--- a/src/test/isolation2/expected/prepared_xact_deadlock_pg_rewind.out
+++ b/src/test/isolation2/expected/prepared_xact_deadlock_pg_rewind.out
@@ -5,9 +5,9 @@
-- start_ignore
-- set GUCs to speed-up the test
1: alter system set gp_fts_probe_retries to 2;
-ALTER
+ALTER SYSTEM
1: alter system set gp_fts_probe_timeout to 5;
-ALTER
+ALTER SYSTEM
1: select pg_reload_conf();
pg_reload_conf
----------------
@@ -109,9 +109,9 @@ select wait_until_all_segments_synchronized();
-- start_ignore
-- reset fts GUCs.
3: alter system reset gp_fts_probe_retries;
-ALTER
+ALTER SYSTEM
3: alter system reset gp_fts_probe_timeout;
-ALTER
+ALTER SYSTEM
3: select pg_reload_conf();
pg_reload_conf
----------------
diff --git a/src/test/isolation2/expected/prevent_ao_wal.out b/src/test/isolation2/expected/prevent_ao_wal.out
index 314d5b20440..6b76c11c8c8 100644
--- a/src/test/isolation2/expected/prevent_ao_wal.out
+++ b/src/test/isolation2/expected/prevent_ao_wal.out
@@ -23,9 +23,9 @@ GP_IGNORE: defined new match expression
-- Create tables (AO, AOCO)
-1U: CREATE TABLE ao_foo (n int) WITH (appendonly=true);
-CREATE
+CREATE TABLE
-1U: CREATE TABLE aoco_foo (n int, m int) WITH (appendonly=true, orientation=column);
-CREATE
+CREATE TABLE
-- Switch WAL file
-1U: SELECT true FROM pg_switch_wal();
@@ -35,10 +35,10 @@ CREATE
(1 row)
-- Insert data (AO)
-1U: INSERT INTO ao_foo SELECT generate_series(1,10);
-INSERT 10
+INSERT 0 10
-- Insert data (AOCO)
-1U: INSERT INTO aoco_foo SELECT generate_series(1,10), generate_series(1,10);
-INSERT 10
+INSERT 0 10
-- Delete data and run vacuum (AO)
-1U: DELETE FROM ao_foo WHERE n > 5;
DELETE 5
@@ -93,10 +93,10 @@ rmgr: Appendonly len (rec/tot): 50/ 50, tx: ##, lsn: #/########, prev #/
(1 row)
-- Insert data (AO)
-1U: INSERT INTO ao_foo SELECT generate_series(1,10);
-INSERT 10
+INSERT 0 10
-- Insert data (AOCO)
-1U: INSERT INTO aoco_foo SELECT generate_series(1,10), generate_series(1,10);
-INSERT 10
+INSERT 0 10
-- Delete data and run vacuum (AO)
-1U: DELETE FROM ao_foo WHERE n > 5;
DELETE 5
@@ -113,9 +113,9 @@ VACUUM
-1U: DROP TABLE ao_foo;
-DROP
+DROP TABLE
-1U: DROP TABLE aoco_foo;
-DROP
+DROP TABLE
-- Reset wal_level
!\retcode gpconfig -r wal_level --masteronly;
diff --git a/src/test/isolation2/expected/reader_waits_for_lock.out b/src/test/isolation2/expected/reader_waits_for_lock.out
index df9cf7de46f..98c8763ef00 100644
--- a/src/test/isolation2/expected/reader_waits_for_lock.out
+++ b/src/test/isolation2/expected/reader_waits_for_lock.out
@@ -4,17 +4,17 @@
-- setup
CREATE or REPLACE FUNCTION check_readers_are_blocked () RETURNS bool AS $$ declare retries int; /* in func */ begin retries := 1200; /* in func */ loop if (SELECT count(*) > 0 as reader_waits from pg_locks l join pg_stat_activity a on a.pid = l.pid and a.query like '%reader_waits_for_lock_table%' and not a.pid = pg_backend_pid() and l.granted = false and l.mppiswriter = false) then return true; /* in func */ end if; /* in func */ if retries <= 0 then return false; /* in func */ end if; /* in func */ perform pg_sleep(0.1); /* in func */ perform pg_stat_clear_snapshot(); /* in func */ retries := retries - 1; /* in func */ end loop; /* in func */ end; /* in func */ $$ language plpgsql;
-CREATE
+CREATE FUNCTION
1: create table reader_waits_for_lock_table(a int, b int) distributed by (a);
-CREATE
+CREATE TABLE
1: insert into reader_waits_for_lock_table select 1, 1;
-INSERT 1
+INSERT 0 1
-- Aquire a conflicting lock in utility mode on seg0.
0U: BEGIN;
BEGIN
0U: LOCK reader_waits_for_lock_table IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
-- A utility mode connection should not have valid gp_session_id, else
-- locks aquired by it may not confict with locks requested by a
-- normal mode backend.
diff --git a/src/test/isolation2/expected/reindex.out b/src/test/isolation2/expected/reindex.out
index a9eac24f660..b166a6ad591 100644
--- a/src/test/isolation2/expected/reindex.out
+++ b/src/test/isolation2/expected/reindex.out
@@ -1,6 +1,6 @@
-- Dropping table while reindex database should not fail reindex
CREATE DATABASE reindexdb1 TEMPLATE template1;
-CREATE
+CREATE DATABASE
-- halt reindex after scanning the pg_class and getting the relids
SELECT gp_inject_fault_infinite('reindex_db', 'suspend', 1);
gp_inject_fault_infinite
@@ -8,7 +8,7 @@ SELECT gp_inject_fault_infinite('reindex_db', 'suspend', 1);
Success:
(1 row)
1:@db_name reindexdb1: CREATE TABLE heap1(a INT, b INT);
-CREATE
+CREATE TABLE
1&:REINDEX DATABASE reindexdb1;
SELECT gp_wait_until_triggered_fault('reindex_db', 1, 1);
gp_wait_until_triggered_fault
@@ -16,7 +16,7 @@ SELECT gp_wait_until_triggered_fault('reindex_db', 1, 1);
Success:
(1 row)
2:@db_name reindexdb1:DROP TABLE heap1;
-DROP
+DROP TABLE
SELECT gp_inject_fault('reindex_db', 'reset', 1);
gp_inject_fault
-----------------
@@ -33,11 +33,11 @@ REINDEX
BEGIN;
BEGIN
CREATE TABLE reindex_index1(a int, b int);
-CREATE
+CREATE TABLE
CREATE INDEX reindex_index1_idx1 on reindex_index1 (b);
-CREATE
+CREATE INDEX
insert into reindex_index1 select i,i+1 from generate_series(1, 10)i;
-INSERT 10
+INSERT 0 10
COMMIT;
COMMIT
SELECT gp_inject_fault_infinite('reindex_relation', 'suspend', 1);
@@ -53,7 +53,7 @@ SELECT gp_wait_until_triggered_fault('reindex_relation', 1, 1);
(1 row)
-- create one more index
CREATE INDEX reindex_index1_idx2 on reindex_index1 (a);
-CREATE
+CREATE INDEX
SELECT gp_inject_fault('reindex_relation', 'reset', 1);
gp_inject_fault
-----------------
@@ -63,4 +63,4 @@ SELECT gp_inject_fault('reindex_relation', 'reset', 1);
REINDEX
DROP DATABASE reindexdb1;
-DROP
+DROP DATABASE
diff --git a/src/test/isolation2/expected/reindex/abort_reindex.out b/src/test/isolation2/expected/reindex/abort_reindex.out
index ad5c7d1cef2..3a8e2a73701 100644
--- a/src/test/isolation2/expected/reindex/abort_reindex.out
+++ b/src/test/isolation2/expected/reindex/abort_reindex.out
@@ -1,18 +1,18 @@
DROP TABLE IF EXISTS reindex_abort_ao;
-DROP
+DROP TABLE
CREATE TABLE reindex_abort_ao (a INT) WITH (appendonly=true, orientation=column);
-CREATE
+CREATE TABLE
insert into reindex_abort_ao select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_btree_reindex_abort_ao on reindex_abort_ao(a);
-CREATE
+CREATE INDEX
-- start_ignore
drop table if exists reindex_abort_ao_old;
-DROP
+DROP TABLE
create table reindex_abort_ao_old as (select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode from pg_class where relname = 'idx_btree_reindex_abort_ao' union all select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode from gp_dist_random('pg_class') where relname = 'idx_btree_reindex_abort_ao');
-CREATE 4
+SELECT 4
-- end_ignore
select 1 as have_same_number_of_rows from reindex_abort_ao_old where c_gp_segment_id > -1 group by c_oid having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1);
diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_bitmap.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_bitmap.out
index ebda60ec248..707f70681c5 100644
--- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_bitmap.out
+++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_bitmap.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_ao_bitmap;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_ao_bitmap (a INT) WITH (appendonly=true);
-CREATE
+CREATE TABLE
insert into reindex_crtab_ao_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_ao_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_ao_bitmap on reindex_crtab_ao_bitmap USING bitmap(a);
-CREATE
+CREATE INDEX
select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_bitmap' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1);
oid_same_on_all_segs
----------------------
@@ -30,6 +30,8 @@ REINDEX
CREATE
1: COMMIT;
COMMIT
+2<: <... completed>
+CREATE INDEX
2: COMMIT;
COMMIT
3: SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_reindex_crtab_ao_bitmap' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1);
diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_btree.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_btree.out
index 65b18f1147e..d9edc941eec 100644
--- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_btree.out
+++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_ao_btree.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_ao_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_ao_btree (a INT) WITH (appendonly=true);
-CREATE
+CREATE TABLE
insert into reindex_crtab_ao_btree select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_ao_btree select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_ao_btree on reindex_crtab_ao_btree(a);
-CREATE
+CREATE INDEX
select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_btree' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1);
oid_same_on_all_segs
----------------------
@@ -30,6 +30,8 @@ REINDEX
CREATE
1: COMMIT;
COMMIT
+2<: <... completed>
+CREATE INDEX
2: COMMIT;
COMMIT
3: SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_reindex_crtab_ao_btree' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1);
diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_bitmap.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_bitmap.out
index 4f7b6f9bd23..5fab7906557 100644
--- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_bitmap.out
+++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_bitmap.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_aoco_bitmap;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_aoco_bitmap (a INT) WITH (appendonly=true, orientation=column);
-CREATE
+CREATE TABLE
insert into reindex_crtab_aoco_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_aoco_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_aoco_bitmap on reindex_crtab_aoco_bitmap USING bitmap(a);
-CREATE
+CREATE INDEX
select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_bitmap' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1);
oid_same_on_all_segs
----------------------
@@ -30,6 +30,8 @@ REINDEX
CREATE
1: COMMIT;
COMMIT
+2<: <... completed>
+CREATE INDEX
2: COMMIT;
COMMIT
3: SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_reindex_crtab_aoco_bitmap' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1);
diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_btree.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_btree.out
index c7a70b9ccd0..639ed4c0553 100644
--- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_btree.out
+++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_aoco_btree.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_aoco_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_aoco_btree (a INT) WITH (appendonly=true, orientation=column);
-CREATE
+CREATE TABLE
insert into reindex_crtab_aoco_btree select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_aoco_btree select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_aoco_btree on reindex_crtab_aoco_btree(a);
-CREATE
+CREATE INDEX
select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_btree' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1);
oid_same_on_all_segs
----------------------
@@ -30,6 +30,8 @@ REINDEX
CREATE
1: COMMIT;
COMMIT
+2<: <... completed>
+CREATE INDEX
2: COMMIT;
COMMIT
3: SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'idx_reindex_crtab_aoco_btree' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1);
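
Note: besides the tag renames, the createidx_while_reindex_idx_* files gain an explicit join step for the blocked session. In isolation2 notation `2&:` launches a command asynchronously and `2<:` waits for it to finish; the expected output now records both the join marker and the completed command's tag. Roughly, the scenario these files exercise (index and table names are placeholders):

    1: BEGIN;
    1: REINDEX INDEX some_idx;                   -- holds the index lock until commit
    2&: CREATE INDEX some_idx2 ON some_tab(a);   -- queues behind session 1
    1: COMMIT;
    2<: <... completed>                          -- join: session 2's DDL finishes here
    CREATE INDEX                                 -- ...and its full tag is echoed
    2: COMMIT;
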
diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_bitmap.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_bitmap.out
index 15bbec1e6a7..2ae538a7b0c 100644
--- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_bitmap.out
+++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_bitmap.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_heap_bitmap;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_heap_bitmap (a INT);
-CREATE
+CREATE TABLE
insert into reindex_crtab_heap_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_heap_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_heap_bitmap on reindex_crtab_heap_bitmap(a);
-CREATE
+CREATE INDEX
select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_bitmap' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1);
oid_same_on_all_segs
----------------------
@@ -27,7 +27,7 @@ BEGIN
1: REINDEX index idx_reindex_crtab_heap_bitmap;
REINDEX
2: create index idx_reindex_crtab_heap_bitmap2 on reindex_crtab_heap_bitmap(a);
-CREATE
+CREATE INDEX
1: COMMIT;
COMMIT
2: COMMIT;
diff --git a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_btree.out b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_btree.out
index 7bae7e1d940..a25e65b12ea 100644
--- a/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_btree.out
+++ b/src/test/isolation2/expected/reindex/createidx_while_reindex_idx_heap_btree.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_heap_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_heap_btree (a INT);
-CREATE
+CREATE TABLE
insert into reindex_crtab_heap_btree select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_heap_btree select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_heap_btree on reindex_crtab_heap_btree(a);
-CREATE
+CREATE INDEX
select 1 as oid_same_on_all_segs from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_btree' group by oid having count(*) = (select count(*) from gp_segment_configuration where role='p' and content > -1);
oid_same_on_all_segs
----------------------
@@ -27,7 +27,7 @@ BEGIN
1: REINDEX index idx_reindex_crtab_heap_btree;
REINDEX
2: create index idx_reindex_crtab_heap_btree2 on reindex_crtab_heap_btree(a);
-CREATE
+CREATE INDEX
1: COMMIT;
COMMIT
2: COMMIT;
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_addpart_ao_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_addpart_ao_part_btree.out
index cefd1bf866b..8da2942791f 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_addpart_ao_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_addpart_ao_part_btree.out
@@ -1,24 +1,24 @@
DROP TABLE IF EXISTS reindex_crtabforadd_part_ao_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtabforadd_part_ao_btree (id int, date date, amt decimal(10,2)) with (appendonly=true) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE );
-CREATE
+CREATE TABLE
Insert into reindex_crtabforadd_part_ao_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_ao_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_ao_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_ao_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
create index on reindex_crtabforadd_part_ao_btree(id);
-CREATE
+CREATE INDEX
-- start_ignore
create table before_reindex_crtabforadd_part_ao_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforadd_part_ao_btree%_idx';
-CREATE 12
+SELECT 12
-- end_ignore
select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforadd_part_ao_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1);
@@ -38,7 +38,7 @@ DELETE 8
1: BEGIN;
BEGIN
1: LOCK reindex_crtabforadd_part_ao_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtabforadd_part_ao_btree;
3&: alter table reindex_crtabforadd_part_ao_btree add partition p1 START (date '2013-05-01') INCLUSIVE with(appendonly=true);
1: COMMIT;
@@ -46,9 +46,9 @@ COMMIT
2<: <... completed>
REINDEX
3<: <... completed>
-ALTER
+ALTER TABLE
3: Insert into reindex_crtabforadd_part_ao_btree values(9,'2013-05-22',14.22);
-INSERT 1
+INSERT 0 1
3: select count(*) from reindex_crtabforadd_part_ao_btree where id = 29;
count
-------
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_addpart_aoco_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_addpart_aoco_part_btree.out
index 89d80f1a9fb..6db2d2d1c52 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_addpart_aoco_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_addpart_aoco_part_btree.out
@@ -1,24 +1,24 @@
DROP TABLE IF EXISTS reindex_crtabforadd_part_aoco_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtabforadd_part_aoco_btree (id int, date date, amt decimal(10,2)) with (appendonly=true, orientation=column) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE );
-CREATE
+CREATE TABLE
Insert into reindex_crtabforadd_part_aoco_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_aoco_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_aoco_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_aoco_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
create index on reindex_crtabforadd_part_aoco_btree(id);
-CREATE
+CREATE INDEX
-- start_ignore
create table before_reindex_crtabforadd_part_aoco_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforadd_part_aoco_btree%_idx';
-CREATE 12
+SELECT 12
-- end_ignore
select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforadd_part_aoco_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1);
@@ -38,7 +38,7 @@ DELETE 8
1: BEGIN;
BEGIN
1: LOCK reindex_crtabforadd_part_aoco_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtabforadd_part_aoco_btree;
3&: alter table reindex_crtabforadd_part_aoco_btree add default partition part_others with(appendonly=true, orientation=column);
1: COMMIT;
@@ -46,9 +46,9 @@ COMMIT
2<: <... completed>
REINDEX
3<: <... completed>
-ALTER
+ALTER TABLE
3: Insert into reindex_crtabforadd_part_aoco_btree values(29,'2013-04-22',12.52);
-INSERT 1
+INSERT 0 1
3: select count(*) from reindex_crtabforadd_part_aoco_btree where id = 29;
count
-------
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_addpart_heap_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_addpart_heap_part_btree.out
index eed3c9c024e..49ed4aa95b4 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_addpart_heap_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_addpart_heap_part_btree.out
@@ -1,24 +1,24 @@
DROP TABLE IF EXISTS reindex_crtabforadd_part_heap_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtabforadd_part_heap_btree (id int, date date, amt decimal(10,2)) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE );
-CREATE
+CREATE TABLE
Insert into reindex_crtabforadd_part_heap_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_heap_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_heap_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforadd_part_heap_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
create index on reindex_crtabforadd_part_heap_btree(id);
-CREATE
+CREATE INDEX
-- start_ignore
create table before_reindex_crtabforadd_part_heap_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforadd_part_heap_btree%_idx';
-CREATE 12
+SELECT 12
-- end_ignore
select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforadd_part_heap_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1);
@@ -38,7 +38,7 @@ DELETE 8
1: BEGIN;
BEGIN
1: LOCK reindex_crtabforadd_part_heap_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtabforadd_part_heap_btree;
3&: alter table reindex_crtabforadd_part_heap_btree add partition new_p START (date '2013-06-01') INCLUSIVE ;
1: COMMIT;
@@ -46,9 +46,9 @@ COMMIT
2<: <... completed>
REINDEX
3<: <... completed>
-ALTER
+ALTER TABLE
3: Insert into reindex_crtabforadd_part_heap_btree values(29,'2013-06-09',14.20);
-INSERT 1
+INSERT 0 1
3: select count(*) from reindex_crtabforadd_part_heap_btree where id = 29;
count
-------
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_altertable_ao_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_altertable_ao_part_btree.out
index 52b56766ec5..d467c88c2b6 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_altertable_ao_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_altertable_ao_part_btree.out
@@ -1,24 +1,24 @@
DROP TABLE IF EXISTS reindex_crtabforalter_part_ao_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtabforalter_part_ao_btree (id int, date date, amt decimal(10,2)) with (appendonly=true) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE );
-CREATE
+CREATE TABLE
Insert into reindex_crtabforalter_part_ao_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_ao_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_ao_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_ao_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
create index on reindex_crtabforalter_part_ao_btree(id);
-CREATE
+CREATE INDEX
-- start_ignore
create table before_reindex_crtabforalter_part_ao_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforalter_part_ao_btree%_idx';
-CREATE 12
+SELECT 12
-- end_ignore
select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforalter_part_ao_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1);
@@ -38,7 +38,7 @@ DELETE 8
1: BEGIN;
BEGIN
1: LOCK reindex_crtabforalter_part_ao_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtabforalter_part_ao_btree;
3&: alter table reindex_crtabforalter_part_ao_btree drop column amt;
1: COMMIT;
@@ -46,7 +46,7 @@ COMMIT
2<: <... completed>
REINDEX
3<: <... completed>
-ALTER
+ALTER TABLE
3: select count(*) from reindex_crtabforalter_part_ao_btree where id = 29;
count
-------
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_altertable_aoco_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_altertable_aoco_part_btree.out
index 6780e137f16..00a6b537348 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_altertable_aoco_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_altertable_aoco_part_btree.out
@@ -1,24 +1,24 @@
DROP TABLE IF EXISTS reindex_crtabforalter_part_aoco_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtabforalter_part_aoco_btree (id int, date date, amt decimal(10,2)) with (appendonly=true, orientation=column) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE );
-CREATE
+CREATE TABLE
Insert into reindex_crtabforalter_part_aoco_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_aoco_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_aoco_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_aoco_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
create index on reindex_crtabforalter_part_aoco_btree(id);
-CREATE
+CREATE INDEX
-- start_ignore
create table before_reindex_crtabforalter_part_aoco_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforalter_part_aoco_btree%_idx';
-CREATE 12
+SELECT 12
-- end_ignore
select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforalter_part_aoco_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1);
@@ -38,7 +38,7 @@ DELETE 8
1: BEGIN;
BEGIN
1: LOCK reindex_crtabforalter_part_aoco_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtabforalter_part_aoco_btree;
3&: alter table reindex_crtabforalter_part_aoco_btree drop column amt;
1: COMMIT;
@@ -46,7 +46,7 @@ COMMIT
2<: <... completed>
REINDEX
3<: <... completed>
-ALTER
+ALTER TABLE
3: select count(*) from reindex_crtabforalter_part_aoco_btree where id = 29;
count
-------
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_altertable_heap_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_altertable_heap_part_btree.out
index 0a63be4559e..975e2c93301 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_altertable_heap_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_altertable_heap_part_btree.out
@@ -1,24 +1,24 @@
DROP TABLE IF EXISTS reindex_crtabforalter_part_heap_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtabforalter_part_heap_btree (id int, date date, amt decimal(10,2)) DISTRIBUTED BY (id) PARTITION BY RANGE (date) ( PARTITION sales_Jul13 START (date '2013-07-01') INCLUSIVE , PARTITION sales_Aug13 START (date '2013-08-01') INCLUSIVE , PARTITION sales_Sep13 START (date '2013-09-01') INCLUSIVE END (date '2014-01-01') EXCLUSIVE );
-CREATE
+CREATE TABLE
Insert into reindex_crtabforalter_part_heap_btree select i, to_date('2013-07-'||i, 'YYYY/MM/DD') , 19.21+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_heap_btree select i, to_date('2013-08-'||i, 'YYYY/MM/DD') , 9.31+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_heap_btree select i, to_date('2013-09-'||i, 'YYYY/MM/DD') , 12.25+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
Insert into reindex_crtabforalter_part_heap_btree select i, to_date('2013-11-'||i, 'YYYY/MM/DD') , 29.51+i from generate_series(10,30) i;
-INSERT 21
+INSERT 0 21
create index on reindex_crtabforalter_part_heap_btree(id);
-CREATE
+CREATE INDEX
-- start_ignore
create table before_reindex_crtabforalter_part_heap_btree as select oid as c_oid, gp_segment_id as c_gp_segment_id, relfilenode as c_relfilenode, relname as c_relname from gp_dist_random('pg_class') where relname like 'reindex_crtabforalter_part_heap_btree%_idx';
-CREATE 12
+SELECT 12
-- end_ignore
select c_relname, 1 as have_same_number_of_rows from before_reindex_crtabforalter_part_heap_btree group by c_oid, c_relname having count(*) = (select count(*) from gp_segment_configuration where role = 'p' and content > -1);
@@ -38,7 +38,7 @@ DELETE 8
1: BEGIN;
BEGIN
1: LOCK reindex_crtabforalter_part_heap_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtabforalter_part_heap_btree;
3&: alter table reindex_crtabforalter_part_heap_btree drop column amt;
1: COMMIT;
@@ -46,7 +46,7 @@ COMMIT
2<: <... completed>
REINDEX
3<: <... completed>
-ALTER
+ALTER TABLE
3: select count(*) from reindex_crtabforalter_part_heap_btree where id = 29;
count
-------
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_bitmap.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_bitmap.out
index ed48fe86ad7..c78baee8b1e 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_bitmap.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_bitmap.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_ao_bitmap;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_ao_bitmap (a INT) WITH (appendonly=true);
-CREATE
+CREATE TABLE
insert into reindex_crtab_ao_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_ao_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_ao_bitmap on reindex_crtab_ao_bitmap USING BITMAP(a);
-CREATE
+CREATE INDEX
-- @Description Ensures that a reindex table during reindex index operations is ok
--
@@ -17,7 +17,7 @@ DELETE 254
1: BEGIN;
BEGIN
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_ao_bitmap');
-CREATE 4
+SELECT 4
2: BEGIN;
BEGIN
1: REINDEX index idx_reindex_crtab_ao_bitmap;
@@ -32,7 +32,7 @@ REINDEX
-- validates that reindex command in session 1 indeed generates new
-- relfilenode for the index.
1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_ao_bitmap');
-INSERT 4
+INSERT 0 4
-- Expect two distinct relfilenodes per segment in old_relfilenodes table.
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname;
count | relname
@@ -44,7 +44,7 @@ COMMIT
-- After session 2 commits, the relfilenode it assigned to the index
-- is visible to session 1.
1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_ao_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_ao_bitmap');
-INSERT 4
+INSERT 0 4
-- Expect three distinct relfilenodes per segment in old_relfilenodes table.
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname;
count | relname
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_part_btree.out
index d17fb59d8fd..f9864b0f48b 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_ao_part_btree.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_part_ao_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_part_ao_btree ( id INTEGER, owner VARCHAR, description VARCHAR, property BOX, poli POLYGON, target CIRCLE, v VARCHAR, t TEXT, f FLOAT, p POINT, c CIRCLE, filler VARCHAR DEFAULT 'Big data is difficult to work with using most relational database management systems and desktop statistics and visualization packages, requiring instead massively parallel software running on tens, hundreds, or even thousands of servers.What is considered big data varies depending on the capabilities of the organization managing the set, and on the capabilities of the applications.This is here just to take up space so that we use more pages of data and sequential scans take a lot more time. ') with (appendonly=true) DISTRIBUTED BY (id) PARTITION BY RANGE (id) ( PARTITION p_one START('1') INCLUSIVE END ('10') EXCLUSIVE, DEFAULT PARTITION de_fault );
-CREATE
+CREATE TABLE
insert into reindex_crtab_part_ao_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ;
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_part_ao_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ;
-INSERT 1000
+INSERT 0 1000
create index on reindex_crtab_part_ao_btree(id);
-CREATE
+CREATE INDEX
-- @product_version gpdb: [4.3.4.0 -],4.3.4.0O2
-- @Description Ensures that a reindex table during reindex index operations is ok
--
@@ -16,11 +16,11 @@ CREATE
DELETE FROM reindex_crtab_part_ao_btree WHERE id < 128;
DELETE 254
3: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_ao_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_ao_btree%_idx');
-CREATE 12
+SELECT 12
1: BEGIN;
BEGIN
1: LOCK reindex_crtab_part_ao_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtab_part_ao_btree;
3:BEGIN;
BEGIN
@@ -34,7 +34,7 @@ REINDEX
-- validates that reindex command in session 3 indeed generates new
-- relfilenode for the index.
3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_ao_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_ao_btree%_idx');
-INSERT 12
+INSERT 0 12
-- Expect two distinct relfilenodes for one segment in old_relfilenodes table.
-- CBDB#26: This test actually assumes when txn1 commits, its lock is acquired by
-- txn3, and txn2 is blocked by it. Normally this is the case, but when the system
@@ -60,7 +60,7 @@ COMMIT
2<: <... completed>
REINDEX
3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_ao_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_ao_btree%_idx');
-INSERT 12
+INSERT 0 12
-- Expect three distinct relfilenodes per segment for "1_prt_de_fault" index.
-- CBDB#26: Same as L45.
3: select relname, relname = 'reindex_crtab_part_ao_btree_1_prt_de_fault_id_idx' and res.cnt in (2, 3) as special_case_for_de_fault_id_idx, case when relname = 'reindex_crtab_part_ao_btree_1_prt_de_fault_id_idx' then -1 else res.cnt end as count from (select distinct count(distinct relfilenode) as cnt, relname from old_relfilenodes group by dbid, relname) as res;
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_bitmap.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_bitmap.out
index 8c766da8fc8..85238f7aa65 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_bitmap.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_bitmap.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_aoco_bitmap;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_aoco_bitmap (a INT) WITH (appendonly=true, orientation=column);
-CREATE
+CREATE TABLE
insert into reindex_crtab_aoco_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_aoco_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_aoco_bitmap on reindex_crtab_aoco_bitmap USING BITMAP(a);
-CREATE
+CREATE INDEX
-- @Description Ensures that a reindex table during reindex index operations is ok
--
@@ -17,7 +17,7 @@ DELETE 254
1: BEGIN;
BEGIN
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_aoco_bitmap');
-CREATE 4
+SELECT 4
2: BEGIN;
BEGIN
1: REINDEX index idx_reindex_crtab_aoco_bitmap;
@@ -32,7 +32,7 @@ REINDEX
-- validates that reindex command in session 1 indeed generates new
-- relfilenode for the index.
1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_aoco_bitmap');
-INSERT 4
+INSERT 0 4
-- Expect two distinct relfilenodes per segment in old_relfilenodes table.
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname;
count | relname
@@ -44,7 +44,7 @@ COMMIT
-- After session 2 commits, the relfilenode it assigned to the index
-- is visible to session 1.
1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_aoco_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_aoco_bitmap');
-INSERT 4
+INSERT 0 4
-- Expect three distinct relfilenodes per segment in old_relfilenodes table.
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname;
count | relname
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_part_btree.out
index 7dd094ee895..627a989e6df 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_aoco_part_btree.out
@@ -1,25 +1,25 @@
DROP TABLE IF EXISTS reindex_crtab_part_aoco_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_part_aoco_btree ( id INTEGER, owner VARCHAR, description VARCHAR, property BOX, poli POLYGON, target CIRCLE, v VARCHAR, t TEXT, f FLOAT, p POINT, c CIRCLE, filler VARCHAR DEFAULT 'Big data is difficult to work with using most relational database management systems and desktop statistics and visualization packages, requiring instead massively parallel software running on tens, hundreds, or even thousands of servers.What is considered big data varies depending on the capabilities of the organization managing the set, and on the capabilities of the applications.This is here just to take up space so that we use more pages of data and sequential scans take a lot more time. ') with (appendonly=true,orientation=column) DISTRIBUTED BY (id) PARTITION BY RANGE (id) ( PARTITION p_one START('1') INCLUSIVE END ('10') EXCLUSIVE, DEFAULT PARTITION de_fault );
-CREATE
+CREATE TABLE
insert into reindex_crtab_part_aoco_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ;
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_part_aoco_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ;
-INSERT 1000
+INSERT 0 1000
create index on reindex_crtab_part_aoco_btree(id);
-CREATE
+CREATE INDEX
-- @Description Ensures that a reindex table during reindex index operations is ok
--
DELETE FROM reindex_crtab_part_aoco_btree WHERE id < 128;
DELETE 254
3: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_aoco_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_aoco_btree%_idx');
-CREATE 12
+SELECT 12
1: BEGIN;
BEGIN
1: LOCK reindex_crtab_part_aoco_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtab_part_aoco_btree;
3: BEGIN;
BEGIN
@@ -33,7 +33,7 @@ REINDEX
-- validates that reindex command in session 3 indeed generates new
-- relfilenode for the index.
3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_aoco_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_aoco_btree%_idx');
-INSERT 12
+INSERT 0 12
-- Expect two distinct relfilenodes for one segment in old_relfilenodes table.
-- CBDB#26: This test actually assumes when txn1 commits, its lock is acquired by
-- txn3, and txn2 is blocked by it. Normally this is the case, but when the system
@@ -59,7 +59,7 @@ COMMIT
2<: <... completed>
REINDEX
3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_aoco_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_aoco_btree%_idx');
-INSERT 12
+INSERT 0 12
-- Expect three distinct relfilenodes per segment for "1_prt_de_fault" index.
-- CBDB#26: Same as L45.
3: select relname, relname = 'reindex_crtab_part_aoco_btree_1_prt_de_fault_id_idx' and res.cnt in (2, 3) as special_case_for_de_fault_id_idx, case when relname = 'reindex_crtab_part_aoco_btree_1_prt_de_fault_id_idx' then -1 else res.cnt end as count from (select distinct count(distinct relfilenode) as cnt, relname from old_relfilenodes group by dbid, relname) as res;
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_bitmap.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_bitmap.out
index 0a9689cb030..c19661cea66 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_bitmap.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_bitmap.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_crtab_heap_bitmap;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_heap_bitmap (a INT);
-CREATE
+CREATE TABLE
insert into reindex_crtab_heap_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_heap_bitmap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_reindex_crtab_heap_bitmap on reindex_crtab_heap_bitmap USING BITMAP(a);
-CREATE
+CREATE INDEX
-- @Description Ensures that a reindex table during reindex index operations is ok
--
@@ -17,7 +17,7 @@ DELETE 254
1: BEGIN;
BEGIN
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_heap_bitmap');
-CREATE 4
+SELECT 4
2: BEGIN;
BEGIN
1: REINDEX index idx_reindex_crtab_heap_bitmap;
@@ -32,7 +32,7 @@ REINDEX
-- validates that reindex command in session 1 indeed generates new
-- relfilenode for the index.
1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_heap_bitmap');
-INSERT 4
+INSERT 0 4
-- Expect two distinct relfilenodes per segment in old_relfilenodes table.
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname;
count | relname
@@ -44,7 +44,7 @@ COMMIT
-- After session 2 commits, the relfilenode it assigned to the index
-- is visible to session 1.
1: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_reindex_crtab_heap_bitmap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_reindex_crtab_heap_bitmap');
-INSERT 4
+INSERT 0 4
-- Expect three distinct relfilenodes per segment in old_relfilenodes table.
1: select distinct count(distinct relfilenode), relname from old_relfilenodes group by dbid, relname;
count | relname
diff --git a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_part_btree.out b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_part_btree.out
index cf671261bce..31d58838e33 100644
--- a/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_part_btree.out
+++ b/src/test/isolation2/expected/reindex/reindextable_while_reindex_idx_heap_part_btree.out
@@ -1,25 +1,25 @@
DROP TABLE IF EXISTS reindex_crtab_part_heap_btree;
-DROP
+DROP TABLE
CREATE TABLE reindex_crtab_part_heap_btree ( id INTEGER, owner VARCHAR, description VARCHAR, property BOX, poli POLYGON, target CIRCLE, v VARCHAR, t TEXT, f FLOAT, p POINT, c CIRCLE, filler VARCHAR DEFAULT 'Big data is difficult to work with using most relational database management systems and desktop statistics and visualization packages, requiring instead massively parallel software running on tens, hundreds, or even thousands of servers.What is considered big data varies depending on the capabilities of the organization managing the set, and on the capabilities of the applications.This is here just to take up space so that we use more pages of data and sequential scans take a lot more time. ')DISTRIBUTED BY (id) PARTITION BY RANGE (id) ( PARTITION p_one START('1') INCLUSIVE END ('10') EXCLUSIVE, DEFAULT PARTITION de_fault );
-CREATE
+CREATE TABLE
insert into reindex_crtab_part_heap_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ;
-INSERT 1000
+INSERT 0 1000
insert into reindex_crtab_part_heap_btree (id, owner, description, property, poli, target) select i, 'user' || i, 'Testing GiST Index', '((3, 1300), (33, 1330))','( (22,660), (57, 650), (68, 660) )', '( (76, 76), 76)' from generate_series(1,1000) i ;
-INSERT 1000
+INSERT 0 1000
create index on reindex_crtab_part_heap_btree(id);
-CREATE
+CREATE INDEX
-- @Description Ensures that a reindex table during reindex index operations is ok
--
DELETE FROM reindex_crtab_part_heap_btree WHERE id < 128;
DELETE 254
3: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_heap_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_heap_btree%_idx');
-CREATE 12
+SELECT 12
1: BEGIN;
BEGIN
1: LOCK reindex_crtab_part_heap_btree IN ACCESS EXCLUSIVE MODE;
-LOCK
+LOCK TABLE
2&: REINDEX TABLE reindex_crtab_part_heap_btree;
3: BEGIN;
BEGIN
@@ -33,7 +33,7 @@ REINDEX
-- validates that reindex command in session 3 indeed generates new
-- relfilenode for the index.
3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_heap_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_heap_btree%_idx');
-INSERT 12
+INSERT 0 12
-- Expect two distinct relfilenodes for one segment in old_relfilenodes table.
-- CBDB#26: This test actually assumes when txn1 commits, its lock is acquired by
-- txn3, and txn2 is blocked by it. Normally this is the case, but when the system
@@ -59,7 +59,7 @@ COMMIT
2<: <... completed>
REINDEX
3: insert into old_relfilenodes (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'reindex_crtab_part_heap_btree%_idx' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'reindex_crtab_part_heap_btree%_idx');
-INSERT 12
+INSERT 0 12
-- Expect three distinct relfilenodes per segment for "1_prt_de_fault" index.
-- CBDB#26: Same as L45.
3: select relname, relname = 'reindex_crtab_part_heap_btree_1_prt_de_fault_id_idx' and res.cnt in (2, 3) as special_case_for_de_fault_id_idx, case when relname = 'reindex_crtab_part_heap_btree_1_prt_de_fault_id_idx' then -1 else res.cnt end as count from (select distinct count(distinct relfilenode) as cnt, relname from old_relfilenodes group by dbid, relname) as res;
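
Note: the reindextable_while_reindex_idx_* expectations above all rely on the same bookkeeping trick: REINDEX assigns the index a fresh relfilenode on every segment, so the tests snapshot pg_class before and after and count distinct values per dbid. A rough sketch of the probe, with `idx_name` as a placeholder:

    -- collect the index's storage identity from every segment plus the coordinator
    create temp table old_relfilenodes as (
        select gp_segment_id as dbid, relfilenode, oid, relname
        from gp_dist_random('pg_class') where relname = 'idx_name'
        union all
        select gp_segment_id as dbid, relfilenode, oid, relname
        from pg_class where relname = 'idx_name');   -- tag: SELECT 4, matching the hunks above
    REINDEX INDEX idx_name;
    -- insert a second snapshot, then expect count(distinct relfilenode) = 2 per dbid
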
diff --git a/src/test/isolation2/expected/reindex/repeatable_read_reindex_with_insert_heap.out b/src/test/isolation2/expected/reindex/repeatable_read_reindex_with_insert_heap.out
index 03fd18bee3f..c6a8ffd473a 100644
--- a/src/test/isolation2/expected/reindex/repeatable_read_reindex_with_insert_heap.out
+++ b/src/test/isolation2/expected/reindex/repeatable_read_reindex_with_insert_heap.out
@@ -1,24 +1,24 @@
DROP TABLE IF EXISTS reindex_serialize_tab_heap;
-DROP
+DROP TABLE
CREATE TABLE reindex_serialize_tab_heap (a INT, b text, c date, d numeric, e bigint, f char(10), g float) distributed by (a);
-CREATE
+CREATE TABLE
insert into reindex_serialize_tab_heap select i, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,10) i;
-INSERT 10
+INSERT 0 10
create index idxa_reindex_serialize_tab_heap on reindex_serialize_tab_heap(a);
-CREATE
+CREATE INDEX
create index idxb_reindex_serialize_tab_heap on reindex_serialize_tab_heap(b);
-CREATE
+CREATE INDEX
create index idxc_reindex_serialize_tab_heap on reindex_serialize_tab_heap(c);
-CREATE
+CREATE INDEX
create index idxd_reindex_serialize_tab_heap on reindex_serialize_tab_heap(d);
-CREATE
+CREATE INDEX
create index idxe_reindex_serialize_tab_heap on reindex_serialize_tab_heap(e);
-CREATE
+CREATE INDEX
create index idxf_reindex_serialize_tab_heap on reindex_serialize_tab_heap(f);
-CREATE
+CREATE INDEX
create index idxg_reindex_serialize_tab_heap on reindex_serialize_tab_heap(g);
-CREATE
+CREATE INDEX
-- start_ignore
SET gp_create_table_random_default_distribution=off;
SET
@@ -35,7 +35,7 @@ SET
2: BEGIN;
BEGIN
2: insert into reindex_serialize_tab_heap values(99,'green',now(),10,15.10);
-INSERT 1
+INSERT 0 1
2: COMMIT;
COMMIT
1: select a,b,d,e,f,g from reindex_serialize_tab_heap order by 1;
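
A reading aid for these isolation2 .out files, since the numeric prefixes look deceptively like list numbering: they are session handles. As I read the framework's conventions:

    1: <sql>     run <sql> synchronously in session 1
    2&: <sql>    launch <sql> in session 2 and return immediately
    2<:          block until session 2's backgrounded command completes
    1q: ...      quit session 1
    0U: <sql>    run <sql> over a utility-mode connection to the segment with content ID 0

So in the test above, session 2 inserts a row mid-flight while session 1 holds a repeatable-read snapshot across the REINDEX.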
diff --git a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_column_heap.out b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_column_heap.out
index e4218e9ccd5..160bfca9f4f 100644
--- a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_column_heap.out
+++ b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_column_heap.out
@@ -1,30 +1,30 @@
DROP TABLE IF EXISTS reindex_serialize_tab_heap;
-DROP
+DROP TABLE
CREATE TABLE reindex_serialize_tab_heap (a INT, b text, c date, d numeric, e bigint, f char(10), g float) distributed by (a);
-CREATE
+CREATE TABLE
insert into reindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
create index idxa_reindex_serialize_tab_heap on reindex_serialize_tab_heap(a);
-CREATE
+CREATE INDEX
create index idxb_reindex_serialize_tab_heap on reindex_serialize_tab_heap(b);
-CREATE
+CREATE INDEX
create index idxc_reindex_serialize_tab_heap on reindex_serialize_tab_heap(c);
-CREATE
+CREATE INDEX
create index idxd_reindex_serialize_tab_heap on reindex_serialize_tab_heap(d);
-CREATE
+CREATE INDEX
create index idxe_reindex_serialize_tab_heap on reindex_serialize_tab_heap(e);
-CREATE
+CREATE INDEX
create index idxf_reindex_serialize_tab_heap on reindex_serialize_tab_heap(f);
-CREATE
+CREATE INDEX
create index idxg_reindex_serialize_tab_heap on reindex_serialize_tab_heap(g);
-CREATE
+CREATE INDEX
-- start_ignore
SET gp_create_table_random_default_distribution=off;
SET
@@ -41,13 +41,13 @@ SET
dummy select to establish snapshot
(1 row)
1: alter table reindex_serialize_tab_heap drop column c;
-ALTER
+ALTER TABLE
1: COMMIT;
COMMIT
-- Remember index relfilenodes from master and segments before
-- reindex.
2: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'idx%_reindex_serialize_tab_heap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'idx%_reindex_serialize_tab_heap');
-CREATE 28
+SELECT 28
2: reindex table reindex_serialize_tab_heap;
REINDEX
2: COMMIT;
diff --git a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_ao.out b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_ao.out
index 93c1a82b07e..63a4317af6b 100644
--- a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_ao.out
+++ b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_ao.out
@@ -1,30 +1,30 @@
DROP TABLE IF EXISTS reindex_serialize_tab_ao;
-DROP
+DROP TABLE
CREATE TABLE reindex_serialize_tab_ao (a INT, b text, c date, d numeric, e bigint, f char(10), g float) with (appendonly=True) distributed by (a);
-CREATE
+CREATE TABLE
insert into reindex_serialize_tab_ao select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_serialize_tab_ao select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_serialize_tab_ao select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_serialize_tab_ao select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
create index idxa_reindex_serialize_tab_ao on reindex_serialize_tab_ao(a);
-CREATE
+CREATE INDEX
create index idxb_reindex_serialize_tab_ao on reindex_serialize_tab_ao(b);
-CREATE
+CREATE INDEX
create index idxc_reindex_serialize_tab_ao on reindex_serialize_tab_ao(c);
-CREATE
+CREATE INDEX
create index idxd_reindex_serialize_tab_ao on reindex_serialize_tab_ao(d);
-CREATE
+CREATE INDEX
create index idxe_reindex_serialize_tab_ao on reindex_serialize_tab_ao(e);
-CREATE
+CREATE INDEX
create index idxf_reindex_serialize_tab_ao on reindex_serialize_tab_ao(f);
-CREATE
+CREATE INDEX
create index idxg_reindex_serialize_tab_ao on reindex_serialize_tab_ao(g);
-CREATE
+CREATE INDEX
-- start_ignore
SET gp_create_table_random_default_distribution=off;
SET
@@ -41,13 +41,13 @@ SET
dummy select to establish snapshot
(1 row)
1: drop index idxg_reindex_serialize_tab_ao;
-DROP
+DROP INDEX
1: COMMIT;
COMMIT
-- Remember index relfilenodes from master and segments before
-- reindex.
2: create table old_ao_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'idx%_reindex_serialize_tab_ao' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'idx%_reindex_serialize_tab_ao');
-CREATE 28
+SELECT 28
2: reindex table reindex_serialize_tab_ao;
REINDEX
2: COMMIT;
diff --git a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_heap.out b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_heap.out
index 16382071948..fa3476c6cd5 100644
--- a/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_heap.out
+++ b/src/test/isolation2/expected/reindex/serializable_reindex_with_drop_index_heap.out
@@ -1,27 +1,27 @@
CREATE TABLE reindex_dropindex_serialize_tab_heap (a INT, b text, c date, d numeric, e bigint, f char(10), g float) distributed by (a);
-CREATE
+CREATE TABLE
insert into reindex_dropindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_dropindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_dropindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
insert into reindex_dropindex_serialize_tab_heap select 1, 'abc'||i, now(),i*100.43, i*-187, 'a'|| i*-1, i*2.23 from generate_series(1,1000) i;
-INSERT 1000
+INSERT 0 1000
create index idxa_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(a);
-CREATE
+CREATE INDEX
create index idxb_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(b);
-CREATE
+CREATE INDEX
create index idxc_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(c);
-CREATE
+CREATE INDEX
create index idxd_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(d);
-CREATE
+CREATE INDEX
create index idxe_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(e);
-CREATE
+CREATE INDEX
create index idxf_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(f);
-CREATE
+CREATE INDEX
create index idxg_reindex_dropindex_serialize_tab_heap on reindex_dropindex_serialize_tab_heap(g);
-CREATE
+CREATE INDEX
-- start_ignore
SET gp_create_table_random_default_distribution=off;
SET
@@ -38,13 +38,13 @@ SET
dummy select to establish snapshot
(1 row)
1: drop index idxg_reindex_dropindex_serialize_tab_heap;
-DROP
+DROP INDEX
1: COMMIT;
COMMIT
-- Remember index relfilenodes from master and segments before
-- reindex.
2: create table old_heap_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname like 'idx%_reindex_dropindex_serialize_tab_heap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname like 'idx%_reindex_dropindex_serialize_tab_heap');
-CREATE 28
+SELECT 28
2: reindex table reindex_dropindex_serialize_tab_heap;
REINDEX
2: COMMIT;
diff --git a/src/test/isolation2/expected/reindex/vacuum_analyze_while_reindex_ao_btree.out b/src/test/isolation2/expected/reindex/vacuum_analyze_while_reindex_ao_btree.out
index a7ab01a3611..fc69bb61f5b 100644
--- a/src/test/isolation2/expected/reindex/vacuum_analyze_while_reindex_ao_btree.out
+++ b/src/test/isolation2/expected/reindex/vacuum_analyze_while_reindex_ao_btree.out
@@ -1,10 +1,10 @@
DROP TABLE IF EXISTS reindex_ao;
-DROP
+DROP TABLE
CREATE TABLE reindex_ao (a INT) WITH (appendonly=true);
-CREATE
+CREATE TABLE
insert into reindex_ao select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
analyze reindex_ao;
ANALYZE
select 1 as reltuples_same_as_count from pg_class where relname = 'reindex_ao' and reltuples = (select count(*) from reindex_ao);
@@ -13,13 +13,13 @@ select 1 as reltuples_same_as_count from pg_class where relname = 'reindex_ao'
1
(1 row)
insert into reindex_ao select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
select 1 as reltuples_same_as_count from pg_class where relname = 'reindex_ao' and reltuples = (select count(*) from reindex_ao);
reltuples_same_as_count
-------------------------
(0 rows)
create index idx_btree_reindex_vacuum_analyze_ao on reindex_ao(a);
-CREATE
+CREATE INDEX
-- @Description Ensures that a vacuum during reindex operations is ok
--
@@ -30,7 +30,7 @@ BEGIN
-- Remember index relfilenodes from master and segments before
-- reindex.
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_btree_reindex_vacuum_analyze_ao' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_btree_reindex_vacuum_analyze_ao');
-CREATE 4
+SELECT 4
1: REINDEX index idx_btree_reindex_vacuum_analyze_ao;
REINDEX
2&: VACUUM ANALYZE reindex_ao;
@@ -58,4 +58,4 @@ COMMIT
1
(1 row)
3: INSERT INTO reindex_ao VALUES (0);
-INSERT 1
+INSERT 0 1
diff --git a/src/test/isolation2/expected/reindex/vacuum_while_reindex_ao_bitmap.out b/src/test/isolation2/expected/reindex/vacuum_while_reindex_ao_bitmap.out
index 6c3c5b14514..f8e2e01ae57 100644
--- a/src/test/isolation2/expected/reindex/vacuum_while_reindex_ao_bitmap.out
+++ b/src/test/isolation2/expected/reindex/vacuum_while_reindex_ao_bitmap.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_ao;
-DROP
+DROP TABLE
CREATE TABLE reindex_ao (a INT) WITH (appendonly=true);
-CREATE
+CREATE TABLE
insert into reindex_ao select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_ao select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_bitmap_reindex_ao on reindex_ao USING bitmap(a);
-CREATE
+CREATE INDEX
-- @Description Ensures that a vacuum during reindex operations is ok
--
@@ -19,7 +19,7 @@ BEGIN
-- Remember index relfilenodes from master and segments before
-- reindex.
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_bitmap_reindex_ao' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_bitmap_reindex_ao');
-CREATE 4
+SELECT 4
1: REINDEX index idx_bitmap_reindex_ao;
REINDEX
2&: VACUUM reindex_ao;
@@ -41,4 +41,4 @@ COMMIT
0
(1 row)
3: INSERT INTO reindex_ao VALUES (0);
-INSERT 1
+INSERT 0 1
diff --git a/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree.out b/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree.out
index c049f48081f..cf8e6e3dbd9 100644
--- a/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree.out
+++ b/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_heap;
-DROP
+DROP TABLE
CREATE TABLE reindex_heap (a INT);
-CREATE
+CREATE TABLE
insert into reindex_heap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
insert into reindex_heap select generate_series(1,1000);
-INSERT 1000
+INSERT 0 1000
create index idx_btree_reindex_heap on reindex_heap(a);
-CREATE
+CREATE INDEX
-- @Description Ensures that a vacuum during reindex operations is ok
--
@@ -19,7 +19,7 @@ BEGIN
-- Remember index relfilenodes from master and segments before
-- reindex.
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_btree_reindex_heap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_btree_reindex_heap');
-CREATE 4
+SELECT 4
1: REINDEX index idx_btree_reindex_heap;
REINDEX
2&: VACUUM reindex_heap;
@@ -41,4 +41,4 @@ COMMIT
0
(1 row)
3: INSERT INTO reindex_heap VALUES (0);
-INSERT 1
+INSERT 0 1
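
These vacuum-while-reindex variants all share one verification idiom: capture the index's relfilenode on the coordinator and on every segment (pg_class unioned with gp_dist_random('pg_class')) into a temp table, REINDEX, then confirm every copy now carries a different relfilenode, proving the index was physically rewritten. A condensed sketch of that check, assuming a psycopg2 cursor as in the earlier note:

    # Sketch: assert REINDEX assigned a new relfilenode on every node.
    REL_Q = """
        SELECT gp_segment_id AS dbid, relfilenode
        FROM gp_dist_random('pg_class') WHERE relname = %(idx)s
        UNION ALL
        SELECT gp_segment_id, relfilenode FROM pg_class WHERE relname = %(idx)s
    """

    def assert_reindex_rewrote(cur, idx):
        cur.execute(REL_Q, {"idx": idx})
        before = dict(cur.fetchall())            # dbid -> old relfilenode
        cur.execute("REINDEX INDEX " + idx)      # idx is trusted test input
        cur.execute(REL_Q, {"idx": idx})
        after = dict(cur.fetchall())             # dbid -> new relfilenode
        assert all(after[dbid] != node for dbid, node in before.items())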
diff --git a/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree_toast.out b/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree_toast.out
index 6b6254e7230..e17213f0b22 100644
--- a/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree_toast.out
+++ b/src/test/isolation2/expected/reindex/vacuum_while_reindex_heap_btree_toast.out
@@ -1,14 +1,14 @@
DROP TABLE IF EXISTS reindex_toast_heap;
-DROP
+DROP TABLE
CREATE TABLE reindex_toast_heap (a text, b int);
-CREATE
+CREATE TABLE
alter table reindex_toast_heap alter column a set storage external;
-ALTER
+ALTER TABLE
insert into reindex_toast_heap select repeat('123456789',10000), i from generate_series(1,100) i;
-INSERT 100
+INSERT 0 100
create index idx_btree_reindex_toast_heap on reindex_toast_heap(b);
-CREATE
+CREATE INDEX
-- @Description Ensures that a vacuum during reindex operations is ok
--
@@ -19,7 +19,7 @@ BEGIN
-- Remember index relfilenodes from master and segments before
-- reindex.
1: create temp table old_relfilenodes as (select gp_segment_id as dbid, relfilenode, oid, relname from gp_dist_random('pg_class') where relname = 'idx_btree_reindex_toast_heap' union all select gp_segment_id as dbid, relfilenode, oid, relname from pg_class where relname = 'idx_btree_reindex_toast_heap');
-CREATE 4
+SELECT 4
1: REINDEX index idx_btree_reindex_toast_heap;
REINDEX
2&: VACUUM reindex_toast_heap;
@@ -41,4 +41,4 @@ COMMIT
0
(1 row)
3: INSERT INTO reindex_toast_heap VALUES (0);
-INSERT 1
+INSERT 0 1
diff --git a/src/test/isolation2/expected/reindex_gpfastsequence.out b/src/test/isolation2/expected/reindex_gpfastsequence.out
index bb25ca9f4f0..e49fb145130 100644
--- a/src/test/isolation2/expected/reindex_gpfastsequence.out
+++ b/src/test/isolation2/expected/reindex_gpfastsequence.out
@@ -1,11 +1,11 @@
-- Test concurrent reindex gp_fastsequence and insert on an AO table
create table test_fastseqence ( a int, b char(20)) with (appendonly = true, orientation=column);
-CREATE
+CREATE TABLE
create index test_fastseqence_idx on test_fastseqence(b);
-CREATE
+CREATE INDEX
insert into test_fastseqence select i , 'aa'||i from generate_series(1,100) i;
-INSERT 100
+INSERT 0 100
select gp_inject_fault_infinite('reindex_relation', 'suspend', 2);
gp_inject_fault_infinite
@@ -32,7 +32,7 @@ select gp_inject_fault('reindex_relation', 'reset', 2);
1<: <... completed>
REINDEX
2<: <... completed>
-INSERT 100
+INSERT 0 100
-- Validate that gp_fastsequence works as expected after reindex
SELECT 1 AS oid_same_on_all_segs from gp_dist_random('pg_class') WHERE relname = 'gp_fastsequence_objid_objmod_index' GROUP BY oid having count(*) = (SELECT count(*) FROM gp_segment_configuration WHERE role='p' AND content > -1);
@@ -53,7 +53,7 @@ select last_sequence from gp_dist_random('gp_fastsequence') where objid = (selec
(6 rows)
insert into test_fastseqence select i , 'aa'||i from generate_series(1,100) i;
-INSERT 100
+INSERT 0 100
select last_sequence from gp_dist_random('gp_fastsequence') where objid = (select segrelid from pg_appendonly where relid = (select oid from pg_class where relname = 'test_fastseqence'));
last_sequence
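
Context for the gp_fastsequence test: gp_fastsequence is the per-segment-file counter that allocates row numbers for append-only tables, so the test suspends reindex_relation via gp_inject_fault, completes a concurrent INSERT, and then verifies that last_sequence still advances afterwards. The lookup chains pg_class -> pg_appendonly.segrelid -> gp_fastsequence.objid; as a standalone helper that might read (hedged sketch, same psycopg2 assumptions as above):

    # Sketch: per-segment AO row-number high-water marks for a table.
    def last_sequences(cur, table):
        cur.execute("""
            SELECT last_sequence FROM gp_dist_random('gp_fastsequence')
            WHERE objid = (SELECT segrelid FROM pg_appendonly
                           WHERE relid = %s::regclass)
        """, (table,))
        return [row[0] for row in cur.fetchall()]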
diff --git a/src/test/isolation2/expected/reorganize_after_ao_vacuum_skip_drop.out b/src/test/isolation2/expected/reorganize_after_ao_vacuum_skip_drop.out
index 604c84856a0..18cc24c26d1 100644
--- a/src/test/isolation2/expected/reorganize_after_ao_vacuum_skip_drop.out
+++ b/src/test/isolation2/expected/reorganize_after_ao_vacuum_skip_drop.out
@@ -3,9 +3,9 @@
-- rewrites the relation differently than other ALTER operations.
CREATE TABLE reorganize_after_ao_vacuum_skip_drop (a INT, b INT) WITH (appendonly=true);
-CREATE
+CREATE TABLE
INSERT INTO reorganize_after_ao_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 10) AS i;
-INSERT 10
+INSERT 0 10
DELETE FROM reorganize_after_ao_vacuum_skip_drop;
DELETE 10
@@ -28,7 +28,7 @@ BEGIN
2: VACUUM reorganize_after_ao_vacuum_skip_drop;
VACUUM
1: END;
-END
+COMMIT
-- We should see an aoseg in state 2 (AOSEG_STATE_AWAITING_DROP)
0U: SELECT segno, state FROM gp_toolkit.__gp_aoseg('reorganize_after_ao_vacuum_skip_drop');
@@ -40,7 +40,7 @@ END
-- The AO relation should be rewritten and AppendOnlyHash entry invalidated
1: ALTER TABLE reorganize_after_ao_vacuum_skip_drop SET WITH (reorganize=true);
-ALTER
+ALTER TABLE
0U: SELECT segno, state FROM gp_toolkit.__gp_aoseg('reorganize_after_ao_vacuum_skip_drop');
segno | state
-------+-------
@@ -49,7 +49,7 @@ ALTER
-- Check if insert goes into segno 1 instead of segno 2. If it did not
-- go into segno 1, there was a leak in the AppendOnlyHash entry.
1: INSERT INTO reorganize_after_ao_vacuum_skip_drop SELECT i as a, i as b FROM generate_series(1, 100) AS i;
-INSERT 100
+INSERT 0 100
0U: SELECT segno, tupcount > 0, state FROM gp_toolkit.__gp_aoseg('reorganize_after_ao_vacuum_skip_drop');
segno | ?column? | state
-------+----------+-------
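
On the states shown above: state 2 from gp_toolkit.__gp_aoseg is AOSEG_STATE_AWAITING_DROP, meaning VACUUM compacted the segment file but could not drop it while session 1's open transaction might still read it. The reorganize=true rewrite must also invalidate the cached AppendOnlyHash entry, which is exactly why the final INSERT is checked to land in segno 1 rather than a leaked segno 2. The tests issue the probe over a 0U utility connection because the aoseg metadata lives on the segments; the same probe, sketched with psycopg2:

    # Sketch: inspect append-only segment-file states.
    def aoseg_states(cur, table):
        cur.execute(
            "SELECT segno, tupcount, state FROM gp_toolkit.__gp_aoseg(%s::regclass)",
            (table,))
        return cur.fetchall()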
diff --git a/src/test/isolation2/expected/resgroup/resgroup_alter_concurrency.out b/src/test/isolation2/expected/resgroup/resgroup_alter_concurrency.out
index 7eecfacc404..817dc592962 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_alter_concurrency.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_alter_concurrency.out
@@ -1,28 +1,28 @@
-- create a resource group when gp_resource_manager is queue
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH(concurrency=1, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
-- After a 'q' command the client connection is disconnected but the
-- QD may still be alive, if we then query pg_stat_activity quick enough
-- we might still see this session with query ''.
-- A filter is applied to screen out these already-quit sessions.
CREATE OR REPLACE VIEW rg_activity_status AS SELECT rsgname, wait_event_type, state, query FROM pg_stat_activity WHERE rsgname='rg_concurrency_test' AND query <> '';
-CREATE
+CREATE VIEW
--
-- 1. increase concurrency after pending queries
--
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1;
-ALTER
+ALTER RESOURCE GROUP
11:SET ROLE role_concurrency_test;
SET
@@ -37,10 +37,10 @@ SET
22&:BEGIN;
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 2;
-ALTER
+ALTER RESOURCE GROUP
11:END;
-END
+COMMIT
11q: ...
21<: <... completed>
BEGIN
@@ -55,9 +55,9 @@ SELECT * FROM rg_activity_status;
(2 rows)
21:END;
-END
+COMMIT
22:END;
-END
+COMMIT
21q: ...
22q: ...
@@ -71,7 +71,7 @@ SELECT * FROM rg_activity_status;
--
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1;
-ALTER
+ALTER RESOURCE GROUP
11:SET ROLE role_concurrency_test;
SET
@@ -79,7 +79,7 @@ SET
BEGIN
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 2;
-ALTER
+ALTER RESOURCE GROUP
21:SET ROLE role_concurrency_test;
SET
@@ -98,7 +98,7 @@ SELECT * FROM rg_activity_status;
(3 rows)
11:END;
-END
+COMMIT
11q: ...
22<: <... completed>
BEGIN
@@ -111,9 +111,9 @@ SELECT * FROM rg_activity_status;
(2 rows)
21:END;
-END
+COMMIT
22:END;
-END
+COMMIT
21q: ...
22q: ...
@@ -126,7 +126,7 @@ SELECT * FROM rg_activity_status;
-- 3. decrease concurrency
--
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 10;
-ALTER
+ALTER RESOURCE GROUP
11:SET ROLE role_concurrency_test;
SET
11:BEGIN;
@@ -153,7 +153,7 @@ SET
BEGIN
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1;
-ALTER
+ALTER RESOURCE GROUP
11q: ...
12q: ...
@@ -174,13 +174,13 @@ SELECT pg_sleep(1);
-- 4. increase concurrency from 0
--
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
CREATE RESOURCE GROUP rg_concurrency_test WITH(concurrency=0, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
11:SET ROLE role_concurrency_test;
SET
@@ -192,7 +192,7 @@ SELECT * FROM rg_activity_status;
(1 row)
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1;
-ALTER
+ALTER RESOURCE GROUP
11<: <... completed>
BEGIN
@@ -203,7 +203,7 @@ SELECT * FROM rg_activity_status;
(1 row)
11:END;
-END
+COMMIT
11q: ...
--
@@ -213,14 +213,14 @@ END
--
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1;
-ALTER
+ALTER RESOURCE GROUP
SELECT * FROM rg_activity_status;
rsgname | wait_event_type | state | query
---------+-----------------+-------+-------
(0 rows)
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 0;
-ALTER
+ALTER RESOURCE GROUP
SELECT * FROM rg_activity_status;
rsgname | wait_event_type | state | query
---------+-----------------+-------+-------
@@ -233,7 +233,7 @@ SELECT * FROM rg_activity_status;
--
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1;
-ALTER
+ALTER RESOURCE GROUP
SELECT * FROM rg_activity_status;
rsgname | wait_event_type | state | query
---------+-----------------+-------+-------
@@ -250,7 +250,7 @@ SELECT * FROM rg_activity_status;
(1 row)
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 0;
-ALTER
+ALTER RESOURCE GROUP
SELECT * FROM rg_activity_status;
rsgname | wait_event_type | state | query
---------------------+-----------------+---------------------+--------
@@ -258,7 +258,7 @@ SELECT * FROM rg_activity_status;
(1 row)
11:END;
-END
+COMMIT
11q: ...
--
@@ -268,7 +268,7 @@ END
--
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 1;
-ALTER
+ALTER RESOURCE GROUP
SELECT * FROM rg_activity_status;
rsgname | wait_event_type | state | query
---------+-----------------+-------+-------
@@ -289,7 +289,7 @@ SELECT * FROM rg_activity_status;
(2 rows)
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 0;
-ALTER
+ALTER RESOURCE GROUP
SELECT * FROM rg_activity_status;
rsgname | wait_event_type | state | query
---------------------+-----------------+---------------------+--------
@@ -298,7 +298,7 @@ SELECT * FROM rg_activity_status;
(2 rows)
11:END;
-END
+COMMIT
11q: ...
SELECT * FROM rg_activity_status;
rsgname | wait_event_type | state | query
@@ -321,51 +321,51 @@ SELECT * FROM rg_activity_status;
-- 6: drop a resgroup with concurrency=0 and pending queries
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=0, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
61:SET ROLE role_concurrency_test;
SET
61&:BEGIN;
ALTER ROLE role_concurrency_test RESOURCE GROUP none;
-ALTER
+ALTER ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
61<: <... completed>
BEGIN
61:END;
-END
+COMMIT
61q: ...
-- 7: drop a role with concurrency=0 and pending queries
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=0, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
61:SET ROLE role_concurrency_test;
SET
61&:BEGIN;
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
61<: <... completed>
ERROR: role with Oid 213301 was dropped
@@ -375,7 +375,7 @@ DETAIL: Cannot execute commands anymore, please terminate this session.
-- cleanup
-- start_ignore
DROP VIEW rg_activity_status;
-DROP
+DROP VIEW
DROP ROLE role_concurrency_test;
DROP
DROP RESOURCE GROUP rg_concurrency_test;
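
Every scenario in this file pivots on the same four counters from gp_toolkit.gp_resgroup_status (num_running, num_queueing, num_queued, num_executed), sampled while sessions queue for a slot: raising CONCURRENCY wakes queued transactions immediately, while lowering it only takes effect as running transactions finish. The polling query the tests repeat, wrapped as a helper (sketch, reusing the psycopg2 assumptions above):

    # Sketch: poll a resource group's slot counters.
    def rg_counters(cur, group):
        cur.execute("""
            SELECT num_running, num_queueing, num_queued, num_executed
            FROM gp_toolkit.gp_resgroup_status s
            JOIN pg_resgroup r ON s.groupid = r.oid
            WHERE r.rsgname = %s
        """, (group,))
        return cur.fetchone()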
diff --git a/src/test/isolation2/expected/resgroup/resgroup_assign_slot_fail.out b/src/test/isolation2/expected/resgroup/resgroup_assign_slot_fail.out
index a0f8f82984d..0d137952aea 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_assign_slot_fail.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_assign_slot_fail.out
@@ -2,15 +2,15 @@
-- test the slot will be unassigned correctly.
DROP ROLE IF EXISTS role_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_test;
ERROR: resource group "rg_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_test WITH (concurrency=2, cpu_max_percent=10);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_test RESOURCE GROUP rg_test;
-CREATE
+CREATE ROLE
1: SET ROLE role_test;
SET
@@ -35,14 +35,14 @@ ERROR: fault triggered, fault name:'resgroup_assigned_on_master' fault type:'er
2: BEGIN;
BEGIN
1: END;
-END
+COMMIT
2: END;
-END
+COMMIT
1q: ...
2q: ...
--clean up
DROP ROLE role_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_test;
-DROP
+DROP RESOURCE GROUP
diff --git a/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v1.out b/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v1.out
index 1120930c158..01e019377d6 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v1.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v1.out
@@ -75,7 +75,7 @@ def check_group_shares(name): cpu_weight = int(plpy.execute(''' SELECT value FRO
# check default groups check_group_shares('default_group') check_group_shares('admin_group') check_group_shares('system_group')
# check user groups check_group_shares('rg1_cpu_test') check_group_shares('rg2_cpu_test')
return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- check whether the queries running on the specific core set
@@ -92,7 +92,7 @@ expect_cpu = []
for token in tokens: if token.find('-') != -1: interval = token.split("-") num1 = interval[0] num2 = interval[1] for num in range(int(num1), int(num2) + 1): expect_cpu.append(str(num)) else: expect_cpu.append(token) sess_ids = get_all_sess_ids_in_group(grp)
for i in range(1000): time.sleep(0.01) if not check(expect_cpu, sess_ids): return False
return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- create a resource group that contains all the cpu cores
0: CREATE OR REPLACE FUNCTION create_allcores_group(grp TEXT) RETURNS BOOL AS $$ import subprocess
@@ -100,7 +100,7 @@ file = "/sys/fs/cgroup/cpuset/gpdb/cpuset.cpus" fd = open(file) line = fd.readli
# plpy SPI will always start a transaction, but res group cannot be created in a transaction. ret = subprocess.run(['psql', 'postgres', '-c' , '{}'.format(sql)], stdout=subprocess.PIPE) if ret.returncode != 0: plpy.error('failed to create resource group.\n {} \n {}'.format(ret.stdout, ret.stderr))
file = "/sys/fs/cgroup/cpuset/gpdb/1/cpuset.cpus" fd = open(file) line = fd.readline() fd.close() line = line.strip('\n') if line != "0": return False
return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- check whether the cpuset value in cgroup is valid according to the rule
0: CREATE OR REPLACE FUNCTION check_cpuset_rules() RETURNS BOOL AS $$ def get_all_group_which_cpuset_is_set(): sql = "select groupid,cpuset from gp_toolkit.gp_resgroup_config where cpuset != '-1'" result = plpy.execute(sql) return result
@@ -111,7 +111,7 @@ config_groups = get_all_group_which_cpuset_is_set() groups_cpuset = set([])
if not (config_cpuset.issubset(cgroup_cpuset) and cgroup_cpuset.issubset(config_cpuset)): return False
# check whether cpuset in resource group union default group is universal set default_cpuset = get_cgroup_cpuset(1) all_cpuset = get_cgroup_cpuset(0) if not (default_cpuset | groups_cpuset).issubset(all_cpuset): return False if not all_cpuset.issubset(default_cpuset | groups_cpuset): return False # if all the cores are allocated to resource group, default group must have a core left if len(default_cpuset & groups_cpuset) > 0 and (len(default_cpuset) != 1 or (not default_cpuset.issubset(all_cpuset))): return False
return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
0: CREATE OR REPLACE FUNCTION is_session_in_group(pid integer, groupname text) RETURNS BOOL AS $$ import subprocess
@@ -123,4 +123,4 @@ path = "/sys/fs/cgroup/cpu/gpdb/{}/cgroup.procs".format(groupid) stdout = subpro
return set(session_pids).issubset(set(cgroups_pids))
for host in hosts: if not get_result(host): return False return True
$$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
diff --git a/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v2.out b/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v2.out
index 779e4dd1bcf..1ca18c1414c 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v2.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_auxiliary_tools_v2.out
@@ -74,7 +74,7 @@ def check_group_shares(name): cpu_weight = int(plpy.execute(''' SELECT value FRO
# check default groups check_group_shares('default_group') check_group_shares('admin_group') check_group_shares('system_group')
# check user groups check_group_shares('rg1_cpu_test') check_group_shares('rg2_cpu_test')
return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- check whether the queries running on the specific core set
@@ -91,7 +91,7 @@ expect_cpu = []
for token in tokens: if token.find('-') != -1: interval = token.split("-") num1 = interval[0] num2 = interval[1] for num in range(int(num1), int(num2) + 1): expect_cpu.append(str(num)) else: expect_cpu.append(token) sess_ids = get_all_sess_ids_in_group(grp)
for i in range(1000): time.sleep(0.01) if not check(expect_cpu, sess_ids): return False
return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- create a resource group that contains all the cpu cores
0: CREATE OR REPLACE FUNCTION create_allcores_group(grp TEXT) RETURNS BOOL AS $$ import subprocess
@@ -99,7 +99,7 @@ file = "/sys/fs/cgroup/gpdb/cpuset.cpus" fd = open(file) line = fd.readline() fd
# plpy SPI will always start a transaction, but res group cannot be created in a transaction. ret = subprocess.run(['psql', 'postgres', '-c' , '{}'.format(sql)], stdout=subprocess.PIPE) if ret.returncode != 0: plpy.error('failed to create resource group.\n {} \n {}'.format(ret.stdout, ret.stderr))
file = "/sys/fs/cgroup/gpdb/1/cpuset.cpus" fd = open(file) line = fd.readline() fd.close() line = line.strip('\n') if line != "0": return False
return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- check whether the cpuset value in cgroup is valid according to the rule
0: CREATE OR REPLACE FUNCTION check_cpuset_rules() RETURNS BOOL AS $$ def get_all_group_which_cpuset_is_set(): sql = "select groupid,cpuset from gp_toolkit.gp_resgroup_config where cpuset != '-1'" result = plpy.execute(sql) return result
@@ -110,7 +110,7 @@ config_groups = get_all_group_which_cpuset_is_set() groups_cpuset = set([])
if not (config_cpuset.issubset(cgroup_cpuset) and cgroup_cpuset.issubset(config_cpuset)): return False
# check whether cpuset in resource group union default group is universal set default_cpuset = get_cgroup_cpuset(1) all_cpuset = get_cgroup_cpuset(0) if not (default_cpuset | groups_cpuset).issubset(all_cpuset): return False if not all_cpuset.issubset(default_cpuset | groups_cpuset): return False # if all the cores are allocated to resource group, default group must have a core left if len(default_cpuset & groups_cpuset) > 0 and (len(default_cpuset) != 1 or (not default_cpuset.issubset(all_cpuset))): return False
return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
0: CREATE OR REPLACE FUNCTION is_session_in_group(pid integer, groupname text) RETURNS BOOL AS $$ import subprocess
@@ -122,26 +122,4 @@ path = "/sys/fs/cgroup/gpdb/{}/cgroup.procs".format(groupid) stdout = subprocess
return set(session_pids).issubset(set(cgroups_pids))
for host in hosts: if not get_result(host): return False return True
$$ LANGUAGE plpython3u;
-CREATE
-
-0: CREATE OR REPLACE FUNCTION check_cgroup_io_max(groupname text, tablespace_name text, parameters text) RETURNS BOOL AS $$ import ctypes import os
-postgres = ctypes.CDLL(None) get_bdi_of_path = postgres['get_bdi_of_path'] get_tablespace_path = postgres['get_tablespace_path'] get_tablespace_oid = postgres['get_tablespace_oid']
-# get group oid sql = "select groupid from gp_toolkit.gp_resgroup_config where groupname = '%s'" % groupname result = plpy.execute(sql) groupid = result[0]['groupid']
-cgroup_path = "/sys/fs/cgroup/gpdb/%d" % groupid
-# get path of tablespace spcoid = get_tablespace_oid(tablespace_name.encode('utf-8'), False) location = ctypes.cast(get_tablespace_path(spcoid), ctypes.c_char_p).value
-if location == "": return False
-bdi = get_bdi_of_path(location) major = os.major(bdi) minor = os.minor(bdi)
-match_string = "{}:{} {}".format(major, minor, parameters) match = False with open(os.path.join(cgroup_path, "io.max")) as f: for line in f.readlines(): line = line.strip() if match_string == line: match = True break
-return match
-$$ LANGUAGE plpython3u;
-CREATE
-
-0: CREATE OR REPLACE FUNCTION mkdir(dirname text) RETURNS BOOL AS $$ import os
-if os.path.exists(dirname): return True
-try: os.makedirs(dirname) except Exception as e: plpy.error("cannot create dir {}".format(e)) else: return True $$ LANGUAGE plpython3u;
-CREATE
-
-0: CREATE OR REPLACE FUNCTION rmdir(dirname text) RETURNS BOOL AS $$ import shutil import os
-if not os.path.exists(dirname): return True
-try: shutil.rmtree(dirname) except Exception as e: plpy.error("cannot remove dir {}".format(e)) else: return True $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
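
The only substantive difference between the _v1 and _v2 helper files above is the cgroup mount layout they probe: the v1 helpers read per-controller paths such as /sys/fs/cgroup/cpuset/gpdb/cpuset.cpus and /sys/fs/cgroup/cpu/gpdb/<groupid>/cgroup.procs, while the v2 helpers use the unified hierarchy, where cpuset.cpus, cgroup.procs, and io.max all sit under /sys/fs/cgroup/gpdb/<groupid>/ (this patch also drops the io.max, mkdir, and rmdir helpers from the v2 file). A layout-agnostic reader might look like (sketch; path roots taken from the functions above):

    # Sketch: read a resource group's cpuset under either cgroup layout.
    import os

    def read_cpuset(groupid, cgroup_v2=True):
        base = "/sys/fs/cgroup/gpdb" if cgroup_v2 else "/sys/fs/cgroup/cpuset/gpdb"
        with open(os.path.join(base, str(groupid), "cpuset.cpus")) as f:
            return f.read().strip()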
diff --git a/src/test/isolation2/expected/resgroup/resgroup_bypass.out b/src/test/isolation2/expected/resgroup/resgroup_bypass.out
index 5cff41d745f..a9c294edb2c 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_bypass.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_bypass.out
@@ -11,14 +11,20 @@ ERROR: resource group "rg_bypass" does not exist
-- create a resource group with concurrency = 1.
CREATE RESOURCE GROUP rg_bypass WITH(cpu_max_percent=20, concurrency=1);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_bypass RESOURCE GROUP rg_bypass;
-CREATE
+CREATE ROLE
SET ROLE role_bypass;
SET
CREATE TABLE t_bypass(a int) distributed by (a);
-CREATE
+CREATE TABLE
+
+-- gp_resource_group_bypass can only be set by a superuser;
+-- the SET statement below will error out
+set gp_resource_group_bypass = 1;
+ERROR: permission denied to set parameter "gp_resource_group_bypass"
+
RESET ROLE;
RESET
@@ -164,7 +170,7 @@ SELECT gp_inject_fault('func_init_plan_end', 'reset', 1);
(1 row)
1<: <... completed>
-INSERT 1
+INSERT 0 1
1q: ...
2q: ...
@@ -221,7 +227,7 @@ SELECT gp_inject_fault('func_init_plan_end', 'reset', 1);
-- min_cost will not work for above.
-- alter resource group's min_cost
ALTER RESOURCE GROUP rg_bypass SET min_cost 500;
-ALTER
+ALTER RESOURCE GROUP
ANALYZE t_bypass;
ANALYZE
-- Session1: for quries with cost under the min_cost limit, they will be unassigned and bypassed.
@@ -279,9 +285,9 @@ SELECT gp_inject_fault('func_init_plan_end', 'reset', 1);
-- cleanup
-- start_ignore
DROP TABLE t_bypass;
-DROP
+DROP TABLE
DROP ROLE role_bypass;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_bypass;
-DROP
+DROP RESOURCE GROUP
-- end_ignore
diff --git a/src/test/isolation2/expected/resgroup/resgroup_bypass_catalog.out b/src/test/isolation2/expected/resgroup/resgroup_bypass_catalog.out
index f32527713f7..2299c072a5c 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_bypass_catalog.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_bypass_catalog.out
@@ -1,10 +1,10 @@
CREATE RESOURCE GROUP rg_test_catalog WITH (CONCURRENCY=2, CPU_MAX_PERCENT=10);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_test_catalog RESOURCE GROUP rg_test_catalog;
-CREATE
+CREATE ROLE
CREATE FUNCTION rg_test_udf() RETURNS integer AS $$ return 1 $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- take 1 slot
1: SET ROLE role_test_catalog;
diff --git a/src/test/isolation2/expected/resgroup/resgroup_cancel_terminate_concurrency.out b/src/test/isolation2/expected/resgroup/resgroup_cancel_terminate_concurrency.out
index e1c3c77403c..25e5363080b 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_cancel_terminate_concurrency.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_cancel_terminate_concurrency.out
@@ -1,18 +1,18 @@
-- test1: cancel a query that is waiting for a slot
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE OR REPLACE VIEW rg_concurrency_view AS SELECT wait_event_type IS NOT NULL as waiting, wait_event_type, state, query, rsgname FROM pg_stat_activity WHERE rsgname='rg_concurrency_test';
-CREATE
+CREATE VIEW
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
1:SET ROLE role_concurrency_test;
SET
1:BEGIN;
@@ -37,7 +37,7 @@ SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE wait_event_type='Resou
t
(2 rows)
1:END;
-END
+COMMIT
2<: <... completed>
ERROR: canceling statement due to user request
3<: <... completed>
@@ -52,22 +52,22 @@ SELECT * FROM rg_concurrency_view;
2q: ...
3q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test2: terminate a query that is waiting for a slot
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
1:SET ROLE role_concurrency_test;
SET
1:BEGIN;
@@ -92,7 +92,7 @@ SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE wait_event_type='Re
t
(2 rows)
1:END;
-END
+COMMIT
2<: <... completed>
FATAL: terminating connection due to administrator command
server closed the connection unexpectedly
@@ -111,22 +111,22 @@ SELECT * FROM rg_concurrency_view;
2q: ...
3q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test3: cancel a query that is running
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
1:SET ROLE role_concurrency_test;
SET
1&:SELECT pg_sleep(10000);
@@ -172,22 +172,22 @@ SELECT * FROM rg_concurrency_view;
6q: ...
7q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test4: terminate a query that is running
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
1:SET ROLE role_concurrency_test;
SET
1&:SELECT pg_sleep(10000);
@@ -239,26 +239,26 @@ SELECT * FROM rg_concurrency_view;
6q: ...
7q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test5: terminate a query waiting for a slot, that opens a transaction on exit callback
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
1:SET ROLE role_concurrency_test;
SET
1:CREATE TEMP TABLE tmp(a INT);
-CREATE
+CREATE TABLE
2:SET ROLE role_concurrency_test;
SET
2:BEGIN;
@@ -292,9 +292,9 @@ SELECT * FROM rg_concurrency_view;
1q: ...
2q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
DROP VIEW rg_concurrency_view;
-DROP
+DROP VIEW
diff --git a/src/test/isolation2/expected/resgroup/resgroup_concurrency.out b/src/test/isolation2/expected/resgroup/resgroup_concurrency.out
index dda3764ba3d..556d8a757b4 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_concurrency.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_concurrency.out
@@ -1,15 +1,15 @@
-- test1: test gp_toolkit.gp_resgroup_status and pg_stat_activity
-- create a resource group when gp_resource_manager is queue
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
-- no query has been assigned to this group
@@ -44,13 +44,13 @@ SELECT wait_event from pg_stat_activity where query = 'BEGIN;' and state = 'acti
rg_concurrency_test
(1 row)
2:END;
-END
+COMMIT
3:END;
-END
+COMMIT
4<: <... completed>
BEGIN
4:END;
-END
+COMMIT
2q: ...
3q: ...
4q: ...
@@ -60,24 +60,24 @@ SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_to
rg_concurrency_test | 0 | 0 | 1 | 3
(1 row)
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test2: test alter concurrency
-- Create a resource group with concurrency=2. Prepare 2 running transactions and 1 queueing transactions.
-- Alter concurrency 2->3, the queueing transaction will be woken up, the 'value' of pg_resgroupcapability
-- will be set to 3.
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
12:SET ROLE role_concurrency_test;
SET
12:BEGIN;
@@ -101,7 +101,7 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur
2
(1 row)
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 3;
-ALTER
+ALTER RESOURCE GROUP
SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_toolkit.gp_resgroup_status s, pg_resgroup r WHERE s.groupid=r.oid AND r.rsgname='rg_concurrency_test';
rsgname | num_running | num_queueing | num_queued | num_executed
---------------------+-------------+--------------+------------+--------------
@@ -113,33 +113,33 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur
3
(1 row)
12:END;
-END
+COMMIT
13:END;
-END
+COMMIT
14<: <... completed>
BEGIN
14:END;
-END
+COMMIT
12q: ...
13q: ...
14q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test3: test alter concurrency
-- Create a resource group with concurrency=3. Prepare 3 running transactions, and 1 queueing transaction.
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=3, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
22:SET ROLE role_concurrency_test;
SET
22:BEGIN;
@@ -167,7 +167,7 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur
(1 row)
-- Alter concurrency 3->2, the 'value' of pg_resgroupcapability will be set to 2.
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 2;
-ALTER
+ALTER RESOURCE GROUP
SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concurrency_test';
concurrency
-------------
@@ -175,7 +175,7 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur
(1 row)
-- When one transaction is finished, queueing transaction won't be woken up. There're 2 running transactions and 1 queueing transaction.
24:END;
-END
+COMMIT
SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_toolkit.gp_resgroup_status s, pg_resgroup r WHERE s.groupid=r.oid AND r.rsgname='rg_concurrency_test';
rsgname | num_running | num_queueing | num_queued | num_executed
---------------------+-------------+--------------+------------+--------------
@@ -190,7 +190,7 @@ SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_to
(1 row)
-- Finish another transaction, one queueing transaction will be woken up, there're 2 running transactions and 1 queueing transaction.
22:END;
-END
+COMMIT
SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_toolkit.gp_resgroup_status s, pg_resgroup r WHERE s.groupid=r.oid AND r.rsgname='rg_concurrency_test';
rsgname | num_running | num_queueing | num_queued | num_executed
---------------------+-------------+--------------+------------+--------------
@@ -198,7 +198,7 @@ SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_to
(1 row)
-- Alter concurrency 2->2, the 'value' of pg_resgroupcapability will be set to 2.
ALTER RESOURCE GROUP rg_concurrency_test SET CONCURRENCY 2;
-ALTER
+ALTER RESOURCE GROUP
SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concurrency_test';
concurrency
-------------
@@ -206,7 +206,7 @@ SELECT concurrency FROM gp_toolkit.gp_resgroup_config WHERE groupname='rg_concur
(1 row)
-- Finish another transaction, one queueing transaction will be woken up, there're 2 running transactions and 0 queueing transaction.
23:END;
-END
+COMMIT
SELECT r.rsgname, num_running, num_queueing, num_queued, num_executed FROM gp_toolkit.gp_resgroup_status s, pg_resgroup r WHERE s.groupid=r.oid AND r.rsgname='rg_concurrency_test';
rsgname | num_running | num_queueing | num_queued | num_executed
---------------------+-------------+--------------+------------+--------------
@@ -217,30 +217,30 @@ BEGIN
25<: <... completed>
BEGIN
25:END;
-END
+COMMIT
24:END;
-END
+COMMIT
22q: ...
23q: ...
24q: ...
25q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test4: concurrently drop resource group
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=2, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
-- DROP should fail if there're running transactions
32:SET ROLE role_concurrency_test;
@@ -248,32 +248,32 @@ SET
32:BEGIN;
BEGIN
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: cannot drop resource group "rg_concurrency_test"
HINT: The resource group is currently managing 1 query(ies) and cannot be dropped.
Terminate the queries first or try dropping the group later.
The view pg_stat_activity tracks the queries managed by resource groups.
32:END;
-END
+COMMIT
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test5: QD exit before QE
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
51:SET ROLE role_concurrency_test;
SET
51:BEGIN;
@@ -299,22 +299,22 @@ ERROR: canceling statement due to user request
51q: ...
52q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test6: cancel a query that is waiting for a slot
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
51:SET ROLE role_concurrency_test;
SET
51:BEGIN;
@@ -327,23 +327,23 @@ SET
BEGIN
52q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test7: terminate a query that is waiting for a slot
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
61:SET ROLE role_concurrency_test;
SET
61:BEGIN;
@@ -364,22 +364,22 @@ server closed the connection unexpectedly
61q: ...
62q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
-- test8: create a resgroup with concurrency=0
DROP ROLE IF EXISTS role_concurrency_test;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_concurrency_test;
ERROR: resource group "rg_concurrency_test" does not exist
-- end_ignore
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=0, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
61:SET ROLE role_concurrency_test;
SET
61&:BEGIN;
@@ -392,34 +392,34 @@ SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE wait_event_type='Resou
ERROR: canceling statement due to user request
61q: ...
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
--
-- Test cursors, pl/* functions only take one slot.
--
-- set concurrency to 1
CREATE RESOURCE GROUP rg_concurrency_test WITH (concurrency=1, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_concurrency_test RESOURCE GROUP rg_concurrency_test;
-CREATE
+CREATE ROLE
-- declare cursors and verify that it only takes one resource group slot
71:SET ROLE TO role_concurrency_test;
SET
71:CREATE TABLE foo_concurrency_test as select i as c1 , i as c2 from generate_series(1, 1000) i;
-CREATE 1000
+SELECT 1000
71:CREATE TABLE bar_concurrency_test as select i as c1 , i as c2 from generate_series(1, 1000) i;
-CREATE 1000
+SELECT 1000
71:BEGIN;
BEGIN
71:DECLARE c1 CURSOR for select c1, c2 from foo_concurrency_test order by c1 limit 10;
-DECLARE
+DECLARE CURSOR
71:DECLARE c2 CURSOR for select c1, c2 from bar_concurrency_test order by c1 limit 10;
-DECLARE
+DECLARE CURSOR
71:DECLARE c3 CURSOR for select count(*) from foo_concurrency_test t1, bar_concurrency_test t2 where t1.c2 = t2.c2;
-DECLARE
+DECLARE CURSOR
71:Fetch ALL FROM c1;
c1 | c2
----+----
@@ -454,11 +454,11 @@ DECLARE
1000
(1 row)
71:END;
-END
+COMMIT
-- create a pl function and verify that it only takes one resource group slot.
CREATE OR REPLACE FUNCTION func_concurrency_test () RETURNS integer as /*in func*/ $$ /*in func*/ DECLARE /*in func*/ tmprecord RECORD; /*in func*/ ret integer; /*in func*/ BEGIN /*in func*/ SELECT count(*) INTO ret FROM foo_concurrency_test; /*in func*/ FOR tmprecord IN SELECT * FROM bar_concurrency_test LOOP /*in func*/ SELECT count(*) INTO ret FROM foo_concurrency_test; /*in func*/ END LOOP; /*in func*/ /*in func*/ select 1/0; /*in func*/ EXCEPTION /*in func*/ WHEN division_by_zero THEN /*in func*/ SELECT count(*) INTO ret FROM foo_concurrency_test; /*in func*/ raise NOTICE 'divided by zero'; /*in func*/ RETURN ret; /*in func*/ END; /*in func*/ $$ /*in func*/ LANGUAGE plpgsql;
-CREATE
+CREATE FUNCTION
71: select func_concurrency_test();
func_concurrency_test
@@ -484,7 +484,7 @@ PREPARE
2 | 2
(1 row)
71:END;
-END
+COMMIT
71:PREPARE p3 (integer) as select * from foo_concurrency_test where c2=$1;
PREPARE
71:PREPARE p4 (integer) as select * from bar_concurrency_test where c2=$1;
@@ -501,10 +501,10 @@ PREPARE
(1 row)
DROP TABLE foo_concurrency_test;
-DROP
+DROP TABLE
DROP TABLE bar_concurrency_test;
-DROP
+DROP TABLE
DROP ROLE role_concurrency_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg_concurrency_test;
-DROP
+DROP RESOURCE GROUP
diff --git a/src/test/isolation2/expected/resgroup/resgroup_cpu_max_percent.out b/src/test/isolation2/expected/resgroup/resgroup_cpu_max_percent.out
index ae21059fcb6..9c5c2d47741 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_cpu_max_percent.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_cpu_max_percent.out
@@ -1,17 +1,17 @@
-- start_ignore
DROP VIEW IF EXISTS cancel_all;
-DROP
+DROP VIEW
DROP ROLE IF EXISTS role1_cpu_test;
-DROP
+DROP ROLE
DROP ROLE IF EXISTS role2_cpu_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg1_cpu_test;
ERROR: resource group "rg1_cpu_test" does not exist
DROP RESOURCE GROUP rg2_cpu_test;
ERROR: resource group "rg2_cpu_test" does not exist
CREATE LANGUAGE plpython3u;
-CREATE
+ERROR: language "plpython3u" already exists
-- end_ignore
--
@@ -19,16 +19,16 @@ CREATE
--
DROP TABLE IF EXISTS cpu_usage_samples;
-DROP
+DROP TABLE
CREATE TABLE cpu_usage_samples (sample text);
-CREATE
+CREATE TABLE
-- fetch_sample: select cpu_usage from gp_toolkit.gp_resgroup_status_per_host,
-- dump it to text in JSON format, then save it in the cpu_usage_samples table
-- for further analysis.
CREATE OR REPLACE FUNCTION fetch_sample() RETURNS text AS $$ import json
group_cpus = plpy.execute(''' SELECT groupname, cpu_usage FROM gp_toolkit.gp_resgroup_status_per_host ''') plpy.notice(group_cpus) json_text = json.dumps(dict([(row['groupname'], float(row['cpu_usage'])) for row in group_cpus])) plpy.execute(''' INSERT INTO cpu_usage_samples VALUES ('{value}') '''.format(value=json_text)) return json_text $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-- verify_cpu_usage: calculate each QE's average cpu usage from the data in
-- the table cpu_usage_samples, and compare it to the expected value.
@@ -36,22 +36,22 @@ CREATE
CREATE OR REPLACE FUNCTION verify_cpu_usage(groupname TEXT, expect_cpu_usage INT, err_rate INT) RETURNS BOOL AS $$ import json import functools
all_info = plpy.execute(''' SELECT sample::json->'{name}' AS cpu FROM cpu_usage_samples '''.format(name=groupname)) usage = float(all_info[0]['cpu'])
return abs(usage - expect_cpu_usage) <= err_rate $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
CREATE OR REPLACE FUNCTION busy() RETURNS void AS $$ import os import signal
n = 15 for i in range(n): if os.fork() == 0: # children must quit without invoking the atexit hooks signal.signal(signal.SIGINT, lambda a, b: os._exit(0)) signal.signal(signal.SIGQUIT, lambda a, b: os._exit(0)) signal.signal(signal.SIGTERM, lambda a, b: os._exit(0))
# generate pure cpu load while True: pass
os.wait() $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
CREATE VIEW cancel_all AS SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query LIKE 'SELECT * FROM % WHERE busy%';
-CREATE
+CREATE VIEW
-- create two resource groups
CREATE RESOURCE GROUP rg1_cpu_test WITH (concurrency=5, cpu_max_percent=-1, cpu_weight=100);
-CREATE
+CREATE RESOURCE GROUP
CREATE RESOURCE GROUP rg2_cpu_test WITH (concurrency=5, cpu_max_percent=-1, cpu_weight=200);
-CREATE
+CREATE RESOURCE GROUP
--
-- check gpdb cgroup configuration
@@ -65,13 +65,13 @@ select check_cgroup_configuration();
-- lower admin_group's cpu_max_percent to minimize its side effect
ALTER RESOURCE GROUP admin_group SET cpu_max_percent 1;
-ALTER
+ALTER RESOURCE GROUP
-- create two roles and assign them to above groups
CREATE ROLE role1_cpu_test RESOURCE GROUP rg1_cpu_test;
-CREATE
+CREATE ROLE
CREATE ROLE role2_cpu_test RESOURCE GROUP rg2_cpu_test;
-CREATE
+CREATE ROLE
GRANT ALL ON FUNCTION busy() TO role1_cpu_test;
GRANT
GRANT ALL ON FUNCTION busy() TO role2_cpu_test;
@@ -115,7 +115,7 @@ SET
-- start_ignore
-- Gather CPU usage statistics into cpu_usage_samples
TRUNCATE TABLE cpu_usage_samples;
-TRUNCATE
+TRUNCATE TABLE
SELECT fetch_sample();
fetch_sample
---------------------------------------------------------------------------------------------------------------
@@ -167,7 +167,7 @@ SELECT pg_sleep(1.7);
(1 row)
TRUNCATE TABLE cpu_usage_samples;
-TRUNCATE
+TRUNCATE TABLE
SELECT fetch_sample();
fetch_sample
---------------------------------------------------------------------------------------------------------------
@@ -290,7 +290,7 @@ SET
-- start_ignore
TRUNCATE TABLE cpu_usage_samples;
-TRUNCATE
+TRUNCATE TABLE
SELECT fetch_sample();
fetch_sample
----------------------------------------------------------------------------------------------------------------
@@ -342,7 +342,7 @@ SELECT pg_sleep(1.7);
(1 row)
TRUNCATE TABLE cpu_usage_samples;
-TRUNCATE
+TRUNCATE TABLE
SELECT fetch_sample();
fetch_sample
----------------------------------------------------------------------------------------------------------------
@@ -462,9 +462,9 @@ ERROR: canceling statement due to user request
-- Test cpu max percent
ALTER RESOURCE GROUP rg1_cpu_test set cpu_max_percent 10;
-ALTER
+ALTER RESOURCE GROUP
ALTER RESOURCE GROUP rg2_cpu_test set cpu_max_percent 20;
-ALTER
+ALTER RESOURCE GROUP
-- prepare parallel queries in the two groups
10: SET ROLE TO role1_cpu_test;
@@ -509,7 +509,7 @@ SET
-- start_ignore
1:TRUNCATE TABLE cpu_usage_samples;
-TRUNCATE
+TRUNCATE TABLE
1:SELECT fetch_sample();
fetch_sample
---------------------------------------------------------------------------------------------------------------
@@ -685,7 +685,7 @@ SET
-- start_ignore
1:TRUNCATE TABLE cpu_usage_samples;
-TRUNCATE
+TRUNCATE TABLE
1:SELECT fetch_sample();
fetch_sample
----------------------------------------------------------------------------------------------------------------
@@ -737,7 +737,7 @@ TRUNCATE
(1 row)
1:TRUNCATE TABLE cpu_usage_samples;
-TRUNCATE
+TRUNCATE TABLE
1:SELECT fetch_sample();
fetch_sample
---------------------------------------------------------------------------------------------------------------
@@ -857,7 +857,7 @@ ERROR: canceling statement due to user request
-- restore admin_group's cpu_max_percent
2:ALTER RESOURCE GROUP admin_group SET cpu_max_percent 10;
-ALTER
+ALTER RESOURCE GROUP
-- cleanup
2:REVOKE ALL ON FUNCTION busy() FROM role1_cpu_test;
@@ -865,10 +865,10 @@ REVOKE
2:REVOKE ALL ON FUNCTION busy() FROM role2_cpu_test;
REVOKE
2:DROP ROLE role1_cpu_test;
-DROP
+DROP ROLE
2:DROP ROLE role2_cpu_test;
-DROP
+DROP ROLE
2:DROP RESOURCE GROUP rg1_cpu_test;
-DROP
+DROP RESOURCE GROUP
2:DROP RESOURCE GROUP rg2_cpu_test;
-DROP
+DROP RESOURCE GROUP
diff --git a/src/test/isolation2/expected/resgroup/resgroup_cpuset.out b/src/test/isolation2/expected/resgroup/resgroup_cpuset.out
index 0ff087de3a6..6969d621422 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_cpuset.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_cpuset.out
@@ -1,30 +1,30 @@
-- start_ignore
DROP VIEW IF EXISTS busy;
-DROP
+DROP VIEW
DROP VIEW IF EXISTS cancel_all;
-DROP
+DROP VIEW
DROP TABLE IF EXISTS bigtable;
-DROP
+DROP TABLE
CREATE LANGUAGE plpython3u;
CREATE
-- end_ignore
CREATE TABLE bigtable AS SELECT i AS c1, 'abc' AS c2 FROM generate_series(1,50000) i;
-CREATE 50000
+SELECT 50000
CREATE OR REPLACE FUNCTION get_cpu_cores() RETURNS INTEGER AS $$ import os return os.cpu_count() $$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
-CREATE VIEW busy AS SELECT count(*) FROM bigtable t1, bigtable t2, bigtable t3, bigtable t4, bigtable t5 WHERE 0 != (t1.c1 % 2 + 10000) AND 0 != (t2.c1 % 2 + 10000) AND 0 != (t3.c1 % 2 + 10000) AND 0 != (t4.c1 % 2 + 10000) AND 0 != (t5.c1 % 2 + 10000) ;
-CREATE
+CREATE VIEW busy AS SELECT count(*) FROM bigtable t1, bigtable t2, bigtable t3, bigtable t4, bigtable t5 WHERE 0 = (t1.c1 % 2 + 10000)! AND 0 = (t2.c1 % 2 + 10000)! AND 0 = (t3.c1 % 2 + 10000)! AND 0 = (t4.c1 % 2 + 10000)! AND 0 = (t5.c1 % 2 + 10000)! ;
+CREATE VIEW
CREATE VIEW cancel_all AS SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query LIKE 'SELECT * FROM busy%';
-CREATE
+CREATE VIEW
CREATE RESOURCE GROUP rg1_cpuset_test WITH (cpuset='0');
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role1_cpuset_test RESOURCE GROUP rg1_cpuset_test;
-CREATE
+CREATE ROLE
GRANT ALL ON busy TO role1_cpuset_test;
GRANT
@@ -52,7 +52,7 @@ BEGIN
(1 row)
ALTER RESOURCE GROUP rg1_cpuset_test SET cpuset '1';
-ALTER
+ALTER RESOURCE GROUP
select pg_sleep(2);
pg_sleep
----------
@@ -66,7 +66,7 @@ select pg_sleep(2);
(1 row)
ALTER RESOURCE GROUP rg1_cpuset_test SET cpuset '0,1';
-ALTER
+ALTER RESOURCE GROUP
select pg_sleep(2);
pg_sleep
----------
@@ -79,11 +79,11 @@ select pg_sleep(2);
t
(1 row)
11: END;
-END
+COMMIT
-- change to cpu_max_percent while the transaction is running
ALTER RESOURCE GROUP rg1_cpuset_test SET cpu_max_percent 70;
-ALTER
+ALTER RESOURCE GROUP
-- cancel the transaction
-- start_ignore
@@ -180,49 +180,49 @@ SELECT check_cpuset_rules();
t
(1 row)
CREATE RESOURCE GROUP rg1_test_group WITH (cpuset='0');
-CREATE
+CREATE RESOURCE GROUP
SELECT check_cpuset_rules();
check_cpuset_rules
--------------------
t
(1 row)
CREATE RESOURCE GROUP rg2_test_group WITH (cpuset='1');
-CREATE
+CREATE RESOURCE GROUP
SELECT check_cpuset_rules();
check_cpuset_rules
--------------------
t
(1 row)
ALTER RESOURCE GROUP rg1_test_group SET cpu_max_percent 1;
-ALTER
+ALTER RESOURCE GROUP
SELECT check_cpuset_rules();
check_cpuset_rules
--------------------
t
(1 row)
ALTER RESOURCE GROUP rg1_test_group SET cpuset '0';
-ALTER
+ALTER RESOURCE GROUP
SELECT check_cpuset_rules();
check_cpuset_rules
--------------------
t
(1 row)
ALTER RESOURCE GROUP rg1_test_group SET cpu_max_percent 1;
-ALTER
+ALTER RESOURCE GROUP
SELECT check_cpuset_rules();
check_cpuset_rules
--------------------
t
(1 row)
DROP RESOURCE GROUP rg1_test_group;
-DROP
+DROP RESOURCE GROUP
SELECT check_cpuset_rules();
check_cpuset_rules
--------------------
t
(1 row)
DROP RESOURCE GROUP rg2_test_group;
-DROP
+DROP RESOURCE GROUP
SELECT check_cpuset_rules();
check_cpuset_rules
--------------------
@@ -241,7 +241,7 @@ SELECT check_cpuset_rules();
t
(1 row)
DROP RESOURCE GROUP rg1_test_group;
-DROP
+DROP RESOURCE GROUP
SELECT check_cpuset_rules();
check_cpuset_rules
--------------------
@@ -277,9 +277,9 @@ ERROR: resource group "rg1_test_group" does not exist
-- test segment/master cpuset
CREATE RESOURCE GROUP rg_multi_cpuset1 WITH (concurrency=2, cpuset='0;0');
-CREATE
+CREATE RESOURCE GROUP
ALTER RESOURCE GROUP rg_multi_cpuset1 set CPUSET '1;1';
-ALTER
+ALTER RESOURCE GROUP
select groupname,cpuset from gp_toolkit.gp_resgroup_config where groupname='rg_multi_cpuset1';
groupname | cpuset
------------------+--------
@@ -287,23 +287,23 @@ select groupname,cpuset from gp_toolkit.gp_resgroup_config where groupname='rg_m
(1 row)
DROP RESOURCE GROUP rg_multi_cpuset1;
-DROP
+DROP RESOURCE GROUP
REVOKE ALL ON busy FROM role1_cpuset_test;
REVOKE
DROP ROLE role1_cpuset_test;
-DROP
+DROP ROLE
DROP RESOURCE GROUP rg1_cpuset_test;
-DROP
+DROP RESOURCE GROUP
DROP FUNCTION check_cpuset_rules();
-DROP
+DROP FUNCTION
DROP FUNCTION check_cpuset(TEXT, TEXT);
-DROP
+DROP FUNCTION
DROP FUNCTION create_allcores_group(TEXT);
-DROP
+DROP FUNCTION
DROP VIEW cancel_all;
-DROP
+DROP VIEW
DROP VIEW busy;
-DROP
+DROP VIEW
DROP TABLE bigtable;
-DROP
+DROP TABLE
diff --git a/src/test/isolation2/expected/resgroup/resgroup_cpuset_empty_default.out b/src/test/isolation2/expected/resgroup/resgroup_cpuset_empty_default.out
index c84e98b5779..1ef006a10f6 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_cpuset_empty_default.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_cpuset_empty_default.out
@@ -24,7 +24,7 @@ CREATE RESOURCE GROUP
-- Altering a resource group's cpuset from / to all the cpu cores should also work.
ALTER RESOURCE GROUP rg1_cpuset_test SET cpuset '0';
-ALTER
+ALTER RESOURCE GROUP
! psql -d isolation2resgrouptest -Ac "ALTER RESOURCE GROUP rg1_cpuset_test SET cpuset '0-$(($(nproc)-1))'";
ALTER RESOURCE GROUP
@@ -35,4 +35,4 @@ ALTER RESOURCE GROUP
-- Cleanup in a new connection as the default one is disconnected by gpstop
10: DROP RESOURCE GROUP rg1_cpuset_test;
-DROP
+DROP RESOURCE GROUP
diff --git a/src/test/isolation2/expected/resgroup/resgroup_disable_resgroup.out b/src/test/isolation2/expected/resgroup/resgroup_disable_resgroup.out
index ae3324c605d..1ccaf65d732 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_disable_resgroup.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_disable_resgroup.out
@@ -13,6 +13,6 @@ SHOW gp_resource_manager;
-- reset settings
ALTER RESOURCE GROUP admin_group SET concurrency 10;
-ALTER
+ALTER RESOURCE GROUP
ALTER RESOURCE GROUP default_group SET concurrency 20;
-ALTER
+ALTER RESOURCE GROUP
diff --git a/src/test/isolation2/expected/resgroup/resgroup_dumpinfo.out b/src/test/isolation2/expected/resgroup/resgroup_dumpinfo.out
index dc57a23fa17..c04ece07ef6 100644
--- a/src/test/isolation2/expected/resgroup/resgroup_dumpinfo.out
+++ b/src/test/isolation2/expected/resgroup/resgroup_dumpinfo.out
@@ -1,7 +1,7 @@
DROP ROLE IF EXISTS role_dumpinfo_test;
-DROP
+DROP ROLE
DROP ROLE IF EXISTS role_permission;
-DROP
+DROP ROLE
-- start_ignore
DROP RESOURCE GROUP rg_dumpinfo_test;
ERROR: resource group "rg_dumpinfo_test" does not exist
@@ -20,12 +20,12 @@ r = plpy.execute("select value from pg_resgroup_get_status_kv('dump');") json_te
plpy.execute("""CREATE TEMPORARY TABLE t_pg_resgroup_get_status_kv AS SELECT * FROM pg_resgroup_get_status_kv('dump');""") r = plpy.execute("SELECT value FROM t_pg_resgroup_get_status_kv;") json_text = r[0]['value'] json_obj = json.loads(json_text)
return validate(json_obj, n)
$$ LANGUAGE plpython3u;
-CREATE
+CREATE FUNCTION
CREATE RESOURCE GROUP rg_dumpinfo_test WITH (concurrency=2, cpu_max_percent=20);
-CREATE
+CREATE RESOURCE GROUP
CREATE ROLE role_dumpinfo_test RESOURCE GROUP rg_dumpinfo_test;
-CREATE
+CREATE ROLE
2:SET ROLE role_dumpinfo_test;
SET
@@ -46,19 +46,19 @@ SELECT dump_test_check();
(1 row)
2:END;
-END
+COMMIT
3:END;
-END
+COMMIT
4<: <... completed>
BEGIN
4:END;
-END
+COMMIT
2q: ...