diff --git a/gpAux/gpdemo/demo_cluster.sh b/gpAux/gpdemo/demo_cluster.sh index 866fbcf0641..33f78106156 100755 --- a/gpAux/gpdemo/demo_cluster.sh +++ b/gpAux/gpdemo/demo_cluster.sh @@ -174,7 +174,7 @@ if [ -z "${GPHOME}" ]; then echo "FATAL: The GPHOME environment variable is not set." echo "" echo " You can set it by sourcing the greenplum_path.sh" - echo " file in your Cloudberry installation directory." + echo " file in your CloudberryDB installation directory." echo "" exit 1 fi @@ -214,17 +214,17 @@ GPPATH=`find -H $GPHOME -name gpstart| tail -1` RETVAL=$? if [ "$RETVAL" -ne 0 ]; then - echo "Error attempting to find Cloudberry executables in $GPHOME" + echo "Error attempting to find CloudberryDB executables in $GPHOME" exit 1 fi if [ ! -x "$GPPATH" ]; then - echo "No executables found for Cloudberry installation in $GPHOME" + echo "No executables found for CloudberryDB installation in $GPHOME" exit 1 fi GPPATH=`dirname $GPPATH` if [ ! -x $GPPATH/gpinitsystem ]; then - echo "No mgmt executables(gpinitsystem) found for Cloudberry installation in $GPPATH" + echo "No mgmt executables(gpinitsystem) found for CloudberryDB installation in $GPPATH" exit 1 fi diff --git a/gpMgmt/bin/gpactivatestandby b/gpMgmt/bin/gpactivatestandby index 949905f4155..813d6cd3f75 100755 --- a/gpMgmt/bin/gpactivatestandby +++ b/gpMgmt/bin/gpactivatestandby @@ -157,7 +157,7 @@ def print_results(array, hostname, options): logger.info('to set this value.') logger.info('COORDINATOR_PORT is now %d, if this has changed, you' % array.coordinator.getSegmentPort()) logger.info('may need to make additional configuration changes to allow access') - logger.info('to the Cloudberry instance.') + logger.info('to the CloudberryDB instance.') logger.info('Refer to the Administrator Guide for instructions on how to re-activate') logger.info('the coordinator to its previous state once it becomes available.') logger.info('Query planner statistics must be updated on all databases') @@ -302,14 +302,14 @@ def 
start_coordinator(options): """Starts the coordinator.""" logger.info('Starting standby coordinator database in utility mode...') - gp.NewGpStart.local('Start GPDB', coordinatorOnly=True, coordinatorDirectory=options.coordinator_data_dir) + gp.NewGpStart.local('Start CBDB', coordinatorOnly=True, coordinatorDirectory=options.coordinator_data_dir) #------------------------------------------------------------------------- def stop_coordinator(): """Stops the coordinator.""" logger.info('Stopping standby coordinator...') - gp.GpStop.local('Stop GPDB', coordinatorOnly=True, fast=True) + gp.GpStop.local('Stop CBDB', coordinatorOnly=True, fast=True) #------------------------------------------------------------------------- def promote_standby(coordinator_data_dir): @@ -381,7 +381,7 @@ try: # If we forced to start utility coordinator, this is the time to restart # cluster so that the new coordinator becomes dispatch mode. if requires_restart: - cmd = gp.GpStop.local('GPDB restart', restart=True, datadir=options_.coordinator_data_dir) + cmd = gp.GpStop.local('CBDB restart', restart=True, datadir=options_.coordinator_data_dir) # At this point, cancel isn't all that bad so re-enable # keyboard interrupt. 
diff --git a/gpMgmt/bin/gpdeletesystem b/gpMgmt/bin/gpdeletesystem index 8299d86d820..e7832518f18 100755 --- a/gpMgmt/bin/gpdeletesystem +++ b/gpMgmt/bin/gpdeletesystem @@ -149,19 +149,19 @@ def parseargs(): # ------------------------------------------------------------------------- def display_params(options, dburl, standby, segments, dumpDirsExist): global g_warnings_generated - logger.info('Cloudberry Instance Deletion Parameters') + logger.info('CloudberryDB Instance Deletion Parameters') logger.info('--------------------------------------') - logger.info('Cloudberry Coordinator hostname = %s' % dburl.pghost) - logger.info('Cloudberry Coordinator data directory = %s' % options.coordinator_data_dir) - logger.info('Cloudberry Coordinator port = %s' % dburl.pgport) + logger.info('CloudberryDB Coordinator hostname = %s' % dburl.pghost) + logger.info('CloudberryDB Coordinator data directory = %s' % options.coordinator_data_dir) + logger.info('CloudberryDB Coordinator port = %s' % dburl.pgport) if standby: - logger.info('Cloudberry Coordinator standby host = %s' % standby.getSegmentHostName()) - logger.info('Cloudberry Coordinator standby data directory = %s' % standby.getSegmentDataDirectory()) - logger.info('Cloudberry Coordinator standby port = %s' % standby.getSegmentPort()) + logger.info('CloudberryDB Coordinator standby host = %s' % standby.getSegmentHostName()) + logger.info('CloudberryDB Coordinator standby data directory = %s' % standby.getSegmentDataDirectory()) + logger.info('CloudberryDB Coordinator standby port = %s' % standby.getSegmentPort()) if options.force: - logger.info('Cloudberry Force delete of dump files = ON') + logger.info('CloudberryDB Force delete of dump files = ON') else: - logger.info('Cloudberry Force delete of dump files = OFF') + logger.info('CloudberryDB Force delete of dump files = OFF') logger.info('Batch size = %s' % options.batch_size) logger.info('--------------------------------------') logger.info(' Segment Instance List ') @@ 
-173,14 +173,14 @@ def display_params(options, dburl, standby, segments, dumpDirsExist): port = segdb.getSegmentPort() logger.info('%s:%s:%s' % (host, datadir, port)) - yn = ask_yesno('', 'Continue with Cloudberry instance deletion?', 'N') + yn = ask_yesno('', 'Continue with CloudberryDB instance deletion?', 'N') if yn: - logger.info('FINAL WARNING, you are about to delete the Cloudberry instance') + logger.info('FINAL WARNING, you are about to delete the CloudberryDB instance') logger.info('on coordinator host %s.' % dburl.pghost) if dumpDirsExist and options.force: logger.warn('There are database dump files, these will be DELETED if you continue!') g_warnings_generated = True - yn = ask_yesno('', 'Continue with Cloudberry instance deletion?', 'N') + yn = ask_yesno('', 'Continue with CloudberryDB instance deletion?', 'N') if not yn: raise GpDeleteSystemException('User canceled') else: diff --git a/gpMgmt/bin/gpexpand b/gpMgmt/bin/gpexpand index 072fa4421d7..6b6bfc05bbe 100755 --- a/gpMgmt/bin/gpexpand +++ b/gpMgmt/bin/gpexpand @@ -57,7 +57,7 @@ DBNAME = 'postgres' _gp_expand = None description = (""" -Adds additional segments to a pre-existing GPDB Array. +Adds additional segments to a pre-existing CBDB Array. """) _help = [""" @@ -571,7 +571,7 @@ class SegmentTemplateError(Exception): pass # ------------------------------------------------------------------------- class SegmentTemplate: """Class for creating, distributing and deploying new segments to an - existing GPDB array""" + existing CBDB array""" def __init__(self, logger, statusLogger, pool, gparray, coordinatorDataDirectory, @@ -2195,7 +2195,7 @@ def read_hosts_file(hosts_file): def interview_setup(gparray, options): help = """ -System Expansion is used to add segments to an existing GPDB array. +System Expansion is used to add segments to an existing CBDB array. gpexpand did not detect a System Expansion that is in progress. 
Before initiating a System Expansion, you need to provision and burn-in @@ -2397,7 +2397,7 @@ def main(options, args, parser): gparray = GpArray.initFromCatalog(dburl, utility=True) except DatabaseError as ex: logger.error('Failed to connect to database. Make sure the') - logger.error('Cloudberry instance you wish to expand is running') + logger.error('CloudberryDB instance you wish to expand is running') logger.error('and that your environment is correct, then rerun') logger.error('gexpand ' + ' '.join(sys.argv[1:])) sys.exit(1) diff --git a/gpMgmt/bin/gpinitstandby b/gpMgmt/bin/gpinitstandby index b92d532f23c..380a288e841 100755 --- a/gpMgmt/bin/gpinitstandby +++ b/gpMgmt/bin/gpinitstandby @@ -125,14 +125,14 @@ def parseargs(): optgrp.add_option('-r', '--remove', action='store_true', help='remove current warm coordinator standby. Use this option ' 'if the warm coordinator standby host has failed. This option will ' - 'need to shutdown the GPDB array to be able to complete the request') + 'need to shutdown the CBDB array to be able to complete the request') optgrp.add_option('', '--hba-hostnames', action='store_true', dest='hba_hostnames', help='use hostnames instead of CIDR in pg_hba.conf') # XXX - This option is added to keep backward compatibility with DCA tools. # But this option plays no role in the whole process, its a No-Op optgrp.add_option('-M', '--mode', type='string', default='smart', - help='use specified mode when stopping the GPDB array. Default: smart') + help='use specified mode when stopping the CBDB array. 
Default: smart') optgrp.add_option('-f', '--fts-host', type='string', dest='fts_host', help='hostname of fts to configure hba configuration.') parser.add_option_group(optgrp) @@ -193,19 +193,19 @@ def print_summary(options, array, standby_datadir, unreachable_hosts=[]): if options.remove: logger.info('Warm coordinator standby removal parameters') else: - logger.info('Cloudberry standby coordinator initialization parameters') + logger.info('CloudberryDB standby coordinator initialization parameters') logger.info('-----------------------------------------------------') - logger.info('Cloudberry coordinator hostname = %s' \ + logger.info('CloudberryDB coordinator hostname = %s' \ % array.coordinator.getSegmentHostName()) - logger.info('Cloudberry coordinator data directory = %s' \ + logger.info('CloudberryDB coordinator data directory = %s' \ % array.coordinator.getSegmentDataDirectory()) - logger.info('Cloudberry coordinator port = %s' \ + logger.info('CloudberryDB coordinator port = %s' \ % array.coordinator.getSegmentPort()) if options.remove: - logger.info('Cloudberry standby coordinator hostname = %s' \ + logger.info('CloudberryDB standby coordinator hostname = %s' \ % array.standbyCoordinator.getSegmentHostName()) else: - logger.info('Cloudberry standby coordinator hostname = %s' \ + logger.info('CloudberryDB standby coordinator hostname = %s' \ % options.standby_host) if array.standbyCoordinator: @@ -215,22 +215,22 @@ def print_summary(options, array, standby_datadir, unreachable_hosts=[]): else: standby_port = array.coordinator.getSegmentPort() - logger.info('Cloudberry standby coordinator port = %d' \ + logger.info('CloudberryDB standby coordinator port = %d' \ % standby_port) if array.standbyCoordinator: - logger.info('Cloudberry standby coordinator data directory = %s' \ + logger.info('CloudberryDB standby coordinator data directory = %s' \ % array.standbyCoordinator.getSegmentDataDirectory()) else: if standby_datadir: - logger.info('Cloudberry standby 
coordinator data directory = %s' % standby_datadir) + logger.info('CloudberryDB standby coordinator data directory = %s' % standby_datadir) else: raise GpInitStandbyException('No data directory specified for standby coordinator') if not options.remove and options.no_update: - logger.info('Cloudberry update system catalog = Off') + logger.info('CloudberryDB update system catalog = Off') elif not options.remove: - logger.info('Cloudberry update system catalog = On') + logger.info('CloudberryDB update system catalog = On') # Confirm the action if options.confirm: diff --git a/gpMgmt/bin/gpinitsystem b/gpMgmt/bin/gpinitsystem index eb816dcc707..4f19196fff0 100755 --- a/gpMgmt/bin/gpinitsystem +++ b/gpMgmt/bin/gpinitsystem @@ -140,16 +140,16 @@ USAGE () { $ECHO " Applies to coordinator and all segments." $ECHO " -B, run this batch of create segment processes in parallel [default $BATCH_DEFAULT]" $ECHO " -c, gp_config_file [mandatory]" - $ECHO " Supplies all Cloudberry configuration information required by this utility." + $ECHO " Supplies all CloudberryDB configuration information required by this utility." $ECHO " Full description of all parameters contained within the example file" $ECHO " supplied with this distribution." $ECHO " Also see gpinitsystem_INSTRUCTIONS file for greater detail on" $ECHO " the operation and configuration of this script" - $ECHO " -e, , password to set for Cloudberry superuser in database [default $GP_PASSWD]" + $ECHO " -e, , password to set for CloudberryDB superuser in database [default $GP_PASSWD]" $ECHO " -S, standby_datadir [optional]" $ECHO " -h, gp_hostlist_file [optional]" $ECHO " Contains a list of all segment instance hostnames required to participate in" - $ECHO " the new Cloudberry instance. Normally set in gp_config_file." + $ECHO " the new CloudberryDB instance. Normally set in gp_config_file." 
$ECHO " -I, " $ECHO " The full path and filename of an input configuration file, which defines the" $ECHO " Cloudberry Database members and segments using the QD_PRIMARY_ARRAY and" @@ -160,7 +160,7 @@ USAGE () { $ECHO " -m, maximum number of connections for coordinator instance [default ${DEFAULT_QD_MAX_CONNECT}]" $ECHO " -n, , setting for locale to be set when database initialized [defaults to system locale]" $ECHO " -O, " - $ECHO " When used with the -O option, gpinitsystem does not create a new Cloudberry" + $ECHO " When used with the -O option, gpinitsystem does not create a new CloudberryDB" $ECHO " Database cluster but instead writes the supplied cluster configuration" $ECHO " information to the specified output_configuration_file. This file defines" $ECHO " Cloudberry Database members and segments using the QD_PRIMARY_ARRAY and" @@ -168,7 +168,7 @@ USAGE () { $ECHO " to initialize a new cluster." $ECHO " -p, postgresql_conf_gp_additions [optional]" $ECHO " List of additional PostgreSQL parameters to be applied to each Coordinator/Segment" - $ECHO " postgresql.conf file during Cloudberry database initialization." + $ECHO " postgresql.conf file during Cloudberry Database initialization." $ECHO " -P, standby_port [optional]" $ECHO " -s, standby_hostname [optional]" $ECHO " -U, Use external FTS, default false" @@ -268,7 +268,7 @@ CHK_PARAMS () { $CAT $CLUSTER_CONFIG|$GREP -v "^\s*\(#.*\)\?$" >> $LOG_FILE LOG_MSG "[INFO]:-Completed $CLUSTER_CONFIG dump to logfile" # Source the cluster configuration file - LOG_MSG "[INFO]:-Reading Cloudberry configuration file $CLUSTER_CONFIG" 1 + LOG_MSG "[INFO]:-Reading CloudberryDB configuration file $CLUSTER_CONFIG" 1 . 
$CLUSTER_CONFIG ASSIGN_COORDINATOR_VARS @@ -285,7 +285,7 @@ CHK_PARAMS () { fi else - LOG_MSG "[INFO]:-Reading Cloudberry input configuration file $INPUT_CONFIG" + LOG_MSG "[INFO]:-Reading CloudberryDB input configuration file $INPUT_CONFIG" READ_INPUT_CONFIG fi @@ -1120,7 +1120,7 @@ DISPLAY_CONFIG () { LOG_MSG "[INFO]:-Coordinator port = $COORDINATOR_PORT" 1 LOG_MSG "[INFO]:-Coordinator instance dir = $GP_DIR" 1 LOG_MSG "[INFO]:-Coordinator LOCALE = $LOCALE_SETTING" 1 - LOG_MSG "[INFO]:-Cloudberry segment prefix = $SEG_PREFIX" 1 + LOG_MSG "[INFO]:-CloudberryDB segment prefix = $SEG_PREFIX" 1 LOG_MSG "[INFO]:-Coordinator Database = $DATABASE_NAME" 1 LOG_MSG "[INFO]:-Coordinator connections = $COORDINATOR_MAX_CONNECT" 1 LOG_MSG "[INFO]:-Coordinator buffers = $COORDINATOR_SHARED_BUFFERS" 1 @@ -1183,7 +1183,7 @@ DISPLAY_CONFIG () { LOG_MSG "[INFO]:-Mirroring config = OFF" 1 fi LOG_MSG "[INFO]:----------------------------------------" 1 - LOG_MSG "[INFO]:-Cloudberry Primary Segment Configuration" 1 + LOG_MSG "[INFO]:-CloudberryDB Primary Segment Configuration" 1 LOG_MSG "[INFO]:----------------------------------------" 1 for I in "${QE_PRIMARY_ARRAY[@]}" do @@ -1192,7 +1192,7 @@ DISPLAY_CONFIG () { done if [ $MIRRORING -ne 0 ]; then LOG_MSG "[INFO]:---------------------------------------" 1 - LOG_MSG "[INFO]:-Cloudberry Mirror Segment Configuration" 1 + LOG_MSG "[INFO]:-CloudberryDB Mirror Segment Configuration" 1 LOG_MSG "[INFO]:---------------------------------------" 1 for I in "${QE_MIRROR_ARRAY[@]}" do @@ -1201,7 +1201,7 @@ DISPLAY_CONFIG () { done fi - GET_REPLY "Continue with Cloudberry creation" + GET_REPLY "Continue with CloudberryDB creation" fi LOG_MSG "[INFO]:-End Function $FUNCNAME" } @@ -1309,7 +1309,7 @@ CREATE_QD_DB () { BACKOUT_COMMAND "if [ -d $GP_DIR ]; then $RM -Rf $GP_DIR; fi" BACKOUT_COMMAND "$ECHO Removing Coordinator data directory files" LOG_MSG "[INFO]:-Completed Coordinator instance initialization" - $ECHO "#Cloudberry specific configuration 
parameters for Coordinator instance database" >> ${GP_DIR}/$PG_CONF + $ECHO "#CloudberryDB specific configuration parameters for Coordinator instance database" >> ${GP_DIR}/$PG_CONF $ECHO "#------------------------------------------------------------------------" >> ${GP_DIR}/$PG_CONF LOG_MSG "[INFO]:-Setting the Coordinator port to $GP_PORT" SED_PG_CONF ${GP_DIR}/$PG_CONF "$PORT_TXT" port=$GP_PORT 0 @@ -1523,16 +1523,16 @@ REGISTER_MIRRORS () { STOP_QD_PRODUCTION () { LOG_MSG "[INFO]:-Start Function $FUNCNAME" - LOG_MSG "[INFO]:-Restarting the Cloudberry instance in production mode" 1 + LOG_MSG "[INFO]:-Restarting the CloudberryDB instance in production mode" 1 if [ -f $GPSTOP ]; then GPSTOP_OPTS=$(OUTPUT_LEVEL_OPTS) export COORDINATOR_DATA_DIRECTORY=${COORDINATOR_DIRECTORY}/${SEG_PREFIX}-1 $GPSTOP -a -l $LOG_DIR -m -d $COORDINATOR_DATA_DIRECTORY $GPSTOP_OPTS RETVAL=$? if [ $RETVAL -eq 0 ]; then - LOG_MSG "[INFO]:-Successfully shutdown the new Cloudberry instance" + LOG_MSG "[INFO]:-Successfully shutdown the new CloudberryDB instance" else - ERROR_EXIT "[FATAL]:-Error from Cloudberry instance shutdown, check log files" + ERROR_EXIT "[FATAL]:-Error from CloudberryDB instance shutdown, check log files" fi else ERROR_EXIT "[FATAL]:-$GPSTOP not located" @@ -1584,12 +1584,12 @@ START_QD_PRODUCTION () { $GPSTART -a -l $LOG_DIR -d $COORDINATOR_DATA_DIRECTORY $GPSTART_OPTS if [ $? -eq 0 ];then - LOG_MSG "[INFO]:-Successfully started new Cloudberry instance" + LOG_MSG "[INFO]:-Successfully started new CloudberryDB instance" else # this text is duplicated below LOG_MSG "[WARN]:" 1 - LOG_MSG "[WARN]:-Failed to start Cloudberry instance; review gpstart output to" 1 + LOG_MSG "[WARN]:-Failed to start CloudberryDB instance; review gpstart output to" 1 LOG_MSG "[WARN]:- determine why gpstart failed and reinitialize cluster after resolving" 1 LOG_MSG "[WARN]:- issues. Not all initialization tasks have completed so the cluster" 1 LOG_MSG "[WARN]:- should not be used." 
1 @@ -1603,9 +1603,9 @@ START_QD_PRODUCTION () { RETVAL=$? if [ $RETVAL -eq 0 ]; then - LOG_MSG "[INFO]:-Successfully shutdown the Cloudberry instance" 1 + LOG_MSG "[INFO]:-Successfully shutdown the CloudberryDB instance" 1 else - ERROR_EXIT "[FATAL]:-Error from Cloudberry instance shutdown, check log files" + ERROR_EXIT "[FATAL]:-Error from CloudberryDB instance shutdown, check log files" fi else LOG_MSG "[WARN]:-$GPSTOP not located" 1 @@ -1613,7 +1613,7 @@ START_QD_PRODUCTION () { # this text is duplicated above LOG_MSG "[WARN]:" 1 - LOG_MSG "[WARN]:-Failed to start Cloudberry instance; review gpstart output to" 1 + LOG_MSG "[WARN]:-Failed to start CloudberryDB instance; review gpstart output to" 1 LOG_MSG "[WARN]:- determine why gpstart failed and reinitialize cluster after resolving" 1 LOG_MSG "[WARN]:- issues. Not all initialization tasks have completed so the cluster" 1 LOG_MSG "[WARN]:- should not be used." 1 @@ -1623,7 +1623,7 @@ START_QD_PRODUCTION () { else ERROR_EXIT "[FATAL]:-$GPSTART not located" fi - LOG_MSG "[INFO]:-Completed restart of Cloudberry instance in production mode" 1 + LOG_MSG "[INFO]:-Completed restart of CloudberryDB instance in production mode" 1 LOG_MSG "[INFO]:-End Function $FUNCNAME" } @@ -1644,7 +1644,7 @@ FORCE_FTS_PROBE () { $PSQL -p $GP_PORT -d "$DEFAULTDB" -A -t -c "select * from gp_segment_configuration where (mode = 'n' or status = 'd') and content != -1;" >> $LOG_FILE 2>&1 $PSQL -p $GP_PORT -d "$DEFAULTDB" -A -t -c "select * from gp_stat_replication where sync_error != 'none' or sync_state != 'sync';" >> $LOG_FILE 2>&1 LOG_MSG "[WARN]:" 1 - LOG_MSG "[WARN]:-Failed to start Cloudberry instance; please review gpinitsystem log to determine failure." 1 + LOG_MSG "[WARN]:-Failed to start CloudberryDB instance; please review gpinitsystem log to determine failure." 
1 # FTS_TODO: should support notify fts, then re-enable this function # ERROR_EXIT "[FATAL]:-Some primary/mirror segment pairs were found to be not in sync" break; @@ -1780,7 +1780,7 @@ READ_INPUT_CONFIG () { $CAT $INPUT_CONFIG|$GREP -v "^\s*\(#.*\)\?$" >> $LOG_FILE LOG_MSG "[INFO]:-Completed $INPUT_CONFIG dump to logfile" # Source the cluster configuration file - LOG_MSG "[INFO]:-Reading Cloudberry configuration file $INPUT_CONFIG" + LOG_MSG "[INFO]:-Reading CloudberryDB configuration file $INPUT_CONFIG" . $INPUT_CONFIG SET_VAR $QD_PRIMARY_ARRAY @@ -2032,7 +2032,7 @@ CREATE_ETCD() { if [ x"$CLUSTER_BOOT_MODE" = x"PRODUCTION" ];then ETCD_CONFIG_FILE_PATH=`readlink -f $PG_CONF_ADD_FILE` if [ ! -x ${GPHOME}/bin/etcd ];then - ERROR_EXIT "[FATAL]: No mgmt executables(etcd) found for Cloudberry installation in ${GPHOME}/bin" + ERROR_EXIT "[FATAL]: No mgmt executables(etcd) found for CloudberryDB installation in ${GPHOME}/bin" fi fi local etcd_url=`GENERATE_ETCD_URL 2380` @@ -2066,7 +2066,7 @@ CREATE_ETCD() { CREATE_FTS() { if [ ! -x ${GPHOME}/bin/gpfts ]; then - ERROR_EXIT "[FATAL]: No mgmt executables(gpfts) found for Cloudberry installation in ${GPHOME}/bin" + ERROR_EXIT "[FATAL]: No mgmt executables(gpfts) found for CloudberryDB installation in ${GPHOME}/bin" fi for fts in ${FTS_HOST_MACHINE_LIST[*]} do @@ -2295,8 +2295,8 @@ LOG_MSG "[INFO]:-To complete the environment configuration, please " 1 LOG_MSG "[INFO]:-update $USER_NAME .bashrc file with the following" 1 LOG_MSG "[INFO]:-1. Ensure that the greenplum_path.sh file is sourced" 1 LOG_MSG "[INFO]:-2. 
Add \"export COORDINATOR_DATA_DIRECTORY=${COORDINATOR_DIRECTORY}/${SEG_PREFIX}-1\"" 1 -LOG_MSG "[INFO]:- to access the Cloudberry scripts for this instance:" 1 -LOG_MSG "[INFO]:- or, use -d ${COORDINATOR_DIRECTORY}/${SEG_PREFIX}-1 option for the Cloudberry scripts" 1 +LOG_MSG "[INFO]:- to access the CloudberryDB scripts for this instance:" 1 +LOG_MSG "[INFO]:- or, use -d ${COORDINATOR_DIRECTORY}/${SEG_PREFIX}-1 option for the CloudberryDB scripts" 1 LOG_MSG "[INFO]:- Example gpstate -d ${COORDINATOR_DIRECTORY}/${SEG_PREFIX}-1" 1 LOG_MSG "[INFO]:-Script log file = $LOG_FILE" 1 LOG_MSG "[INFO]:-To remove instance, run gpdeletesystem utility" 1 @@ -2310,7 +2310,7 @@ if [ x"" != x"$STANDBY_HOSTNAME" ];then *) LOG_MSG "[WARN]:-Standby Coordinator failed to initialize" 1 esac else - LOG_MSG "[INFO]:-To initialize a Standby Coordinator Segment for this Cloudberry instance" 1 + LOG_MSG "[INFO]:-To initialize a Standby Coordinator Segment for this CloudberryDB instance" 1 LOG_MSG "[INFO]:-Review options for gpinitstandby" 1 fi @@ -2319,8 +2319,6 @@ LOG_MSG "[INFO]:-The Coordinator ${COORDINATOR_DATA_DIRECTORY}/$PG_HBA post gpin LOG_MSG "[INFO]:-has been configured to allow all hosts within this new" 1 LOG_MSG "[INFO]:-array to intercommunicate. 
Any hosts external to this" 1 LOG_MSG "[INFO]:-new array must be explicitly added to this file" 1 -LOG_MSG "[INFO]:-Refer to the Cloudberry Admin support guide which is" 1 -LOG_MSG "[INFO]:-located in the $GPHOME/docs directory" 1 LOG_MSG "[INFO]:-------------------------------------------------------" 1 # Make sure that the user sees that there's an error with the standby diff --git a/gpMgmt/bin/gpload.py b/gpMgmt/bin/gpload.py index 451e3702fc3..a6ce8f471dc 100755 --- a/gpMgmt/bin/gpload.py +++ b/gpMgmt/bin/gpload.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# gpload - load file(s) into Cloudberry Database +# gpload - load file(s) into Greenplum Database & CloudberryDB # Copyright Greenplum 2008 '''gpload [options] -f configuration file @@ -2527,7 +2527,7 @@ def create_staging_table(self): resultList = self.db.query(sql).getresult() if len(resultList) > 0: self.log(self.WARN, """Old style, reusable tables named "temp_gpload_reusable_*" from a previous versions were found. - Cloudberry recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""") + CloudberryDB recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""") # If the 'reuse tables' option was specified we now try to find an # already existing staging table in the catalog which will match diff --git a/gpMgmt/bin/gpmovemirrors b/gpMgmt/bin/gpmovemirrors index d661cf2e5b7..8991c42f316 100755 --- a/gpMgmt/bin/gpmovemirrors +++ b/gpMgmt/bin/gpmovemirrors @@ -40,7 +40,7 @@ GPDB_UTILITY = 3 GP_MOVEMIRRORS_LOCK_PATH = "gpmovemirrors.lock" description = (""" -Moves mirror segments in a pre-existing GPDB Array. +Moves mirror segments in a pre-existing CBDB Array. """) EXECNAME = os.path.split(__file__)[-1] @@ -339,7 +339,7 @@ try: gpArrayInstance = GpArray.initFromCatalog(dburl, utility=True) except DatabaseError as ex: logger.error('Failed to connect to database. 
Make sure the') - logger.error('Cloudberry instance is running, and that') + logger.error('CloudberryDB instance is running, and that') logger.error('your environment is correct, then rerun') logger.error('gpmovemirrors ' + ' '.join(sys.argv[1:])) sys.exit(1) diff --git a/gpMgmt/bin/gppylib/commands/gp.py b/gpMgmt/bin/gppylib/commands/gp.py index 4685e4bef32..21ae6df7a45 100644 --- a/gpMgmt/bin/gppylib/commands/gp.py +++ b/gpMgmt/bin/gppylib/commands/gp.py @@ -1203,7 +1203,7 @@ def get_coordinatorport(datadir): ###### def check_permissions(username): - logger.debug("--Checking that current user can use GP binaries") + logger.debug("--Checking that current user can use CloudberryDB binaries") chk_gpdb_id(username) @@ -1523,7 +1523,7 @@ def chk_gpdb_id(username): path="%s/bin/initdb" % GPHOME if not os.access(path,os.X_OK): raise GpError("File permission mismatch. The current user %s does not have sufficient" - " privileges to run the Cloudberry binaries and management utilities." % username ) + " privileges to run the CloudberryDB binaries and management utilities." % username ) def chk_local_db_running(datadir, port): @@ -1571,7 +1571,7 @@ def get_lockfile_name(port): def get_local_db_mode(coordinator_data_dir): - """ Gets the mode Cloudberry is running in. + """ Gets the mode CloudberryDB is running in. 
Possible return values are: 'NORMAL' 'RESTRICTED' diff --git a/gpMgmt/bin/gppylib/gparray.py b/gpMgmt/bin/gppylib/gparray.py index a87c66de587..3cde686451a 100755 --- a/gpMgmt/bin/gppylib/gparray.py +++ b/gpMgmt/bin/gppylib/gparray.py @@ -957,7 +957,7 @@ def initFromCatalog(dbURL, utility=False): version_str = dbconn.querySingleton(conn, "SELECT version()") version = GpVersion(version_str) if not version.isVersionCurrentRelease(): - raise Exception("Cannot connect to GPDB version %s from installed version %s"%(version.getVersionRelease(), MAIN_VERSION[0])) + raise Exception("Cannot connect to CloudberryDB version %s from installed version %s"%(version.getVersionRelease(), MAIN_VERSION[0])) # Get gp_internal_is_singlenode GUC is_singlenode = dbconn.querySingleton(conn, "SHOW gp_internal_is_singlenode") == "on" diff --git a/gpMgmt/bin/gppylib/gpversion.py b/gpMgmt/bin/gppylib/gpversion.py index dad05c898ee..9d0a35e9f1a 100644 --- a/gpMgmt/bin/gppylib/gpversion.py +++ b/gpMgmt/bin/gppylib/gpversion.py @@ -175,7 +175,7 @@ def __init__(self, version): # If part of the conversion process above failed, throw an error, except Exception as e: - raise Exception("Unrecognised Cloudberry Version '%s' due to %s" % + raise Exception("Unrecognised CloudberryDB Version '%s' due to %s" % (str(version), str(e))) #------------------------------------------------------------ diff --git a/gpMgmt/bin/gppylib/programs/clsAddMirrors.py b/gpMgmt/bin/gppylib/programs/clsAddMirrors.py index 8fa831387b7..83d1e45a03b 100644 --- a/gpMgmt/bin/gppylib/programs/clsAddMirrors.py +++ b/gpMgmt/bin/gppylib/programs/clsAddMirrors.py @@ -422,10 +422,10 @@ def __getMirrorsToBuildBasedOnOptions(self, gpEnv, gpArray): return self.__generateMirrorsToBuild(gpEnv, gpArray) def __displayAddMirrors(self, gpEnv, mirrorBuilder, gpArray): - logger.info('Cloudberry Add Mirrors Parameters') + logger.info('CloudberryDB Add Mirrors Parameters') logger.info('--------------------------------------------') - 
logger.info('Cloudberry coordinator data directory = %s' % gpEnv.getCoordinatorDataDir()) - logger.info('Cloudberry coordinator port = %d' % gpEnv.getCoordinatorPort()) + logger.info('CloudberryDB coordinator data directory = %s' % gpEnv.getCoordinatorDataDir()) + logger.info('CloudberryDB coordinator port = %d' % gpEnv.getCoordinatorPort()) logger.info('Batch size = %d' % self.__options.batch_size) logger.info('Segment batch size = %d' % self.__options.segment_batch_size) @@ -522,7 +522,7 @@ def run(self): # check that we actually have mirrors if gpArray.hasMirrors: raise ExceptionNoStackTraceNeeded( \ - "GPDB physical mirroring cannot be added. The cluster is already configured with Mirrors.") + "CBDB physical mirroring cannot be added. The cluster is already configured with Mirrors.") # figure out what needs to be done (AND update the gpArray!) mirrorBuilder = self.__getMirrorsToBuildBasedOnOptions(gpEnv, gpArray) diff --git a/gpMgmt/bin/gppylib/programs/clsRecoverSegment.py b/gpMgmt/bin/gppylib/programs/clsRecoverSegment.py index 6304e5d0cfb..d0c6c33aaee 100644 --- a/gpMgmt/bin/gppylib/programs/clsRecoverSegment.py +++ b/gpMgmt/bin/gppylib/programs/clsRecoverSegment.py @@ -147,7 +147,7 @@ def syncPackages(self, new_hosts): self.logger.warning('Please run gppkg --clean after successful segment recovery.') def displayRecovery(self, mirrorBuilder, gpArray): - self.logger.info('Cloudberry instance recovery parameters') + self.logger.info('CloudberryDB instance recovery parameters') self.logger.info('---------------------------------------------------------') if self.__options.recoveryConfigFile: diff --git a/gpMgmt/bin/gppylib/programs/clsSystemState.py b/gpMgmt/bin/gppylib/programs/clsSystemState.py index c1b3bca58e5..afbc8c01dcf 100644 --- a/gpMgmt/bin/gppylib/programs/clsSystemState.py +++ b/gpMgmt/bin/gppylib/programs/clsSystemState.py @@ -305,7 +305,7 @@ def __showClusterConfig(self, gpEnv, gpArray): """ if gpArray.hasMirrors: 
logger.info("-------------------------------------------------------------" ) - logger.info("-Current GPDB mirror list and status" ) + logger.info("-Current CBDB mirror list and status" ) logger.info("-Type = %s" % self.__getMirrorType(gpArray) ) logger.info("-------------------------------------------------------------" ) @@ -412,7 +412,7 @@ def _showMirrorList(self,gpEnv, gpArray): tabLog.infoOrWarn(doWarn, line) logger.info("-------------------------------------------------------------" ) - logger.info("-Current GPDB mirror list and status" ) + logger.info("-Current CBDB mirror list and status" ) logger.info("-Type = %s" % self.__getMirrorType(gpArray) ) logger.info("-------------------------------------------------------------" ) @@ -464,7 +464,7 @@ def __showStatusStatistics(self, gpEnv, gpArray): """ hostNameToResults = self.__fetchAllSegmentData(gpArray) - logger.info("Cloudberry instance status summary") + logger.info("CloudberryDB instance status summary") # coordinator summary info tabLog = TableLogger().setWarnWithArrows(True) @@ -896,15 +896,15 @@ def __showStatus(self, gpEnv, gpArray): tabLog.info(["Coordinator port", "= %d" % coordinator.getSegmentPort()]) tabLog.info(["Coordinator current role", "= %s" % qdRole]) - tabLog.info(["Cloudberry initsystem version", "= %s" % initDbVersion]) + tabLog.info(["CloudberryDB initsystem version", "= %s" % initDbVersion]) if statusFetchWarning is None: if coordinatorData[gp.SEGMENT_STATUS__GET_VERSION] is None: - tabLog.warn(["Cloudberry current version", "= Unknown"]) + tabLog.warn(["CloudberryDB current version", "= Unknown"]) else: - tabLog.info(["Cloudberry current version", "= %s" % coordinatorData[gp.SEGMENT_STATUS__GET_VERSION]]) + tabLog.info(["CloudberryDB current version", "= %s" % coordinatorData[gp.SEGMENT_STATUS__GET_VERSION]]) else: - tabLog.warn(["Cloudberry current version", "= Error fetching data: %s" % statusFetchWarning]) + tabLog.warn(["CloudberryDB current version", "= Error fetching data: %s" 
% statusFetchWarning]) tabLog.info(["Postgres version", "= %s" % pgVersion]) self.__appendStandbySummary(hostNameToResults, gpArray.standbyCoordinator, tabLog) @@ -1242,7 +1242,7 @@ def __showQuickStatus(self, gpEnv, gpArray): exitCode = 0 - logger.info("-Quick Cloudberry database status from Coordinator instance only") + logger.info("-Quick Cloudberry Database status from Coordinator instance only") logger.info( "----------------------------------------------------------") segments = [seg for seg in gpArray.getDbList() if seg.isSegmentQE()] diff --git a/gpMgmt/bin/gppylib/programs/gppkg.py b/gpMgmt/bin/gppylib/programs/gppkg.py index f784386ea5f..b2ed2fdce9d 100755 --- a/gpMgmt/bin/gppylib/programs/gppkg.py +++ b/gpMgmt/bin/gppylib/programs/gppkg.py @@ -87,7 +87,7 @@ def __init__(self, options, args): @staticmethod def create_parser(): parser = OptParser(option_class=OptChecker, - description="Cloudberry Package Manager", + description="CloudberryDB Package Manager", version='%prog version $Revision: #1 $') parser.setHelp([]) diff --git a/gpMgmt/bin/gppylib/system/environment.py b/gpMgmt/bin/gppylib/system/environment.py index 23cbd5d8363..04635aeab5f 100644 --- a/gpMgmt/bin/gppylib/system/environment.py +++ b/gpMgmt/bin/gppylib/system/environment.py @@ -39,10 +39,10 @@ def __init__(self, coordinatorDataDir, readFromCoordinatorCatalog, timeout=None, logger.debug("Read from postgresql.conf max_connections=%s" % self.__coordinatorMaxConnections) self.__gpHome = gp.get_gphome() - self.__gpVersion = gp.GpVersion.local('local GP software version check',self.__gpHome) + self.__gpVersion = gp.GpVersion.local('local DB software version check',self.__gpHome) if verbose: - logger.info("local Cloudberry Version: '%s'" % self.__gpVersion) + logger.info("local CloudberryDB Version: '%s'" % self.__gpVersion) # read collation settings from coordinator if readFromCoordinatorCatalog: @@ -51,7 +51,7 @@ def __init__(self, coordinatorDataDir, readFromCoordinatorCatalog, timeout=None, 
# MPP-13807, read/show the coordinator's database version too self.__pgVersion = dbconn.queryRow(conn, "select version();")[0] - logger.info("coordinator Cloudberry Version: '%s'" % self.__pgVersion) + logger.info("coordinator CloudberryDB Version: '%s'" % self.__pgVersion) conn.close() else: self.__pgVersion = None diff --git a/gpMgmt/bin/gpsd b/gpMgmt/bin/gpsd index 723be450bfd..ad61a1e3de2 100755 --- a/gpMgmt/bin/gpsd +++ b/gpMgmt/bin/gpsd @@ -142,7 +142,7 @@ def main(): 'database': db, 'options': pgoptions } - sys.stdout.writelines(['\n-- Cloudberry database Statistics Dump', + sys.stdout.writelines(['\n-- Greenplum database Statistics Dump', '\n-- Copyright (C) 2007 - 2014 Pivotal' '\n-- Database: ' + db, '\n-- Date: ' + timestamp.date().isoformat(), diff --git a/gpMgmt/bin/gpstart b/gpMgmt/bin/gpstart index 817e735a46b..8dcf826b350 100755 --- a/gpMgmt/bin/gpstart +++ b/gpMgmt/bin/gpstart @@ -157,7 +157,7 @@ class GpStart: logger.warning("****************************************************************************") logger.warning("Coordinator-only start requested. 
If a standby is configured, this command") logger.warning("may lead to a split-brain condition and possible unrecoverable data loss.") - logger.warning("Maintenance mode should only be used with direction from Cloudberry Support.") + logger.warning("Maintenance mode should only be used with direction from CloudberryDB Support.") logger.warning("****************************************************************************") if self.interactive: if not userinput.ask_yesno(None, "\nContinue with coordinator-only startup", 'N'): @@ -215,7 +215,7 @@ class GpStart: if self.interactive: self._summarize_actions(segmentsToStart) - if not userinput.ask_yesno(None, "\nContinue with Cloudberry instance startup", 'N'): + if not userinput.ask_yesno(None, "\nContinue with CloudberryDB instance startup", 'N'): raise UserAbortedException() try: @@ -289,16 +289,16 @@ class GpStart: ###### def _check_version(self): - self.gpversion = gp.GpVersion.local('local GP software version check', self.gphome) - logger.info("Cloudberry Binary Version: '%s'" % self.gpversion) + self.gpversion = gp.GpVersion.local('local CBDB software version check', self.gphome) + logger.info("CloudberryDB Binary Version: '%s'" % self.gpversion) # It would be nice to work out the catalog version => greenplum version # calculation so that we can print out nicer error messages when # version doesn't match. 
- bin_catversion = gp.GpCatVersion.local('local GP software catalag version check', self.gphome) - logger.info("Cloudberry Catalog Version: '%s'" % bin_catversion) + bin_catversion = gp.GpCatVersion.local('local CBDB software catalog version check', self.gphome) + logger.info("CloudberryDB Catalog Version: '%s'" % bin_catversion) - dir_catversion = gp.GpCatVersionDirectory.local('local GP directory catalog version check', self.coordinator_datadir) + dir_catversion = gp.GpCatVersionDirectory.local('local CBDB directory catalog version check', self.coordinator_datadir) if bin_catversion != dir_catversion: logger.info("COORDINATOR_DIRECTORY Catalog Version: '%s'" % dir_catversion) @@ -506,7 +506,7 @@ class GpStart: logger.fatal("Failed to start Coordinator instance in admin mode") cmd.validate() - logger.info("Obtaining Cloudberry Coordinator catalog information") + logger.info("Obtaining CloudberryDB Coordinator catalog information") logger.info("Obtaining Segment details from coordinator...") self.dburl = dbconn.DbURL(port=self.port, dbname='template1') @@ -837,7 +837,7 @@ class GpStart: @staticmethod def createParser(): parser = OptParser(option_class=OptChecker, - description="Starts a GPDB Array.", + description="Starts a CBDB Array.", version='%prog version $Revision$') parser.setHelp([]) diff --git a/gpMgmt/bin/gpstop b/gpMgmt/bin/gpstop index 92c401b1bcf..fb852d94f2e 100755 --- a/gpMgmt/bin/gpstop +++ b/gpMgmt/bin/gpstop @@ -207,7 +207,7 @@ class GpStop: if self.interactive: self._summarize_actions(segs) - if not userinput.ask_yesno(None, "\nContinue with Cloudberry instance shutdown", 'N'): + if not userinput.ask_yesno(None, "\nContinue with CloudberryDB instance shutdown", 'N'): raise UserAbortedException() try: @@ -319,8 +319,8 @@ class GpStop: ###### def _check_version(self): - self.gpversion = gp.GpVersion.local('local GP software version check', self.gphome) - logger.info("Cloudberry Version: '%s'" % self.gpversion) + self.gpversion =
gp.GpVersion.local('local CloudberryDB version check', self.gphome) + logger.info("CloudberryDB Version: '%s'" % self.gpversion) ###### def _read_postgresqlconf(self): @@ -352,11 +352,11 @@ class GpStop: pass else: raise ExceptionNoStackTraceNeeded( - 'postmaster.pid file does not exist. is Cloudberry instance already stopped?') + 'postmaster.pid file does not exist. is CloudberryDB instance already stopped?') ###### def _build_gparray(self): - logger.info("Obtaining Cloudberry Coordinator catalog information") + logger.info("Obtaining CloudberryDB Coordinator catalog information") logger.info("Obtaining Segment details from coordinator...") self.dburl = dbconn.DbURL(port=self.port, dbname='template1') @@ -810,7 +810,7 @@ class GpStop: logger.info("--------------------------------------------") tabLog = TableLogger(logger=logger).setWarnWithArrows(True) - tabLog.info(["Coordinator Cloudberry instance process active PID", "= %s" % self.pid]) + tabLog.info(["Coordinator CloudberryDB instance process active PID", "= %s" % self.pid]) tabLog.info(["Database", "= %s" % self.dburl.pgdb]) tabLog.info(["Coordinator port", "= %s" % self.port]) tabLog.info(["Coordinator directory", "= %s" % self.coordinator_datadir]) @@ -838,7 +838,7 @@ class GpStop: @staticmethod def createParser(): parser = OptParser(option_class=OptChecker, - description="Stops a GPDB Array.", + description="Stops a CBDB Array.", version='%prog version $Revision$') parser.setHelp([]) @@ -870,7 +870,7 @@ class GpStop: addTo.add_option('-y', dest="stop_standby", action='store_false', default=True, help='Do not stop the standby coordinator process.') addTo.add_option('-u', dest="request_sighup", action='store_true', - help="upload new coordinator postgresql.conf settings, does not stop Cloudberry array," \ + help="upload new coordinator postgresql.conf settings, does not stop CloudberryDB array," \ "issues a signal to the coordinator segment postmaster process to reload") addTo.add_option('-B', '--parallel', 
type="int", default=DEFAULT_NUM_WORKERS, metavar="", diff --git a/gpMgmt/sbin/gpsegstart.py b/gpMgmt/sbin/gpsegstart.py index fb58091a4de..90f39033883 100755 --- a/gpMgmt/sbin/gpsegstart.py +++ b/gpMgmt/sbin/gpsegstart.py @@ -139,7 +139,7 @@ def __init__(self, dblist, gpversion, mirroringMode, num_cids, era, self.dblist = list(map(gparray.Segment.initFromString, dblist)) expected_gpversion = gpversion - actual_gpversion = gp.GpVersion.local('local GP software version check', os.path.abspath(os.pardir)) + actual_gpversion = gp.GpVersion.local('local software version check', os.path.abspath(os.pardir)) if actual_gpversion != expected_gpversion: raise Exception("Local Software Version does not match what is expected.\n" "The local software version is: '%s'\n" diff --git a/gpMgmt/sbin/gpsegstop.py b/gpMgmt/sbin/gpsegstop.py index 188cd2f3c1f..d47fac68699 100755 --- a/gpMgmt/sbin/gpsegstop.py +++ b/gpMgmt/sbin/gpsegstop.py @@ -144,7 +144,7 @@ def __init__(self, dblist, mode, gpversion, timeout=SEGMENT_STOP_TIMEOUT_DEFAULT self.expected_gpversion = gpversion self.timeout = timeout self.gphome = os.path.abspath(os.pardir) - self.actual_gpversion = gp.GpVersion.local('local GP software version check', self.gphome) + self.actual_gpversion = gp.GpVersion.local('local software version check', self.gphome) if self.actual_gpversion != self.expected_gpversion: raise Exception("Local Software Version does not match what is expected.\n" "The local software version is: '%s'\n" diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c index c6d56a32dd3..a888ae71c1b 100644 --- a/src/bin/psql/help.c +++ b/src/bin/psql/help.c @@ -267,7 +267,7 @@ slashUsage(unsigned short int pager) fprintf(output, _(" \\dT[S+] [PATTERN] list data types\n")); fprintf(output, _(" \\du[S+] [PATTERN] list roles\n")); fprintf(output, _(" \\dv[S+] [PATTERN] list views\n")); - /* In GPDB, we use \dE for both external and foreign tables. */ + /* In GPDB and CBDB, we use \dE for both external and foreign tables. 
*/ fprintf(output, _(" \\dE[S+] [PATTERN] list foreign and external tables\n")); fprintf(output, _(" \\dx[+] [PATTERN] list extensions\n")); fprintf(output, _(" \\dX [PATTERN] list extended statistics\n")); @@ -692,6 +692,8 @@ print_copyright(void) { puts( "Cloudberry Database version of PostgreSQL Database Management System\n" + "Portions Copyright (c) 2023-, HashData Technology Limited.\n\n" + "Greenplum Database version of PostgreSQL Database Management System\n" "Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group\n\n" "Portions Copyright (c) 2014-Present VMware, Inc. or its affiliates.\n\n" "Portions Copyright (c) 2011-2014 EMC\n\n" diff --git a/src/nls-global.mk b/src/nls-global.mk index 004f02cbd1f..377a070bb02 100644 --- a/src/nls-global.mk +++ b/src/nls-global.mk @@ -40,7 +40,7 @@ ALL_PO_FILES = $(addprefix po/, $(addsuffix .po, $(AVAIL_LANGUAGES))) MO_FILES = $(addprefix po/, $(addsuffix .mo, $(LANGUAGES))) ifdef XGETTEXT -XGETTEXT += -ctranslator --copyright-holder='Cloudberry Project' --msgid-bugs-address=bugs@greenplum.org --no-wrap --sort-by-file --package-name='$(CATALOG_NAME) (Cloudberry)' --package-version='$(MAJORVERSION)' +XGETTEXT += -ctranslator --copyright-holder='CloudberryDB Project' --msgid-bugs-address=info@cloudberrydb.org --no-wrap --sort-by-file --package-name='$(CATALOG_NAME) (CloudberryDB)' --package-version='$(MAJORVERSION)' endif ifdef MSGMERGE