diff --git a/src/backend/access/common/session.c b/src/backend/access/common/session.c index f3a5e62e060..bd5bb7c6bec 100644 --- a/src/backend/access/common/session.c +++ b/src/backend/access/common/session.c @@ -46,7 +46,7 @@ /* This backend's current session. */ Session *CurrentSession = NULL; -/* gp style parallelism session. */ +/* CBDB style parallelism session. */ Session *ParallelSession = NULL; /* * Set up CurrentSession to point to an empty Session object. diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 478d0f89fdb..0aa3780372d 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -69,8 +69,8 @@ /* Magic number for parallel context TOC. */ #define PARALLEL_MAGIC 0x50477c7c -/* Magic number for gp style parallel context TOC. */ -#define GP_PARALLEL_MAGIC 0x50477d7d +/* Magic number for CBDB style parallel context TOC. */ +#define CBDB_PARALLEL_MAGIC 0x50477d7d /* * Magic numbers for per-context parallel state sharing. 
Higher-level code @@ -1692,7 +1692,7 @@ GpFetchParallelDSMEntry(ParallelEntryTag tag, int plan_node_id) else { Assert(ParallelSession->segment); - toc = shm_toc_attach(GP_PARALLEL_MAGIC, dsm_segment_address(ParallelSession->segment)); + toc = shm_toc_attach(CBDB_PARALLEL_MAGIC, dsm_segment_address(ParallelSession->segment)); } Assert(toc != NULL); @@ -1749,15 +1749,15 @@ void GpDestroyParallelDSMEntry() } void -AtEOXact_GP_Parallel() +AtEOXact_CBDB_Parallel() { GpDestroyParallelDSMEntry(); } void -AtProcExit_GP_Parallel(int code, Datum arg) +AtProcExit_CBDB_Parallel(int code, Datum arg) { - AtEOXact_GP_Parallel(); + AtEOXact_CBDB_Parallel(); } GpParallelDSMEntry * @@ -1818,14 +1818,14 @@ GpInsertParallelDSMHash(PlanState *planstate) dsm_segment* seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS); if (seg != NULL) - toc = shm_toc_create(GP_PARALLEL_MAGIC, + toc = shm_toc_create(CBDB_PARALLEL_MAGIC, dsm_segment_address(seg), segsize); else ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("create dsm for gp style parallel workers failed."))); + errhint("create dsm for CBDB style parallel workers failed."))); BarrierInit(&entry->build_barrier, parallel_workers); entry->handle = dsm_segment_handle(seg); @@ -1866,7 +1866,7 @@ GpInsertParallelDSMHash(PlanState *planstate) if (!init) { /* should ensure that no shared memory is pinned before process exist. */ - before_shmem_exit(AtProcExit_GP_Parallel, 0); + before_shmem_exit(AtProcExit_CBDB_Parallel, 0); init = true; } } @@ -1878,7 +1878,7 @@ GpInsertParallelDSMHash(PlanState *planstate) ParallelSession->segment = seg; /* Attach to DSA area that can be used by the leader and all workers. 
*/ - shm_toc* toc = shm_toc_attach(GP_PARALLEL_MAGIC, dsm_segment_address(seg)); + shm_toc* toc = shm_toc_attach(CBDB_PARALLEL_MAGIC, dsm_segment_address(seg)); char* area_space = shm_toc_lookup(toc, PARALLEL_KEY_GP_DSA, false); dsa_area* area = dsa_attach_in_place(area_space, seg); diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 5b291905edc..02f4a77255f 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -2816,8 +2816,8 @@ CommitTransaction(void) if (IsInParallelMode()) AtEOXact_Parallel(true); - /* Clean up GP style parallel workers which we might have. */ - AtEOXact_GP_Parallel(); + /* Clean up CBDB style parallel workers which we might have. */ + AtEOXact_CBDB_Parallel(); /* Shut down the deferred-trigger manager */ AfterTriggerEndXact(true); @@ -3546,8 +3546,8 @@ AbortTransaction(void) s->parallelModeLevel = 0; } - /* Clean up GP style parallel workers which we might have. */ - AtEOXact_GP_Parallel(); + /* Clean up CBDB style parallel workers which we might have. */ + AtEOXact_CBDB_Parallel(); /* * do abort processing diff --git a/src/backend/cdb/cdbpath.c b/src/backend/cdb/cdbpath.c index 4143f73d2af..4fb64912cc0 100644 --- a/src/backend/cdb/cdbpath.c +++ b/src/backend/cdb/cdbpath.c @@ -2950,7 +2950,7 @@ can_elide_explicit_motion(PlannerInfo *root, Index rti, Path *subpath, * parallel plan with Motion(1:6), but it still can't be processed by multiple * workers or be duplicated in every worker as the inner path. * - * All locus test cases are in gp_parallel, see final join locus examples there. + * All locus test cases are in cbdb_parallel, see final join locus examples there. */ CdbPathLocus cdbpath_motion_for_parallel_join(PlannerInfo *root, @@ -3137,7 +3137,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root, /* * SegmentGeneralWorkers parallel join SegmentGeneralWorkers when parallel_aware * generate SegmentGeneralWorerks locus. 
- * see ex 5_P_5_5 in gp_parallel.sql + * see ex 5_P_5_5 in cbdb_parallel.sql */ if (outer.ok_to_replicate && inner.ok_to_replicate) return outer.locus; @@ -3156,7 +3156,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root, * SegmentGeneralWorkers JOIN SegmentGeneral without shared hash table. * And the join locus is SegmentGeneralWorkers. * Then we can return the outer locus as join will set workers as outer locus. - * See ex 5_4_5 in gp_parallel.sql + * See ex 5_4_5 in cbdb_parallel.sql */ if (outer.ok_to_replicate && inner.ok_to_replicate) return outer.locus; diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 0b5dfd03a3d..189a6c9e31d 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -2540,7 +2540,7 @@ ExecutePlan(EState *estate, EnterParallelMode(); /* - * GP style parallelism won't interfere PG style parallel mechanism. + * CBDB style parallelism won't interfere with the PG style parallel mechanism. * So that we will pass if use_parallel_mode is true which means there exists * Gather/GatherMerge node. */ diff --git a/src/backend/optimizer/README.cbdb.parallel b/src/backend/optimizer/README.cbdb.parallel index ca41ec570d1..c5a54697e2a 100644 --- a/src/backend/optimizer/README.cbdb.parallel +++ b/src/backend/optimizer/README.cbdb.parallel @@ -14,11 +14,11 @@ Cloudberry Database treats all workers equally. They work together to execute a plan node with some sync mechanism to keep the right thing, ex: create a shared hash table etc. -That's called GP style. GP style launches workers as non-parallel +That's called CBDB style. CBDB style launches workers as non-parallel plan except that expand Gang size by factor if a top path node has parallel_workers > 1. -The reasons we choose GP style but not PG style or mix them is complex. +The reasons we choose CBDB style rather than PG style (or a mix of them) are complex. 
We encounter lots of problems when mixing them together and we don't have enough time to enable both and don't know how much the benefit we @@ -41,7 +41,7 @@ The Gather node should be Hashed locus in that situation. But things become complex when joining with other locus and if there is a Motion node below that. -3. GP style could parallelize plan as late as possible until the final +3. CBDB style can parallelize the plan as late as possible, until the final Gather(to QD or to QE in the middle), But PG style will Gather workers in apply_scanjoin_target_to_path. PG style can't generate the final scan/join target in parallel workers. This is PG's last opportunity to @@ -49,7 +49,7 @@ use any partial paths that exist. It will empty partial_pathlist, all paths are moved to pathlist that it couldn't participate in later parallel join as the outer path, ex: -parallel_aware hash join with a shared table. But GP style could keep +parallel_aware hash join with a shared table. But CBDB style could keep partial path in partial_pathlist because we have a Gather Motion on the top. diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 5690a3b2e54..98cbde9fb6d 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -2504,7 +2504,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * Limit parallel: * PG doesn't have to handle limit here becuase all partial paths have been Gathered * into pathlist, and the subpath of Limit node could be parallel. - * For our GP style, we don't have Gather node and keep the partial path in partial_pathlist + * For our CBDB style, we don't have a Gather node and keep the partial path in partial_pathlist * until the last step if possible. * When we generate two phase limit path or limit has sub partial path, * the Limit node on QEs could be parallel. 
diff --git a/src/include/access/parallel.h b/src/include/access/parallel.h index 1fdaf6539a5..58a1ab12665 100644 --- a/src/include/access/parallel.h +++ b/src/include/access/parallel.h @@ -125,8 +125,8 @@ extern void* GpFetchParallelDSMEntry(ParallelEntryTag tag, int plan_node_id); extern void GpDestroyParallelDSMEntry(void); -extern void AtEOXact_GP_Parallel(void); +extern void AtEOXact_CBDB_Parallel(void); -extern void AtProcExit_GP_Parallel(int code, Datum arg); +extern void AtProcExit_CBDB_Parallel(int code, Datum arg); #endif /* PARALLEL_H */ diff --git a/src/test/regress/expected/gp_parallel.out b/src/test/regress/expected/cbdb_parallel.out similarity index 99% rename from src/test/regress/expected/gp_parallel.out rename to src/test/regress/expected/cbdb_parallel.out index d8f229795df..7e505cf3f62 100644 --- a/src/test/regress/expected/gp_parallel.out +++ b/src/test/regress/expected/cbdb_parallel.out @@ -1,6 +1,6 @@ -- --- GP PARALLEL --- Test GP style parallel plan. +-- CBDB PARALLEL +-- Test CBDB style parallel plan. -- GUCs shoule be set with local, do not disturb other parallel plans. -- Should not use force_parallel_mode as it will ignore plan and check results only. -- We want to check plan in this file! diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out index 180498dae04..ba3ee7ef55a 100644 --- a/src/test/regress/expected/select_parallel.out +++ b/src/test/regress/expected/select_parallel.out @@ -1,6 +1,6 @@ -- -- PARALLEL --- We have GP style parallel now, open this file in parallel mode. +-- We have CBDB style parallel now, open this file in parallel mode. 
-- set enable_parallel = on; set optimizer = off; diff --git a/src/test/regress/greenplum_schedule b/src/test/regress/greenplum_schedule index a7cd3e9bf58..545367f2ca6 100755 --- a/src/test/regress/greenplum_schedule +++ b/src/test/regress/greenplum_schedule @@ -250,8 +250,8 @@ test: uao_dml/uao_dml_column test: ao_locks test: freeze_aux_tables -# gp parallel test -test: gp_parallel +# cbdb parallel test +test: cbdb_parallel # These cannot run in parallel, because they check that VACUUM FULL shrinks table size. # A concurrent session could hold back the xid horizon and prevent old tuples from being diff --git a/src/test/regress/sql/gp_parallel.sql b/src/test/regress/sql/cbdb_parallel.sql similarity index 99% rename from src/test/regress/sql/gp_parallel.sql rename to src/test/regress/sql/cbdb_parallel.sql index 92dec58a677..3ead947b0ba 100644 --- a/src/test/regress/sql/gp_parallel.sql +++ b/src/test/regress/sql/cbdb_parallel.sql @@ -1,6 +1,6 @@ -- --- GP PARALLEL --- Test GP style parallel plan. +-- CBDB PARALLEL +-- Test CBDB style parallel plan. -- GUCs shoule be set with local, do not disturb other parallel plans. -- Should not use force_parallel_mode as it will ignore plan and check results only. -- We want to check plan in this file! diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql index 09513821a50..8a56061710e 100644 --- a/src/test/regress/sql/select_parallel.sql +++ b/src/test/regress/sql/select_parallel.sql @@ -1,6 +1,6 @@ -- -- PARALLEL --- We have GP style parallel now, open this file in parallel mode. +-- We have CBDB style parallel now, open this file in parallel mode. -- set enable_parallel = on;