diff --git a/src/backend/access/bitmap/bitmapattutil.c b/src/backend/access/bitmap/bitmapattutil.c
index 0675342c222..699c7e2a84d 100644
--- a/src/backend/access/bitmap/bitmapattutil.c
+++ b/src/backend/access/bitmap/bitmapattutil.c
@@ -227,7 +227,7 @@ _bitmap_create_lov_heapandindex(Relation rel,
 	indexInfo->ii_Concurrent = false;
 	indexInfo->ii_BrokenHotChain = false;
 	/*
-	 * GP_PARALLEL_FIXME: temporarily set ii_ParallelWorkers to -1 to disable parallel in bitmap index
+	 * CBDB_PARALLEL_FIXME: temporarily set ii_ParallelWorkers to -1 to disable parallel in bitmap index
 	 * building. That's because that we still hold InterruptHoldoffCount after launch parallel workers.
 	 * And when parallel workers detach the message 'X' is not interrupt the leader. However, the leader
 	 * must wait for workers detaching. Thus there will be a hang issue.
diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index a4bf68cea33..478d0f89fdb 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -1665,7 +1665,7 @@ InitGpParallelDSMHash(void)
 Size
 GpParallelDSMHashSize(void)
 {
-	/* GPDB_PARALLEL_FIXME: limit for max slice */
+	/* CBDB_PARALLEL_FIXME: limit for max slice */
 	return hash_estimate_size(SHARED_PARALLEL_DSM_TABLE_SIZE,
 							  sizeof(GpParallelDSMEntry));
 }
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index ba6411db0fe..e9d1502b295 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1374,7 +1374,7 @@ index_create_internal(Relation heapRelation,
 	else
 	{
 		/*
-		 * GP_PARALLEL_FIXME: temporarily set ii_ParallelWorkers to -1 to disable parallel in bitmap index
+		 * CBDB_PARALLEL_FIXME: temporarily set ii_ParallelWorkers to -1 to disable parallel in bitmap index
 		 * building. That's because that we still hold InterruptHoldoffCount after launch parallel workers.
 		 * And when parallel workers detach the message 'X' is not interrupt the leader. However, the leader
 		 * must wait for workers detaching. Thus there will be a hang issue.
diff --git a/src/backend/cdb/cdbllize.c b/src/backend/cdb/cdbllize.c
index 425d35d5f0a..6a5fd990179 100644
--- a/src/backend/cdb/cdbllize.c
+++ b/src/backend/cdb/cdbllize.c
@@ -167,7 +167,7 @@ get_partitioned_policy_from_path(PlannerInfo *root, Path *path)
 	 *
 	 * NOTE: HashedOJ is not OK, because we cannot let the NULLs be stored
 	 * multiple segments. HashedWorkers is OK.
-	 * GPDB_PARALLEL_FIXME: Is HashedWorkers OK?
+	 * CBDB_PARALLEL_FIXME: Is HashedWorkers OK?
 	 * There is no parallel insertion now, query->intoPolicy couldn't be CdbLocusType_HashedWorkers.
 	 */
 	if (!(path->locus.locustype == CdbLocusType_Hashed ||
@@ -1579,7 +1579,7 @@ motion_sanity_check(PlannerInfo *root, Plan *plan)
 static void
 adjust_top_path_for_parallel_retrieve_cursor(Path *path, PlanSlice *slice)
 {
-	/* GPDB_PARALLEL_FIXME: should consider parallel_workers for parallel cursor? */
+	/* CBDB_PARALLEL_FIXME: should consider parallel_workers for parallel cursor? */
 	Assert(path->locus.parallel_workers == 0);
 
 	if (CdbPathLocus_IsSingleQE(path->locus)
diff --git a/src/backend/cdb/cdbpath.c b/src/backend/cdb/cdbpath.c
index c67c6f86468..d688214da1b 100644
--- a/src/backend/cdb/cdbpath.c
+++ b/src/backend/cdb/cdbpath.c
@@ -144,7 +144,7 @@ cdbpath_cost_motion(PlannerInfo *root, CdbMotionPath *motionpath)
 		recvrows = motionpath->path.rows;
 	motioncost = cost_per_row * 0.5 * (sendrows + recvrows);
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * Motioncost may be higher than sendrows + recvrows.
 	 * ex: Broadcast Motion 3:6
 	 * Broadcast to prallel workers, each worker's has a rel's all rows(recvrows),
@@ -603,7 +603,7 @@ cdbpath_create_motion_path(PlannerInfo *root,
 	 */
 	pathnode->path.parallel_aware = false;
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * We once set parallel_safe by locus type, but almost all locus are
 	 * parallel safe nowadays.
 	 * In principle, we should set parallel_safe = true if we are in a parallel join.
@@ -2895,7 +2895,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 	Assert(cdbpathlocus_is_valid(outer.locus));
 	Assert(cdbpathlocus_is_valid(inner.locus));
 
-	/* GPDB_PARALLEL_FIXME: reconsider the meaning of parallel_safe in GP parallel? */
+	/* CBDB_PARALLEL_FIXME: reconsider the meaning of parallel_safe in GP parallel? */
 	if (!outer.path->parallel_safe || !inner.path->parallel_safe)
 		goto fail;
 
@@ -2954,7 +2954,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 	 * unfortunately we have to pretend that inner is randomly distributed,
 	 * otherwise we may end up with redistributing outer rel.
 	 */
-	/* GPDB_PARALLEL_FIXME: this may cause parallel CTE, not sure if it's right */
+	/* CBDB_PARALLEL_FIXME: this may cause parallel CTE, not sure if it's right */
 	if (outer.has_wts && inner.locus.distkey != NIL)
 		CdbPathLocus_MakeStrewn(&inner.locus,
 								CdbPathLocus_NumSegments(inner.locus),
@@ -3139,7 +3139,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 	else if (innerParallel == 0 && other->path->pathtype == T_SeqScan)
 	{
 		/*
-		 * GPDB_PARALLEL_FIXME: The inner path will be duplicately processed.
+		 * CBDB_PARALLEL_FIXME: The inner path will be processed redundantly.
 		 * That require inner path should not have descendant Motion paths.
 		 * Use Seqscan here is more strit, but for now.
 		 *
@@ -3181,7 +3181,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 			 CdbPathLocus_IsSegmentGeneralWorkers(inner.locus))
 	{
 		/*
-		 * GPDB_PARALLEL_FIXME:
+		 * CBDB_PARALLEL_FIXME:
 		 * We shouln't get here as Path(parallel_worker=1) won't be added to partial_pathlist.
 		 * If outer locus is SegmentGeneral and its parallel_workers must be 0.
 		 * We neighter want a Motion nor change the parallel_workers of a path(May be enabled
@@ -3667,7 +3667,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 	}
 
 	/* Which rel is bigger? */
-	/* GPDB_PARALLEL_FIXME: should we swap if parallel_aware? */
+	/* CBDB_PARALLEL_FIXME: should we swap if parallel_aware? */
 	if (large_rel->bytes < small_rel->bytes)
 		CdbSwap(CdbpathMfjRel *, large_rel, small_rel);
 
diff --git a/src/backend/cdb/cdbpathtoplan.c b/src/backend/cdb/cdbpathtoplan.c
index b5e64b7415a..9b33c4decf1 100644
--- a/src/backend/cdb/cdbpathtoplan.c
+++ b/src/backend/cdb/cdbpathtoplan.c
@@ -60,7 +60,7 @@ cdbpathtoplan_create_flow(PlannerInfo *root,
 	}
 	else if (CdbPathLocus_IsReplicated(locus))
 	{
-		/* GPDB_PARALLEL_FIXME: What if ReplicatedWorkers? */
+		/* CBDB_PARALLEL_FIXME: What if ReplicatedWorkers? */
 		flow = makeFlow(FLOW_REPLICATED, locus.numsegments);
 	}
 	else if (CdbPathLocus_IsHashed(locus) ||
diff --git a/src/backend/cdb/endpoint/cdbendpoint.c b/src/backend/cdb/endpoint/cdbendpoint.c
index 6dc24a72650..eb00afee2ca 100644
--- a/src/backend/cdb/endpoint/cdbendpoint.c
+++ b/src/backend/cdb/endpoint/cdbendpoint.c
@@ -227,7 +227,7 @@ GetParallelCursorEndpointPosition(PlannedStmt *plan)
 {
 	if (plan->planTree->flow->flotype == FLOW_SINGLETON)
 	{
-		/* GPDB_PARALLEL_FIXME: CdbLocusType_SegmentGeneralWorkers */
+		/* CBDB_PARALLEL_FIXME: CdbLocusType_SegmentGeneralWorkers */
 		if (plan->planTree->flow->locustype == CdbLocusType_SegmentGeneral)
 			return ENDPOINT_ON_SINGLE_QE;
 		else
diff --git a/src/backend/commands/explain_gp.c b/src/backend/commands/explain_gp.c
index 06a925129b4..c97e8b6b6d0 100644
--- a/src/backend/commands/explain_gp.c
+++ b/src/backend/commands/explain_gp.c
@@ -773,7 +773,7 @@ cdbexplain_depositSliceStats(CdbExplain_StatHdr *hdr,
 	iworker = hdr->segindex - ss->segindex0;
 	ssw = &ss->workers[iworker];
 	Assert(iworker >= 0 && iworker < ss->nworker);
-	/* GPDB_PARALLEL_FIXME: reuse worker to store the stats of same slice */
+	/* CBDB_PARALLEL_FIXME: reuse worker to store the stats of same slice */
 #if 0
 	Assert(ssw->peakmemused == 0);	/* each worker should be seen just once */
 #endif
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index 61a580873b1..cb924e586aa 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -473,7 +473,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query,
 
 	/* Plan the query which will generate data for the refresh. */
-	/* GPDB_PARALLEL_FIXME: hack here, use cursor_option to disable parallel */
+	/* CBDB_PARALLEL_FIXME: hack here, use cursor_option to disable parallel */
 	if (!refreshClause->intoAO)
 		plan = pg_plan_query(query, queryString, CURSOR_OPT_PARALLEL_OK, NULL);
 	else
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 2c560274dd5..865abd42f5c 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -3183,7 +3183,7 @@ vac_update_relstats_from_list(List *updated_stats)
 		if (RelationIsAppendOptimized(rel))
 		{
 			/*
-			 * GPDB_PARALLEL_FIXME: This is very hacky!
+			 * CBDB_PARALLEL_FIXME: This is very hacky!
 			 * relallvisible came from vacuum AO/AOCO processes means the segment file count
 			 * of AO/AOCO tables. We use it to update pg_appendonly.segfilecount.
 			 * See ao_vacuum_rel_post_cleanup in vacuum_ao.c.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index d9c4b6ad7ef..1311722aa10 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1773,7 +1773,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 			/* set our global sliceid variable for elog. */
 			int			save_currentSliceId = estate->currentSliceId;
 
-			/* GPDB_PARALLEL_FIXME: Is it necessary to save and recover this? */
+			/* CBDB_PARALLEL_FIXME: Is it necessary to save and recover this? */
 			bool		save_useMppParallelMode = estate->useMppParallelMode;
 
 			estate->currentSliceId = estate->es_plannedstmt->subplan_sliceIds[subplan_id - 1];
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 5399811cac8..8d810b074da 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -1565,7 +1565,7 @@ InitializeGpParallelWorkers(PlanState *planstate, ParallelWorkerContext *pwcxt)
 	if (planstate == NULL)
 		return false;
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * Why we call PG's xxxInitializeWorker functions for some nodes, but not for others?
 	 */
 	switch (nodeTag(planstate))
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index af4ee939a37..2aee6c0db2e 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -1601,7 +1601,7 @@ FillSliceGangInfo(ExecSlice *slice, PlanSlice *ps)
 			 * It's either the root slice or an InitPlan slice that runs in
 			 * the QD process, or really unused slice.
 			 */
-			/* GPDB_PARALLEL_FIXME: QD process should never be parallel, do we need to plus factor? */
+			/* CBDB_PARALLEL_FIXME: QD process should never be parallel, do we need to plus factor? */
 			slice->planNumSegments = 1;
 			break;
 		case GANGTYPE_PRIMARY_WRITER:
@@ -1629,13 +1629,13 @@ FillSliceGangInfo(ExecSlice *slice, PlanSlice *ps)
 			}
 			break;
 		case GANGTYPE_ENTRYDB_READER:
-			/* GPDB_PARALLEL_FIXME: QD parallel is disabled */
+			/* CBDB_PARALLEL_FIXME: QD parallel is disabled */
 			slice->planNumSegments = 1;
 			slice->segments = list_make1_int(-1);
 			break;
 		case GANGTYPE_SINGLETON_READER:
 			/*
-			 * GPDB_PARALLEL_FIXME:
+			 * CBDB_PARALLEL_FIXME:
 			 * Could be parallel, parallel scan on replica tables.
 			 */
 			slice->planNumSegments = 1 * factor;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index f61f8f53491..f53037808d8 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -778,7 +778,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		/* Careful, this could overflow size_t */
 		double		newlimit;
 
-		/* GP_PARALLEL_FIXME: if we enable pg style parallel some day, we should reconsider it. */
+		/* CBDB_PARALLEL_FIXME: if we enable pg style parallel some day, we should reconsider it. */
 		newlimit = (double) hash_table_bytes * (double) parallel_workers;
 		newlimit = Min(newlimit, (double) SIZE_MAX);
 		hash_table_bytes = (size_t) newlimit;
@@ -2563,7 +2563,7 @@ ExecHashTableExplainEnd(PlanState *planstate, struct StringInfoData *buf)
 	}
 
 	/* Report workfile I/O statistics. */
-	/* GPDB_PARALLEL_FIXME: ExecHashTableExplainBatches if parallel_aware? */
+	/* CBDB_PARALLEL_FIXME: ExecHashTableExplainBatches if parallel_aware? */
 	if (hashtable->nbatch > 1 && !planstate->plan->parallel_aware)
 	{
 		ExecHashTableExplainBatches(hashtable, buf, 0, 1, "Initial");
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 0cfad9f7335..5b286179e06 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -204,7 +204,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
 	hashtable = node->hj_HashTable;
 	econtext = node->js.ps.ps_ExprContext;
 	parallel_state = hashNode->parallel_state;
-	/* GPDB_PARALLEL_FIXME: When parallel is true and parallel_state is NULL */
+	/* CBDB_PARALLEL_FIXME: When parallel is true and parallel_state is NULL */
 	parallel = parallel && (parallel_state != NULL);
 
 	/*
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 14f622d08ed..eef62e2b22d 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -522,7 +522,7 @@ bring_to_outer_query(PlannerInfo *root, RelOptInfo *rel, List *outer_quals)
 	rel->cheapest_unique_path = NULL;
 	rel->cheapest_parameterized_paths = NIL;
 	rel->pathlist = NIL;
-	/* GPDB_PARALLEL_FIXME: Need to clear partial_pathlist before we enable OuterQuery locus in paralle mode */
+	/* CBDB_PARALLEL_FIXME: Need to clear partial_pathlist before we enable OuterQuery locus in parallel mode */
 	rel->partial_pathlist = NIL;
 
 	foreach(lc, origpathlist)
@@ -652,7 +652,7 @@ bring_to_singleQE(PlannerInfo *root, RelOptInfo *rel)
 		add_path(rel, path, root);
 	}
 	/*
-	 * GP_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * If we need to bring to single QE which commonly seen in lateral
 	 * join with group by or limit, we better to set partial pathlist
 	 * to NIL in order to make sure single QE locus is satisfied in
@@ -790,7 +790,7 @@ set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	if (rel->upperrestrictinfo)
 	{
 		bring_to_outer_query(root, rel, rel->upperrestrictinfo);
-		/* GP_PARALLEL_FIXME: enable parallel outer query? */
+		/* CBDB_PARALLEL_FIXME: enable parallel outer query? */
 	}
 	else if (root->config->force_singleQE)
 	{
@@ -920,7 +920,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 	}
 
 	/*
-	 * GP_PARALLEL_FIXME: GPDB don't allow parallelism for relations that are system catalogs.
+	 * CBDB_PARALLEL_FIXME: GPDB doesn't allow parallelism for relations that are system catalogs.
 	 */
 	if (IsSystemClassByRelid(rte->relid))
 		return;
@@ -1091,7 +1091,7 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
 	if (parallel_workers <= 1)
 		return;
 
-	/* GPDB_PARALLEL_FIXME: update locus.parallel_workers? */
+	/* CBDB_PARALLEL_FIXME: update locus.parallel_workers? */
 	/* Add an unordered partial path based on a parallel sequential scan. */
 	add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
 
@@ -1669,7 +1669,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 											  &pa_nonpartial_subpaths,
 											  NULL);
 			/*
-			 * GPDB_PARALLEL_FIXME: can't use parallel append if subpath
+			 * CBDB_PARALLEL_FIXME: can't use parallel append if subpath
 			 * is not parallel safe.
 			 */
 			if (!nppath->parallel_safe)
@@ -1770,7 +1770,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 				parallel_workers = Max(parallel_workers, path->parallel_workers);
 		}
 		/*
-		 * GPDB_PARALLEL_FIXME: it still cannot be opened after we deal with append.
+		 * CBDB_PARALLEL_FIXME: it still cannot be enabled after we deal with append.
 		 * Because we currently allow path with non parallel_workers been added to
 		 * partial_path.
 		 */
@@ -1795,7 +1795,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 											max_parallel_workers_per_gather);
 		}
 		/*
-		 * GPDB_PARALLEL_FIXME: it still cannot be opened after we deal with append.
+		 * CBDB_PARALLEL_FIXME: it still cannot be enabled after we deal with append.
 		 * Because we currently allow path with non parallel_workers been added to
 		 * partial_path.
 		 */
@@ -3694,7 +3694,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
 		 * already.
 		 */
 		bring_to_outer_query(root, rel, NIL);
-		/* GP_PARALLEL_FIXME: enable parallel outer query? */
+		/* CBDB_PARALLEL_FIXME: enable parallel outer query? */
 	}
 
 	return rel;
@@ -3815,7 +3815,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
 		if (bms_equal(rel->relids, root->all_baserels) && root->is_correlated_subplan)
 		{
 			bring_to_outer_query(root, rel, NIL);
-			/* GP_PARALLEL_FIXME: enable parallel outer query? */
+			/* CBDB_PARALLEL_FIXME: enable parallel outer query? */
 		}
 
 		/* Find and save the cheapest paths for this rel */
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 5e8a613f952..74384680c5b 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -1147,7 +1147,7 @@ try_partial_hashjoin_path(PlannerInfo *root,
 		return;
 
 	/*
-	 * GPDB_PARALLEL_FIXME
+	 * CBDB_PARALLEL_FIXME
 	 * Customers encounter an issue that when parallel hash, broadcast motion
 	 * a smaller table may be worser than redistribute a big table.
 	 * We add a path whic doesn't try broadcast if possible.
@@ -1174,7 +1174,7 @@ try_partial_hashjoin_path(PlannerInfo *root,
 	}
 
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * We only want non-broadcast in parallel hash if the guc is set.
 	 */
 	if (parallel_hash && !parallel_hash_enable_motion_broadcast)
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 974577c0d5b..6dc49c7357b 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -5785,7 +5785,7 @@ create_hashjoin_plan(PlannerInfo *root,
 	bool		partition_selectors_created = false;
 	ListCell   *lc;
 
-	/* GP_PARALLEL_FIXME:
+	/* CBDB_PARALLEL_FIXME:
 	 * PartitionSelector is not parallel-aware, so disable it temporarily.
 	 * In future, after enabling merging partition prune info in shared memory,
 	 * PartitionSelector could work in parallel mode.
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index a32eacffaa8..e9512e66969 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -549,7 +549,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 	 * Unlike upstream, partial_path is valid in GP without Gather nodes.
 	 * Keep the two pathlist separated until the final. Now it's the time
 	 * to choose the best.
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * Take GP's special into partial_pathlist, ex: agg and etc.
 	 */
 	if (final_rel->partial_pathlist != NIL)
@@ -2463,7 +2463,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 	 * be able to make use of them.
 	 */
 	/*
-	 * GPDB_PARALLEL_FIXME: should keep query_level > 1 in GPDB?
+	 * CBDB_PARALLEL_FIXME: should keep query_level > 1 in GPDB?
 	 * It will lose parallel path, ex: plain parallel scan.
 	 * PG have Gather node but GP delay partial path until Gather Motion.
 	 *
@@ -2491,7 +2491,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 	{
 		Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
 
-		/* GPDB_PARALLEL_FIXEME: support parallel SCATTER BY? */
+		/* CBDB_PARALLEL_FIXME: support parallel SCATTER BY? */
 		if (parse->scatterClause)
 		{
 			current_rel->partial_pathlist = NIL;
@@ -5040,7 +5040,7 @@ create_distinct_paths(PlannerInfo *root,
 		/* On how many segments will the distinct result reside? */
 		if (CdbPathLocus_IsPartitioned(path->locus))
 		{
-			/* GPDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
+			/* CBDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
 			numDistinctRows = numDistinctRowsTotal / CdbPathLocus_NumSegments(path->locus);
 			if (path->locus.parallel_workers > 1)
 				numDistinctRows /= path->locus.parallel_workers;
@@ -5084,7 +5084,7 @@ create_distinct_paths(PlannerInfo *root,
 
 			if (CdbPathLocus_IsPartitioned(path->locus))
 			{
-				/* GPDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
+				/* CBDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
 				numDistinctRows = numDistinctRowsTotal / CdbPathLocus_NumSegments(path->locus);
 				if (path->locus.parallel_workers > 1)
 					numDistinctRows /= path->locus.parallel_workers;
@@ -5133,7 +5133,7 @@ create_distinct_paths(PlannerInfo *root,
 
 			if (CdbPathLocus_IsPartitioned(path->locus))
 			{
-				/* GPDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
+				/* CBDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
 				numDistinctRows = clamp_row_est(numDistinctRowsTotal / CdbPathLocus_NumSegments(path->locus));
 				if (path->locus.parallel_workers > 1)
 					numDistinctRows /= path->locus.parallel_workers;
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 468d2ba8810..f623eba24d2 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -590,7 +590,7 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root,
 	ListCell   *lc;
 	List	   *pathlist = NIL;
 	List	   *partial_pathlist = NIL;
-	bool		partial_paths_valid = false; /* GPDB_PARALLEL_FIXME: temproary disable partial path */
+	bool		partial_paths_valid = false; /* CBDB_PARALLEL_FIXME: temporarily disable partial path */
 	bool		consider_parallel = true;
 	List	   *rellist;
 	List	   *tlist_list;
@@ -749,7 +749,7 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root,
 										  NIL, NULL,
 										  parallel_workers, enable_parallel_append,
 										  -1);
-		/* GPDB_PARALLEL_FIXME: we disable pg styple Gather/GatherMerge node */
+		/* CBDB_PARALLEL_FIXME: we disable pg style Gather/GatherMerge node */
 #if 0
 		ppath = (Path *)
 			create_gather_path(root, result_rel, ppath,
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 90e2f8044ee..73644f014c3 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1400,7 +1400,7 @@ create_append_path(PlannerInfo *root,
 	ListCell   *l;
 
 	/*
-	 * GPDB_PARALLEL_FIXME: it still cannot be opened after we deal with append.
+	 * CBDB_PARALLEL_FIXME: it still cannot be enabled after we deal with append.
 	 * Because we currently allow path with non parallel_workers been added to
 	 * partial_path.
 	 */
@@ -1803,14 +1803,14 @@ set_append_path_locus(PlannerInfo *root, Path *pathnode, RelOptInfo *rel,
 		{ CdbLocusType_SegmentGeneralWorkers, CdbLocusType_Strewn, CdbLocusType_Strewn },
 		/*
-		 * GPDB_PARALLEL_FIXME: The following three locus are not considering parallel for now.
+		 * CBDB_PARALLEL_FIXME: The following three locus types are not considered for parallel for now.
 		 * We might need to consider it in the future.
 		 */
 		{ CdbLocusType_SegmentGeneralWorkers, CdbLocusType_OuterQuery, CdbLocusType_OuterQuery},
 		{ CdbLocusType_SegmentGeneralWorkers, CdbLocusType_Entry, CdbLocusType_Entry},
 		{ CdbLocusType_SegmentGeneralWorkers, CdbLocusType_SingleQE, CdbLocusType_SingleQE},
 
-		/* GPDB_PARALLEL_FIXME: Is there any chance replicated workers exist in append subpath? */
+		/* CBDB_PARALLEL_FIXME: Is there any chance replicated workers exist in append subpath? */
 	};
 
 	targetlocustype = CdbLocusType_General;
@@ -2138,7 +2138,7 @@ set_append_path_locus(PlannerInfo *root, Path *pathnode, RelOptInfo *rel,
 
 	pathnode->locus = targetlocus;
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * Workaround for assertions in create_plan,
 	 * else will get wrong plan, ex: general locus with parallel_workers > 1.
 	 * Reconsider this after append locus is fixed.
@@ -3420,7 +3420,7 @@ create_ctescan_path(PlannerInfo *root, RelOptInfo *rel,
 	pathnode->rows = clamp_row_est(rel->rows / numsegments);
 	pathnode->startup_cost = subpath->startup_cost;
 	pathnode->total_cost = subpath->total_cost;
-	/* GPDB_PARALLEL_FIXME: Is it correct to set parallel workers here? */
+	/* CBDB_PARALLEL_FIXME: Is it correct to set parallel workers here? */
 	pathnode->parallel_workers = subpath->parallel_workers;
 
 	ctepath->subpath = subpath;
@@ -3498,7 +3498,7 @@ create_resultscan_path(PlannerInfo *root, RelOptInfo *rel,
 		CdbPathLocus_MakeEntry(&pathnode->locus);
 	else if (exec_location == PROEXECLOCATION_ALL_SEGMENTS)
 	{
-		/* GPDB_PARALLEL_FIXME: I'm not sure if this makes sense. This
+		/* CBDB_PARALLEL_FIXME: I'm not sure if this makes sense. This
 		 * would return multiple rows, one for each segment, but usually
 		 * a "SELECT func()" is expected to return just one row.
 		 */
@@ -4340,7 +4340,7 @@ create_hashjoin_path(PlannerInfo *root,
 	int			rowidexpr_id;
 
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * We do have outer_path(parallel_workers=0) when parallel_aware is true
 	 * as we try more partial hash join paths than upstream.
 	 * Are them reasonable? Better to remove them until we have a clear answer.
@@ -4481,7 +4481,7 @@ create_hashjoin_path(PlannerInfo *root,
 	/*
 	 * For parallel hash, it is motionHazard. If there are parallel hash join on outside child,
 	 * not use parallel hash.
-	 * GPDB_PARALLEL_FIXME: At least, should not have impact on non-parallel path generation.
+	 * CBDB_PARALLEL_FIXME: At least, should not have impact on non-parallel path generation.
 	 */
 	if (enable_parallel && outer_path->barrierHazard && !parallel_hash)
 		return NULL;
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 743a52e19ab..564c3467257 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -964,7 +964,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
 	}
 
 	/*
-	 * GP_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * GPDB hack here for IntoClause, see GetCachedPlan().
 	 * Disable parallel if into a AO/AOCS table.
 	 */
diff --git a/src/test/regress/expected/segspace.out b/src/test/regress/expected/segspace.out
index 7ccf7a813f5..c854a25e671 100644
--- a/src/test/regress/expected/segspace.out
+++ b/src/test/regress/expected/segspace.out
@@ -1,7 +1,7 @@
 --
 -- Tests the spill files disk space accounting mechanism
 --
--- GPDP_PARALLEL_FIXME: it's hard to make fault_injection work with prallel processes.
+-- CBDB_PARALLEL_FIXME: it's hard to make fault_injection work with parallel processes.
 set enable_parallel = false;
 -- check segspace before test
 reset statement_mem;
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index bf27948896c..951c6a5438f 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -1115,7 +1115,7 @@ explain (costs off)
 -- to increase the parallel query test coverage
 SAVEPOINT settings;
 SET LOCAL force_parallel_mode = 1;
--- GPDB_PARALLEL_FIXME: analyze actual rows may be different by running multiple times.
+-- CBDB_PARALLEL_FIXME: analyze actual rows may differ between runs.
 EXPLAIN (timing off, summary off, costs off) SELECT * FROM tenk1;
                 QUERY PLAN                
 ------------------------------------------
diff --git a/src/test/regress/expected/workfile/materialize_spill.out b/src/test/regress/expected/workfile/materialize_spill.out
index e2687bbbb96..3e4010cbf37 100644
--- a/src/test/regress/expected/workfile/materialize_spill.out
+++ b/src/test/regress/expected/workfile/materialize_spill.out
@@ -59,7 +59,7 @@ set enable_nestloop = true;
 -- ORCA doesn't honor enable_nestloop/enable_hashjoin, so this won't produce
 -- the kind of plan we're looking for.
 set optimizer=off;
--- GP_PARALLEL_FIXME: seems like work_mem are affected by parallel, thus more spilling
+-- CBDB_PARALLEL_FIXME: seems like work_mem is affected by parallel, thus more spilling
 -- happened. Temporally disable parallel in this case to pass the test.
 set enable_parallel=off;
 -- This is the actual test query.
diff --git a/src/test/regress/input/temp_tablespaces.source b/src/test/regress/input/temp_tablespaces.source
index 3bbd8f2fb37..1130658f9da 100644
--- a/src/test/regress/input/temp_tablespaces.source
+++ b/src/test/regress/input/temp_tablespaces.source
@@ -41,7 +41,7 @@
 CREATE TABLE tts_foo (i int, j int) distributed by(i);
 insert into tts_foo select i, i from generate_series(1,80000)i;
 ANALYZE tts_foo;
 set gp_cte_sharing=on;
--- GP_PARALLEL_FIXME: since we disabled shared input scan in parallel mode, sisc_xslice_temp_files
+-- CBDB_PARALLEL_FIXME: since we disabled shared input scan in parallel mode, sisc_xslice_temp_files
 -- will never be triggered. We need set max_parallel_workers_per_gather to 0 in this case.
 set max_parallel_workers_per_gather = 0;
@@ -134,7 +134,7 @@ select gp_wait_until_triggered_fault('hashagg_spill_temp_files', 1, dbid)
 	from gp_segment_configuration where role='p' and content>=0;
 select gp_inject_fault('hashagg_spill_temp_files', 'reset', dbid)
 	from gp_segment_configuration where role='p' and content>=0;
--- GP_PARALLEL_FIXME: we need to reset max_parallel_workers_per_gather since we changed it.
+-- CBDB_PARALLEL_FIXME: we need to reset max_parallel_workers_per_gather since we changed it.
 reset max_parallel_workers_per_gather;
 
 drop table tts_foo, tts_bar, tts_hashagg;
diff --git a/src/test/regress/output/temp_tablespaces.source b/src/test/regress/output/temp_tablespaces.source
index 867cfcf15a6..8fe01bff02a 100644
--- a/src/test/regress/output/temp_tablespaces.source
+++ b/src/test/regress/output/temp_tablespaces.source
@@ -52,7 +52,7 @@ CREATE TABLE tts_foo (i int, j int) distributed by(i);
 insert into tts_foo select i, i from generate_series(1,80000)i;
 ANALYZE tts_foo;
 set gp_cte_sharing=on;
--- GP_PARALLEL_FIXME: since we disabled shared input scan in parallel mode, sisc_xslice_temp_files
+-- CBDB_PARALLEL_FIXME: since we disabled shared input scan in parallel mode, sisc_xslice_temp_files
 -- will never be triggered. We need set max_parallel_workers_per_gather to 0 in this case.
 set max_parallel_workers_per_gather = 0;
 -- CASE 1: when temp_tablespaces is set, hashagg and share-input-scan
@@ -224,7 +224,7 @@ select gp_inject_fault('hashagg_spill_temp_files', 'reset', dbid)
 	from gp_segment_configuration where role='p' and content>=0;
  Success: 
 (3 rows)
 
--- GP_PARALLEL_FIXME: we need to reset max_parallel_workers_per_gather since we changed it.
+-- CBDB_PARALLEL_FIXME: we need to reset max_parallel_workers_per_gather since we changed it.
 reset max_parallel_workers_per_gather;
 drop table tts_foo, tts_bar, tts_hashagg;
 drop tablespace mytempsp0;
diff --git a/src/test/regress/sql/segspace.sql b/src/test/regress/sql/segspace.sql
index aa4d1df0d27..01b00fc0b5a 100644
--- a/src/test/regress/sql/segspace.sql
+++ b/src/test/regress/sql/segspace.sql
@@ -1,7 +1,7 @@
 --
 -- Tests the spill files disk space accounting mechanism
 --
--- GPDP_PARALLEL_FIXME: it's hard to make fault_injection work with prallel processes.
+-- CBDB_PARALLEL_FIXME: it's hard to make fault_injection work with parallel processes.
 set enable_parallel = false;
 
 -- check segspace before test
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index fbd94c31a64..09513821a50 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -402,7 +402,7 @@ explain (costs off)
 -- to increase the parallel query test coverage
 SAVEPOINT settings;
 SET LOCAL force_parallel_mode = 1;
--- GPDB_PARALLEL_FIXME: analyze actual rows may be different by running multiple times.
+-- CBDB_PARALLEL_FIXME: analyze actual rows may differ between runs.
 EXPLAIN (timing off, summary off, costs off) SELECT * FROM tenk1;
 ROLLBACK TO SAVEPOINT settings;
diff --git a/src/test/regress/sql/workfile/materialize_spill.sql b/src/test/regress/sql/workfile/materialize_spill.sql
index bb02936a969..a71d8b3728b 100644
--- a/src/test/regress/sql/workfile/materialize_spill.sql
+++ b/src/test/regress/sql/workfile/materialize_spill.sql
@@ -60,7 +60,7 @@ set enable_nestloop = true;
 -- ORCA doesn't honor enable_nestloop/enable_hashjoin, so this won't produce
 -- the kind of plan we're looking for.
 set optimizer=off;
--- GP_PARALLEL_FIXME: seems like work_mem are affected by parallel, thus more spilling
+-- CBDB_PARALLEL_FIXME: seems like work_mem is affected by parallel, thus more spilling
 -- happened. Temporally disable parallel in this case to pass the test.
 set enable_parallel=off;