Merged
2 changes: 1 addition & 1 deletion src/backend/access/bitmap/bitmapattutil.c
@@ -227,7 +227,7 @@ _bitmap_create_lov_heapandindex(Relation rel,
 	indexInfo->ii_Concurrent = false;
 	indexInfo->ii_BrokenHotChain = false;
 	/*
-	 * GP_PARALLEL_FIXME: temporarily set ii_ParallelWorkers to -1 to disable parallel in bitmap index
+	 * CBDB_PARALLEL_FIXME: temporarily set ii_ParallelWorkers to -1 to disable parallel in bitmap index
 	 * building. That's because that we still hold InterruptHoldoffCount after launch parallel workers.
 	 * And when parallel workers detach the message 'X' is not interrupt the leader. However, the leader
 	 * must wait for workers detaching. Thus there will be a hang issue.
2 changes: 1 addition & 1 deletion src/backend/access/transam/parallel.c
@@ -1665,7 +1665,7 @@ InitGpParallelDSMHash(void)
 Size
 GpParallelDSMHashSize(void)
 {
-	/* GPDB_PARALLEL_FIXME: limit for max slice */
+	/* CBDB_PARALLEL_FIXME: limit for max slice */
 	return hash_estimate_size(SHARED_PARALLEL_DSM_TABLE_SIZE,
 							  sizeof(GpParallelDSMEntry));
 }
2 changes: 1 addition & 1 deletion src/backend/catalog/index.c
@@ -1374,7 +1374,7 @@ index_create_internal(Relation heapRelation,
 	else
 	{
 		/*
-		 * GP_PARALLEL_FIXME: temporarily set ii_ParallelWorkers to -1 to disable parallel in bitmap index
+		 * CBDB_PARALLEL_FIXME: temporarily set ii_ParallelWorkers to -1 to disable parallel in bitmap index
 		 * building. That's because that we still hold InterruptHoldoffCount after launch parallel workers.
 		 * And when parallel workers detach the message 'X' is not interrupt the leader. However, the leader
 		 * must wait for workers detaching. Thus there will be a hang issue.
4 changes: 2 additions & 2 deletions src/backend/cdb/cdbllize.c
@@ -167,7 +167,7 @@ get_partitioned_policy_from_path(PlannerInfo *root, Path *path)
 	 *
 	 * NOTE: HashedOJ is not OK, because we cannot let the NULLs be stored
 	 * multiple segments. HashedWorkers is OK.
-	 * GPDB_PARALLEL_FIXME: Is HashedWorkers OK?
+	 * CBDB_PARALLEL_FIXME: Is HashedWorkers OK?
 	 * There is no parallel insertion now, query->intoPolicy couldn't be CdbLocusType_HashedWorkers.
 	 */
 	if (!(path->locus.locustype == CdbLocusType_Hashed ||
@@ -1579,7 +1579,7 @@ motion_sanity_check(PlannerInfo *root, Plan *plan)
 static void
 adjust_top_path_for_parallel_retrieve_cursor(Path *path, PlanSlice *slice)
 {
-	/* GPDB_PARALLEL_FIXME: should consider parallel_workers for parallel cursor? */
+	/* CBDB_PARALLEL_FIXME: should consider parallel_workers for parallel cursor? */
 	Assert(path->locus.parallel_workers == 0);

 	if (CdbPathLocus_IsSingleQE(path->locus)
14 changes: 7 additions & 7 deletions src/backend/cdb/cdbpath.c
@@ -144,7 +144,7 @@ cdbpath_cost_motion(PlannerInfo *root, CdbMotionPath *motionpath)
 		recvrows = motionpath->path.rows;
 	motioncost = cost_per_row * 0.5 * (sendrows + recvrows);
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * Motioncost may be higher than sendrows + recvrows.
 	 * ex: Broadcast Motion 3:6
 	 * Broadcast to prallel workers, each worker's has a rel's all rows(recvrows),
@@ -603,7 +603,7 @@ cdbpath_create_motion_path(PlannerInfo *root,
 	 */
 	pathnode->path.parallel_aware = false;
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * We once set parallel_safe by locus type, but almost all locus are
 	 * parallel safe nowadays.
 	 * In principle, we should set parallel_safe = true if we are in a parallel join.
@@ -2895,7 +2895,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,

 	Assert(cdbpathlocus_is_valid(outer.locus));
 	Assert(cdbpathlocus_is_valid(inner.locus));
-	/* GPDB_PARALLEL_FIXME: reconsider the meaning of parallel_safe in GP parallel? */
+	/* CBDB_PARALLEL_FIXME: reconsider the meaning of parallel_safe in GP parallel? */
 	if (!outer.path->parallel_safe || !inner.path->parallel_safe)
 		goto fail;

@@ -2954,7 +2954,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 	 * unfortunately we have to pretend that inner is randomly distributed,
 	 * otherwise we may end up with redistributing outer rel.
 	 */
-	/* GPDB_PARALLEL_FIXME: this may cause parallel CTE, not sure if it's right */
+	/* CBDB_PARALLEL_FIXME: this may cause parallel CTE, not sure if it's right */
 	if (outer.has_wts && inner.locus.distkey != NIL)
 		CdbPathLocus_MakeStrewn(&inner.locus,
 								CdbPathLocus_NumSegments(inner.locus),
@@ -3139,7 +3139,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 	else if (innerParallel == 0 && other->path->pathtype == T_SeqScan)
 	{
 		/*
-		 * GPDB_PARALLEL_FIXME: The inner path will be duplicately processed.
+		 * CBDB_PARALLEL_FIXME: The inner path will be duplicately processed.
 		 * That require inner path should not have descendant Motion paths.
 		 * Use Seqscan here is more strit, but for now.
 		 *
@@ -3181,7 +3181,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 			 CdbPathLocus_IsSegmentGeneralWorkers(inner.locus))
 	{
 		/*
-		 * GPDB_PARALLEL_FIXME:
+		 * CBDB_PARALLEL_FIXME:
 		 * We shouln't get here as Path(parallel_worker=1) won't be added to partial_pathlist.
 		 * If outer locus is SegmentGeneral and its parallel_workers must be 0.
 		 * We neighter want a Motion nor change the parallel_workers of a path(May be enabled
@@ -3667,7 +3667,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
 	}

 	/* Which rel is bigger? */
-	/* GPDB_PARALLEL_FIXME: should we swap if parallel_aware? */
+	/* CBDB_PARALLEL_FIXME: should we swap if parallel_aware? */
 	if (large_rel->bytes < small_rel->bytes)
 		CdbSwap(CdbpathMfjRel *, large_rel, small_rel);

2 changes: 1 addition & 1 deletion src/backend/cdb/cdbpathtoplan.c
@@ -60,7 +60,7 @@ cdbpathtoplan_create_flow(PlannerInfo *root,
 	}
 	else if (CdbPathLocus_IsReplicated(locus))
 	{
-		/* GPDB_PARALLEL_FIXME: What if ReplicatedWorkers? */
+		/* CBDB_PARALLEL_FIXME: What if ReplicatedWorkers? */
 		flow = makeFlow(FLOW_REPLICATED, locus.numsegments);
 	}
 	else if (CdbPathLocus_IsHashed(locus) ||
2 changes: 1 addition & 1 deletion src/backend/cdb/endpoint/cdbendpoint.c
@@ -227,7 +227,7 @@ GetParallelCursorEndpointPosition(PlannedStmt *plan)
 {
 	if (plan->planTree->flow->flotype == FLOW_SINGLETON)
 	{
-		/* GPDB_PARALLEL_FIXME: CdbLocusType_SegmentGeneralWorkers */
+		/* CBDB_PARALLEL_FIXME: CdbLocusType_SegmentGeneralWorkers */
 		if (plan->planTree->flow->locustype == CdbLocusType_SegmentGeneral)
 			return ENDPOINT_ON_SINGLE_QE;
 		else
2 changes: 1 addition & 1 deletion src/backend/commands/explain_gp.c
@@ -773,7 +773,7 @@ cdbexplain_depositSliceStats(CdbExplain_StatHdr *hdr,
 	iworker = hdr->segindex - ss->segindex0;
 	ssw = &ss->workers[iworker];
 	Assert(iworker >= 0 && iworker < ss->nworker);
-	/* GPDB_PARALLEL_FIXME: reuse worker to store the stats of same slice */
+	/* CBDB_PARALLEL_FIXME: reuse worker to store the stats of same slice */
 #if 0
 	Assert(ssw->peakmemused == 0);	/* each worker should be seen just once */
 #endif
2 changes: 1 addition & 1 deletion src/backend/commands/matview.c
@@ -473,7 +473,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query,

 	/* Plan the query which will generate data for the refresh. */

-	/* GPDB_PARALLEL_FIXME: hack here, use cursor_option to disable parallel */
+	/* CBDB_PARALLEL_FIXME: hack here, use cursor_option to disable parallel */
 	if (!refreshClause->intoAO)
 		plan = pg_plan_query(query, queryString, CURSOR_OPT_PARALLEL_OK, NULL);
 	else
2 changes: 1 addition & 1 deletion src/backend/commands/vacuum.c
@@ -3183,7 +3183,7 @@ vac_update_relstats_from_list(List *updated_stats)
 	if (RelationIsAppendOptimized(rel))
 	{
 		/*
-		 * GPDB_PARALLEL_FIXME: This is very hacky!
+		 * CBDB_PARALLEL_FIXME: This is very hacky!
 		 * relallvisible came from vacuum AO/AOCO processes means the segment file count
 		 * of AO/AOCO tables. We use it to update pg_appendonly.segfilecount.
 		 * See ao_vacuum_rel_post_cleanup in vacuum_ao.c.
2 changes: 1 addition & 1 deletion src/backend/executor/execMain.c
@@ -1773,7 +1773,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)

 			/* set our global sliceid variable for elog. */
 			int save_currentSliceId = estate->currentSliceId;
-			/* GPDB_PARALLEL_FIXME: Is it necessary to save and recover this? */
+			/* CBDB_PARALLEL_FIXME: Is it necessary to save and recover this? */
 			bool save_useMppParallelMode = estate->useMppParallelMode;

 			estate->currentSliceId = estate->es_plannedstmt->subplan_sliceIds[subplan_id - 1];
2 changes: 1 addition & 1 deletion src/backend/executor/execParallel.c
@@ -1565,7 +1565,7 @@ InitializeGpParallelWorkers(PlanState *planstate, ParallelWorkerContext *pwcxt)
 	if (planstate == NULL)
 		return false;
 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * Why we call PG's xxxInitializeWorker functions for some nodes, but not for others?
 	 */
 	switch (nodeTag(planstate))
6 changes: 3 additions & 3 deletions src/backend/executor/execUtils.c
@@ -1601,7 +1601,7 @@ FillSliceGangInfo(ExecSlice *slice, PlanSlice *ps)
 			 * It's either the root slice or an InitPlan slice that runs in
 			 * the QD process, or really unused slice.
 			 */
-			/* GPDB_PARALLEL_FIXME: QD process should never be parallel, do we need to plus factor? */
+			/* CBDB_PARALLEL_FIXME: QD process should never be parallel, do we need to plus factor? */
 			slice->planNumSegments = 1;
 			break;
 		case GANGTYPE_PRIMARY_WRITER:
@@ -1629,13 +1629,13 @@ FillSliceGangInfo(ExecSlice *slice, PlanSlice *ps)
 			}
 			break;
 		case GANGTYPE_ENTRYDB_READER:
-			/* GPDB_PARALLEL_FIXME: QD parallel is disabled */
+			/* CBDB_PARALLEL_FIXME: QD parallel is disabled */
 			slice->planNumSegments = 1;
 			slice->segments = list_make1_int(-1);
 			break;
 		case GANGTYPE_SINGLETON_READER:
 			/*
-			 * GPDB_PARALLEL_FIXME:
+			 * CBDB_PARALLEL_FIXME:
 			 * Could be parallel, parallel scan on replica tables.
 			 */
 			slice->planNumSegments = 1 * factor;
4 changes: 2 additions & 2 deletions src/backend/executor/nodeHash.c
@@ -778,7 +778,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		/* Careful, this could overflow size_t */
 		double		newlimit;

-		/* GP_PARALLEL_FIXME: if we enable pg style parallel some day, we should reconsider it. */
+		/* CBDB_PARALLEL_FIXME: if we enable pg style parallel some day, we should reconsider it. */
 		newlimit = (double) hash_table_bytes * (double) parallel_workers;
 		newlimit = Min(newlimit, (double) SIZE_MAX);
 		hash_table_bytes = (size_t) newlimit;
@@ -2563,7 +2563,7 @@ ExecHashTableExplainEnd(PlanState *planstate, struct StringInfoData *buf)
 	}

 	/* Report workfile I/O statistics. */
-	/* GPDB_PARALLEL_FIXME: ExecHashTableExplainBatches if parallel_aware? */
+	/* CBDB_PARALLEL_FIXME: ExecHashTableExplainBatches if parallel_aware? */
 	if (hashtable->nbatch > 1 && !planstate->plan->parallel_aware)
 	{
 		ExecHashTableExplainBatches(hashtable, buf, 0, 1, "Initial");
2 changes: 1 addition & 1 deletion src/backend/executor/nodeHashjoin.c
@@ -204,7 +204,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
 	hashtable = node->hj_HashTable;
 	econtext = node->js.ps.ps_ExprContext;
 	parallel_state = hashNode->parallel_state;
-	/* GPDB_PARALLEL_FIXME: When parallel is true and parallel_state is NULL */
+	/* CBDB_PARALLEL_FIXME: When parallel is true and parallel_state is NULL */
 	parallel = parallel && (parallel_state != NULL);

 	/*
20 changes: 10 additions & 10 deletions src/backend/optimizer/path/allpaths.c
@@ -522,7 +522,7 @@ bring_to_outer_query(PlannerInfo *root, RelOptInfo *rel, List *outer_quals)
 	rel->cheapest_unique_path = NULL;
 	rel->cheapest_parameterized_paths = NIL;
 	rel->pathlist = NIL;
-	/* GPDB_PARALLEL_FIXME: Need to clear partial_pathlist before we enable OuterQuery locus in paralle mode */
+	/* CBDB_PARALLEL_FIXME: Need to clear partial_pathlist before we enable OuterQuery locus in paralle mode */
 	rel->partial_pathlist = NIL;

 	foreach(lc, origpathlist)
@@ -652,7 +652,7 @@ bring_to_singleQE(PlannerInfo *root, RelOptInfo *rel)
 		add_path(rel, path, root);
 	}
 	/*
-	 * GP_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * If we need to bring to single QE which commonly seen in lateral
 	 * join with group by or limit, we better to set partial pathlist
 	 * to NIL in order to make sure single QE locus is satisfied in
@@ -790,7 +790,7 @@ set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	if (rel->upperrestrictinfo)
 	{
 		bring_to_outer_query(root, rel, rel->upperrestrictinfo);
-		/* GP_PARALLEL_FIXME: enable parallel outer query? */
+		/* CBDB_PARALLEL_FIXME: enable parallel outer query? */
 	}
 	else if (root->config->force_singleQE)
 	{
@@ -920,7 +920,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 	}

 	/*
-	 * GP_PARALLEL_FIXME: GPDB don't allow parallelism for relations that are system catalogs.
+	 * CBDB_PARALLEL_FIXME: GPDB don't allow parallelism for relations that are system catalogs.
 	 */
 	if (IsSystemClassByRelid(rte->relid))
 		return;
@@ -1091,7 +1091,7 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
 	if (parallel_workers <= 1)
 		return;

-	/* GPDB_PARALLEL_FIXME: update locus.parallel_workers? */
+	/* CBDB_PARALLEL_FIXME: update locus.parallel_workers? */

 	/* Add an unordered partial path based on a parallel sequential scan. */
 	add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
@@ -1669,7 +1669,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 												  &pa_nonpartial_subpaths,
 												  NULL);
 			/*
-			 * GPDB_PARALLEL_FIXME: can't use parallel append if subpath
+			 * CBDB_PARALLEL_FIXME: can't use parallel append if subpath
 			 * is not parallel safe.
 			 */
 			if (!nppath->parallel_safe)
@@ -1770,7 +1770,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 				parallel_workers = Max(parallel_workers, path->parallel_workers);
 			}
 			/*
-			 * GPDB_PARALLEL_FIXME: it still cannot be opened after we deal with append.
+			 * CBDB_PARALLEL_FIXME: it still cannot be opened after we deal with append.
 			 * Because we currently allow path with non parallel_workers been added to
 			 * partial_path.
 			 */
@@ -1795,7 +1795,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 											max_parallel_workers_per_gather);
 		}
 		/*
-		 * GPDB_PARALLEL_FIXME: it still cannot be opened after we deal with append.
+		 * CBDB_PARALLEL_FIXME: it still cannot be opened after we deal with append.
 		 * Because we currently allow path with non parallel_workers been added to
 		 * partial_path.
 		 */
@@ -3694,7 +3694,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
 		 * already.
 		 */
 		bring_to_outer_query(root, rel, NIL);
-		/* GP_PARALLEL_FIXME: enable parallel outer query? */
+		/* CBDB_PARALLEL_FIXME: enable parallel outer query? */
 	}

 	return rel;
@@ -3815,7 +3815,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
 	if (bms_equal(rel->relids, root->all_baserels) && root->is_correlated_subplan)
 	{
 		bring_to_outer_query(root, rel, NIL);
-		/* GP_PARALLEL_FIXME: enable parallel outer query? */
+		/* CBDB_PARALLEL_FIXME: enable parallel outer query? */
 	}

 	/* Find and save the cheapest paths for this rel */
4 changes: 2 additions & 2 deletions src/backend/optimizer/path/joinpath.c
@@ -1147,7 +1147,7 @@ try_partial_hashjoin_path(PlannerInfo *root,
 		return;

 	/*
-	 * GPDB_PARALLEL_FIXME
+	 * CBDB_PARALLEL_FIXME
 	 * Customers encounter an issue that when parallel hash, broadcast motion
 	 * a smaller table may be worser than redistribute a big table.
 	 * We add a path whic doesn't try broadcast if possible.
@@ -1174,7 +1174,7 @@ try_partial_hashjoin_path(PlannerInfo *root,
 	}

 	/*
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * We only want non-broadcast in parallel hash if the guc is set.
 	 */
 	if (parallel_hash && !parallel_hash_enable_motion_broadcast)
2 changes: 1 addition & 1 deletion src/backend/optimizer/plan/createplan.c
@@ -5785,7 +5785,7 @@ create_hashjoin_plan(PlannerInfo *root,
 	bool		partition_selectors_created = false;
 	ListCell   *lc;

-	/* GP_PARALLEL_FIXME:
+	/* CBDB_PARALLEL_FIXME:
 	 * PartitionSelector is not parallel-aware, so disable it temporarily.
 	 * In future, after enabling merging partition prune info in shared memory,
 	 * PartitionSelector could work in parallel mode.
12 changes: 6 additions & 6 deletions src/backend/optimizer/plan/planner.c
@@ -549,7 +549,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 	 * Unlike upstream, partial_path is valid in GP without Gather nodes.
 	 * Keep the two pathlist separated until the final. Now it's the time
 	 * to choose the best.
-	 * GPDB_PARALLEL_FIXME:
+	 * CBDB_PARALLEL_FIXME:
 	 * Take GP's special into partial_pathlist, ex: agg and etc.
 	 */
 	if (final_rel->partial_pathlist != NIL)
@@ -2463,7 +2463,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 	 * be able to make use of them.
 	 */
 	/*
-	 * GPDB_PARALLEL_FIXME: should keep query_level > 1 in GPDB?
+	 * CBDB_PARALLEL_FIXME: should keep query_level > 1 in GPDB?
 	 * It will lose parallel path, ex: plain parallel scan.
 	 * PG have Gather node but GP delay partial path until Gather Motion.
 	 *
@@ -2491,7 +2491,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 	{
 		Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);

-		/* GPDB_PARALLEL_FIXEME: support parallel SCATTER BY? */
+		/* CBDB_PARALLEL_FIXME: support parallel SCATTER BY? */
 		if (parse->scatterClause)
 		{
 			current_rel->partial_pathlist = NIL;
@@ -5040,7 +5040,7 @@ create_distinct_paths(PlannerInfo *root,
 		/* On how many segments will the distinct result reside? */
 		if (CdbPathLocus_IsPartitioned(path->locus))
 		{
-			/* GPDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
+			/* CBDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
 			numDistinctRows = numDistinctRowsTotal / CdbPathLocus_NumSegments(path->locus);
 			if (path->locus.parallel_workers > 1)
 				numDistinctRows /= path->locus.parallel_workers;
@@ -5084,7 +5084,7 @@ create_distinct_paths(PlannerInfo *root,

 		if (CdbPathLocus_IsPartitioned(path->locus))
 		{
-			/* GPDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
+			/* CBDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
 			numDistinctRows = numDistinctRowsTotal / CdbPathLocus_NumSegments(path->locus);
 			if (path->locus.parallel_workers > 1)
 				numDistinctRows /= path->locus.parallel_workers;
@@ -5133,7 +5133,7 @@ create_distinct_paths(PlannerInfo *root,

 		if (CdbPathLocus_IsPartitioned(path->locus))
 		{
-			/* GPDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
+			/* CBDB_PARALLEL_FIXME: should we consider parallel in distinct path? */
 			numDistinctRows = clamp_row_est(numDistinctRowsTotal / CdbPathLocus_NumSegments(path->locus));
 			if (path->locus.parallel_workers > 1)
 				numDistinctRows /= path->locus.parallel_workers;