From 61c76a0950857d61efea0dfea0b92125bfb93575 Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Tue, 20 Feb 2024 17:56:03 -0500 Subject: [PATCH 01/16] Add SetTargetEdge --- src/coreclr/jit/block.h | 21 ++++++--- src/coreclr/jit/fgbasic.cpp | 48 ++++++++++++--------- src/coreclr/jit/fgehopt.cpp | 12 +++--- src/coreclr/jit/fginline.cpp | 7 +-- src/coreclr/jit/fgopt.cpp | 10 ++--- src/coreclr/jit/flowgraph.cpp | 19 ++++---- src/coreclr/jit/helperexpansion.cpp | 43 ++++++++++-------- src/coreclr/jit/importer.cpp | 26 ++++++----- src/coreclr/jit/indirectcalltransformer.cpp | 8 ++-- src/coreclr/jit/jiteh.cpp | 4 +- src/coreclr/jit/loopcloning.cpp | 15 +++++-- src/coreclr/jit/morph.cpp | 21 ++++++--- 12 files changed, 138 insertions(+), 96 deletions(-) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 208eb07e583fa0..d9fc1e9894c659 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -526,7 +526,7 @@ struct BasicBlock : private LIR::Range /* The following union describes the jump target(s) of this block */ union { unsigned bbTargetOffs; // PC offset (temporary only) - BasicBlock* bbTarget; // basic block + BasicBlock* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc) BasicBlock* bbTrueTarget; // BBJ_COND jump target when its condition is true (alias for bbTarget) BBswtDesc* bbSwtTargets; // switch descriptor BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor @@ -638,17 +638,26 @@ struct BasicBlock : private LIR::Range BasicBlock* GetTarget() const { - // Only block kinds that use `bbTarget` can access it, and it must be non-null. + return GetTargetEdge()->getDestinationBlock(); + } + + FlowEdge* GetTargetEdge() const + { + // Only block kinds that use `bbTargetEdge` can access it, and it must be non-null. assert(HasInitializedTarget()); - return bbTarget; + assert(bbTargetEdge->getSourceBlock() == this); + assert(bbTargetEdge->getDestinationBlock() != nullptr); + return bbTargetEdge; } - void SetTarget(BasicBlock* target) + void SetTargetEdge(FlowEdge* targetEdge) { // SetKindAndTarget() nulls target for non-jump kinds, - // so don't use SetTarget() to null bbTarget without updating bbKind. - bbTarget = target; + // so don't use SetTargetEdge() to null bbTargetEdge without updating bbKind. 
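+        // Callers are expected to create the edge (typically via fgAddRefPred) before installing it here;
+        // the asserts below check that it runs from this block to a non-null successor.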
+ bbTargetEdge = targetEdge; assert(HasInitializedTarget()); + assert(bbTargetEdge->getSourceBlock() == this); + assert(bbTargetEdge->getDestinationBlock() != nullptr); } BasicBlock* GetTrueTarget() const diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 3e583d74500529..a6e637daca23ed 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -646,9 +646,9 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas case BBJ_LEAVE: // This function can be called before import, so we still have BBJ_LEAVE { assert(block->TargetIs(oldTarget)); - block->SetTarget(newTarget); - FlowEdge* const oldEdge = fgRemoveRefPred(oldTarget, block); - fgAddRefPred(newTarget, block, oldEdge); + fgRemoveRefPred(block->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(newTarget, block, block->GetTargetEdge()); + block->SetTargetEdge(newEdge); break; } @@ -656,44 +656,50 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas if (block->TrueTargetIs(oldTarget)) { - if (block->FalseTargetIs(oldTarget)) + FlowEdge* const oldEdge = block->GetTrueEdge(); + + if (block->TrueEdgeIs(oldEdge)) { // fgRemoveRefPred returns nullptr for BBJ_COND blocks with two flow edges to target fgRemoveConditionalJump(block); assert(block->KindIs(BBJ_ALWAYS)); assert(block->TargetIs(oldTarget)); - block->SetTarget(newTarget); - } - else - { - block->SetTrueTarget(newTarget); } // fgRemoveRefPred should have removed the flow edge - FlowEdge* oldEdge = fgRemoveRefPred(oldTarget, block); - assert(oldEdge != nullptr); + fgRemoveRefPred(oldEdge); + assert(oldEdge->getDupCount() == 0); // TODO-NoFallThrough: Proliferate weight from oldEdge // (as a quirk, we avoid doing so for the true target to reduce diffs for now) FlowEdge* const newEdge = fgAddRefPred(newTarget, block); + if (block->KindIs(BBJ_ALWAYS)) { newEdge->setLikelihood(1.0); + block->SetTargetEdge(newEdge); } - else if (oldEdge->hasLikelihood()) + else { - newEdge->setLikelihood(oldEdge->getLikelihood()); + assert(block->KindIs(BBJ_COND)); + block->SetTrueEdge(newEdge); + + if (oldEdge->hasLikelihood()) + { + newEdge->setLikelihood(oldEdge->getLikelihood()); + } } } else { assert(block->FalseTargetIs(oldTarget)); + FlowEdge* const oldEdge = block->GetFalseEdge(); // fgRemoveRefPred should have removed the flow edge - FlowEdge* oldEdge = fgRemoveRefPred(oldTarget, block); - assert(oldEdge != nullptr); - block->SetFalseTarget(newTarget); - fgAddRefPred(newTarget, block, oldEdge); + fgRemoveRefPred(oldEdge); + assert(oldEdge->getDupCount() == 0); + FlowEdge* const newEdge = fgAddRefPred(newTarget, block, oldEdge); + block->SetFalseEdge(newEdge); } break; @@ -2990,8 +2996,8 @@ void Compiler::fgLinkBasicBlocks() assert(!(curBBdesc->IsLast() && jumpsToNext)); BasicBlock* const jumpDest = jumpsToNext ? curBBdesc->Next() : fgLookupBB(curBBdesc->GetTargetOffs()); - // Redundantly use SetKindAndTarget() instead of SetTarget() just this once, - // so we don't break the HasInitializedTarget() invariant of SetTarget(). + // Redundantly use SetKindAndTarget() instead of SetTargetEdge() just this once, + // so we don't break the HasInitializedTarget() invariant of SetTargetEdge(). curBBdesc->SetKindAndTarget(curBBdesc->GetKind(), jumpDest); fgAddRefPred(jumpDest, curBBdesc); @@ -3886,10 +3892,10 @@ void Compiler::fgFindBasicBlocks() if (block->KindIs(BBJ_EHFILTERRET)) { // Mark catch handler as successor. 
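+                    // The edge to hndBegBB is created first, then installed as the filter-return block's target edge.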
- block->SetTarget(hndBegBB); FlowEdge* const newEdge = fgAddRefPred(hndBegBB, block); newEdge->setLikelihood(1.0); - assert(block->GetTarget()->bbCatchTyp == BBCT_FILTER_HANDLER); + block->SetTargetEdge(newEdge); + assert(hndBegBB->bbCatchTyp == BBCT_FILTER_HANDLER); break; } } diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 4c781cbc0c222c..55208dfeeb2301 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -1757,8 +1757,8 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, JITDUMP("Redirecting branch in " FMT_BB " from " FMT_BB " to " FMT_BB ".\n", block->bbNum, callFinally->bbNum, canonicalCallFinally->bbNum); - block->SetTarget(canonicalCallFinally); - fgAddRefPred(canonicalCallFinally, block); + FlowEdge* const newEdge = fgAddRefPred(canonicalCallFinally, block); + block->SetTargetEdge(newEdge); assert(callFinally->bbRefs > 0); fgRemoveRefPred(callFinally, block); @@ -2149,17 +2149,17 @@ void Compiler::fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, fgRemoveRefPred(nonCanonicalBlock, predBlock); // Wire up the new flow + FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, predBlock, predEdge); + if (predBlock->KindIs(BBJ_ALWAYS)) { assert(predBlock->TargetIs(nonCanonicalBlock)); - predBlock->SetTarget(canonicalBlock); + predBlock->SetTargetEdge(newEdge); } else { assert(predBlock->KindIs(BBJ_COND)); assert(predBlock->TrueTargetIs(nonCanonicalBlock)); - predBlock->SetTrueTarget(canonicalBlock); + predBlock->SetTrueEdge(newEdge); } - - fgAddRefPred(canonicalBlock, predBlock, predEdge); } diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 54003637e7e36e..409b07eb7a9ad7 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1551,11 +1551,12 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) // Insert inlinee's blocks into inliner's block list. 
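+    // topBlock currently jumps to bottomBlock; retarget it to the inlinee's entry block before splicing in the inlinee's block list.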
assert(topBlock->KindIs(BBJ_ALWAYS)); assert(topBlock->TargetIs(bottomBlock)); + FlowEdge* const oldEdge = fgRemoveRefPred(bottomBlock, topBlock); + FlowEdge* const newEdge = fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock, oldEdge); + topBlock->SetNext(InlineeCompiler->fgFirstBB); - topBlock->SetTarget(topBlock->Next()); + topBlock->SetTargetEdge(newEdge); topBlock->SetFlags(BBF_NONE_QUIRK); - FlowEdge* const oldEdge = fgRemoveRefPred(bottomBlock, topBlock); - fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock, oldEdge); InlineeCompiler->fgLastBB->SetNext(bottomBlock); // diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index f0030bc1c0ec67..e973def8efe1e5 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -833,9 +833,9 @@ PhaseStatus Compiler::fgPostImportationCleanup() if (entryJumpTarget != osrEntry) { - fgFirstBB->SetTarget(entryJumpTarget); FlowEdge* const oldEdge = fgRemoveRefPred(osrEntry, fgFirstBB); - fgAddRefPred(entryJumpTarget, fgFirstBB, oldEdge); + FlowEdge* const newEdge = fgAddRefPred(entryJumpTarget, fgFirstBB, oldEdge); + fgFirstBB->SetTargetEdge(newEdge); JITDUMP("OSR: redirecting flow from method entry " FMT_BB " to OSR entry " FMT_BB " via step blocks.\n", @@ -1557,11 +1557,13 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc } // Optimize the JUMP to empty unconditional JUMP to go to the new target + FlowEdge* const newEdge = fgAddRefPred(bDest->GetTarget(), block, fgRemoveRefPred(bDest, block)); + switch (block->GetKind()) { case BBJ_ALWAYS: case BBJ_CALLFINALLYRET: - block->SetTarget(bDest->GetTarget()); + block->SetTargetEdge(newEdge); break; case BBJ_COND: @@ -1581,8 +1583,6 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc unreached(); } - fgAddRefPred(bDest->GetTarget(), block, fgRemoveRefPred(bDest, block)); - return true; } return false; diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index c393648c843ef4..88af5490bf3fc2 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -272,9 +272,6 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) BasicBlock* poll = fgNewBBafter(BBJ_ALWAYS, top, true); bottom = fgNewBBafter(top->GetKind(), poll, true); - poll->SetTarget(bottom); - assert(poll->JumpsToNext()); - bottom->TransferTarget(top); // Update block flags @@ -366,9 +363,12 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) top->SetCond(bottom, poll); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. - fgAddRefPred(bottom, poll); fgAddRefPred(bottom, top); fgAddRefPred(poll, top); + + FlowEdge* const newEdge = fgAddRefPred(bottom, poll); + poll->SetTargetEdge(newEdge); + assert(poll->JumpsToNext()); // Replace Top with Bottom in the predecessor list of all outgoing edges from Bottom // (1 for unconditional branches, 2 for conditional branches, N for switches). 
@@ -2758,7 +2758,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) /* Allocate a new basic block */ - BasicBlock* newHead = BasicBlock::New(this, BBJ_ALWAYS, block); + BasicBlock* newHead = BasicBlock::New(this, BBJ_ALWAYS); newHead->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK); newHead->inheritWeight(block); newHead->bbRefs = 0; @@ -2783,9 +2783,9 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) { case BBJ_CALLFINALLY: noway_assert(predBlock->TargetIs(block)); - predBlock->SetTarget(newHead); - fgRemoveRefPred(block, predBlock); - fgAddRefPred(newHead, predBlock); + fgRemoveRefPred(predBlock->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(newHead, predBlock); + predBlock->SetTargetEdge(newEdge); break; default: @@ -2798,7 +2798,8 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) } assert(nullptr == fgGetPredForBlock(block, newHead)); - fgAddRefPred(block, newHead); + FlowEdge* const newEdge = fgAddRefPred(block, newHead); + newHead->SetTargetEdge(newEdge); assert(newHead->HasFlag(BBF_INTERNAL)); } diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 39f7b8f09d2764..6ce59a4bfc2e54 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -390,8 +390,9 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm if (needsSizeCheck) { // sizeCheckBb is the first block after prevBb - prevBb->SetTarget(sizeCheckBb); - fgAddRefPred(sizeCheckBb, prevBb); + FlowEdge* const newEdge = fgAddRefPred(sizeCheckBb, prevBb); + prevBb->SetTargetEdge(newEdge); + // sizeCheckBb flows into nullcheckBb in case if the size check passes fgAddRefPred(nullcheckBb, sizeCheckBb); // fallbackBb is reachable from both nullcheckBb and sizeCheckBb @@ -403,8 +404,9 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm else { // nullcheckBb is the first block after prevBb - prevBb->SetTarget(nullcheckBb); - fgAddRefPred(nullcheckBb, prevBb); + FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, prevBb); + prevBb->SetTargetEdge(newEdge); + // No size check, nullcheckBb jumps to fast path fgAddRefPred(fastPathBb, nullcheckBb); // fallbackBb is only reachable from nullcheckBb (jump destination) @@ -730,9 +732,9 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St // fallback will just execute first time fallbackBb->bbSetRunRarely(); - fgRemoveRefPred(block, prevBb); - fgAddRefPred(tlsRootNullCondBB, prevBb); - prevBb->SetTarget(tlsRootNullCondBB); + fgRemoveRefPred(prevBb->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(tlsRootNullCondBB, prevBb); + prevBb->SetTargetEdge(newEdge); // All blocks are expected to be in the same EH region assert(BasicBlock::sameEHRegion(prevBb, block)); @@ -1085,9 +1087,9 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* // Update preds in all new blocks // assert(prevBb->KindIs(BBJ_ALWAYS)); - prevBb->SetTarget(maxThreadStaticBlocksCondBB); - fgRemoveRefPred(block, prevBb); - fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb); + fgRemoveRefPred(prevBb->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb); + prevBb->SetTargetEdge(newEdge); fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB); fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB); @@ -1443,7 +1445,7 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G // // Unlink block and prevBb - 
fgRemoveRefPred(block, prevBb); + fgRemoveRefPred(prevBb->GetTargetEdge()); // Block has two preds now: either isInitedBb or helperCallBb fgAddRefPred(block, isInitedBb); @@ -1451,10 +1453,10 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G // prevBb always flows into isInitedBb assert(prevBb->KindIs(BBJ_ALWAYS)); - prevBb->SetTarget(isInitedBb); + FlowEdge* const newEdge = fgAddRefPred(isInitedBb, prevBb); + prevBb->SetTargetEdge(newEdge); prevBb->SetFlags(BBF_NONE_QUIRK); assert(prevBb->JumpsToNext()); - fgAddRefPred(isInitedBb, prevBb); // Both fastPathBb and helperCallBb have a single common pred - isInitedBb isInitedBb->SetFalseTarget(helperCallBb); @@ -1764,13 +1766,15 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // Update preds in all new blocks // // block is no longer a predecessor of prevBb - fgRemoveRefPred(block, prevBb); + fgRemoveRefPred(prevBb->GetTargetEdge()); + // prevBb flows into lengthCheckBb assert(prevBb->KindIs(BBJ_ALWAYS)); - prevBb->SetTarget(lengthCheckBb); + FlowEdge* const newEdge = fgAddRefPred(lengthCheckBb, prevBb); + prevBb->SetTargetEdge(newEdge); prevBb->SetFlags(BBF_NONE_QUIRK); assert(prevBb->JumpsToNext()); - fgAddRefPred(lengthCheckBb, prevBb); + // lengthCheckBb has two successors: block and fastpathBb lengthCheckBb->SetFalseTarget(fastpathBb); fgAddRefPred(fastpathBb, lengthCheckBb); @@ -2445,7 +2449,6 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, // // Wire up the blocks // - firstBb->SetTarget(nullcheckBb); nullcheckBb->SetTrueTarget(lastBb); nullcheckBb->SetFalseTarget(typeCheckNotNeeded ? fallbackBb : typeChecksBbs[0]); @@ -2473,8 +2476,10 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, } } - fgRemoveRefPred(lastBb, firstBb); - fgAddRefPred(nullcheckBb, firstBb); + fgRemoveRefPred(firstBb->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, firstBb); + firstBb->SetTargetEdge(newEdge); + fgAddRefPred(lastBb, nullcheckBb); if (typeCheckNotNeeded) { diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 36f0fdcc45e55c..589e0e9033be8a 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4436,9 +4436,9 @@ void Compiler::impImportLeave(BasicBlock* block) assert(!step->HasInitializedTarget()); // the previous call to a finally returns to this call (to the next finally in the chain) - step->SetTarget(callBlock); FlowEdge* const newEdge = fgAddRefPred(callBlock, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // The new block will inherit this block's weight. callBlock->inheritWeight(block); @@ -4538,10 +4538,10 @@ void Compiler::impImportLeave(BasicBlock* block) // step's jump target shouldn't be set yet assert(!step->HasInitializedTarget()); - step->SetTarget(finalStep); { FlowEdge* const newEdge = fgAddRefPred(finalStep, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); } // The new block will inherit this block's weight. 
@@ -4690,10 +4690,11 @@ void Compiler::impImportLeave(BasicBlock* block) { fgRemoveRefPred(step->GetTarget(), step); } - step->SetTarget(exitBlock); // the previous step (maybe a call to a nested finally, or a nested catch - // exit) returns to this block + FlowEdge* const newEdge = fgAddRefPred(exitBlock, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // the previous step (maybe a call to a nested finally, or a nested catch + // exit) returns to this block // The new block will inherit this block's weight. exitBlock->inheritWeight(block); @@ -4806,9 +4807,10 @@ void Compiler::impImportLeave(BasicBlock* block) { fgRemoveRefPred(step->GetTarget(), step); } - step->SetTarget(step2); + FlowEdge* const newEdge = fgAddRefPred(step2, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); step2->inheritWeight(block); step2->CopyFlags(block, BBF_RUN_RARELY); step2->SetFlags(BBF_IMPORTED); @@ -4845,12 +4847,13 @@ void Compiler::impImportLeave(BasicBlock* block) fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step, HBtab->ebdHndBeg); if (step == block) { - fgRemoveRefPred(step->GetTarget(), step); + fgRemoveRefPred(step->GetTargetEdge()); } - step->SetTarget(callBlock); // the previous call to a finally returns to this call (to the next - // finally in the chain) + FlowEdge* const newEdge = fgAddRefPred(callBlock, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // the previous call to a finally returns to this call (to the next + // finally in the chain) // The new block will inherit this block's weight. callBlock->inheritWeight(block); @@ -4951,11 +4954,12 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == block) { - fgRemoveRefPred(step->GetTarget(), step); + fgRemoveRefPred(step->GetTargetEdge()); } - step->SetTarget(catchStep); + FlowEdge* const newEdge = fgAddRefPred(catchStep, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // The new block will inherit this block's weight. 
catchStep->inheritWeight(block); @@ -5008,9 +5012,9 @@ void Compiler::impImportLeave(BasicBlock* block) { fgRemoveRefPred(step->GetTarget(), step); } - step->SetTarget(leaveTarget); // this is the ultimate destination of the LEAVE FlowEdge* const newEdge = fgAddRefPred(leaveTarget, step); newEdge->setLikelihood(1.0); + step->SetTargetEdge(newEdge); // this is the ultimate destination of the LEAVE #ifdef DEBUG if (verbose) diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index fbbe08d66fc3c6..136db42a4bc50e 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -272,9 +272,9 @@ class IndirectCallTransformer if (checkBlock != currBlock) { assert(currBlock->KindIs(BBJ_ALWAYS)); - currBlock->SetTarget(checkBlock); FlowEdge* const newEdge = compiler->fgAddRefPred(checkBlock, currBlock); newEdge->setLikelihood(1.0); + currBlock->SetTargetEdge(newEdge); } // checkBlock @@ -1027,13 +1027,13 @@ class IndirectCallTransformer // Also, thenBlock has a single pred - last checkBlock assert(checkBlock->KindIs(BBJ_ALWAYS)); - checkBlock->SetTarget(thenBlock); - checkBlock->SetFlags(BBF_NONE_QUIRK); - assert(checkBlock->JumpsToNext()); FlowEdge* const thenEdge = compiler->fgAddRefPred(thenBlock, checkBlock); thenEdge->setLikelihood(thenLikelihoodWt); FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, thenBlock); elseEdge->setLikelihood(elseLikelihoodWt); + checkBlock->SetTargetEdge(thenEdge); + checkBlock->SetFlags(BBF_NONE_QUIRK); + assert(checkBlock->JumpsToNext()); DevirtualizeCall(thenBlock, checkIdx); } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index a4eacd9069db42..354ecaae0012ff 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -4342,10 +4342,10 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) } #endif // DEBUG // Change the bbTarget for bFilterLast from the old first 'block' to the new first 'bPrev' - fgRemoveRefPred(bFilterLast->GetTarget(), bFilterLast); - bFilterLast->SetTarget(bPrev); + fgRemoveRefPred(bFilterLast->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(bPrev, bFilterLast); newEdge->setLikelihood(1.0); + bFilterLast->SetTargetEdge(newEdge); } } diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index ca4c2572fa41d9..4d6f051b3b3e3b 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -2039,9 +2039,12 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // We haven't set the jump target yet assert(slowPreheader->KindIs(BBJ_ALWAYS)); assert(!slowPreheader->HasInitializedTarget()); - slowPreheader->SetTarget(slowHeader); - fgAddRefPred(slowHeader, slowPreheader); + { + FlowEdge* const newEdge = fgAddRefPred(slowHeader, slowPreheader); + slowPreheader->SetTargetEdge(newEdge); + } + JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", slowPreheader->bbNum, slowHeader->bbNum); BasicBlock* condLast = optInsertLoopChoiceConditions(context, loop, slowPreheader, preheader); @@ -2049,8 +2052,12 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // Now redirect the old preheader to jump to the first new condition that // was inserted by the above function. 
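+    // The first new condition block immediately follows the preheader, so its target edge simply points at the next block.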
assert(preheader->KindIs(BBJ_ALWAYS)); - preheader->SetTarget(preheader->Next()); - fgAddRefPred(preheader->Next(), preheader); + + { + FlowEdge* const newEdge = fgAddRefPred(preheader->Next(), preheader); + preheader->SetTargetEdge(newEdge); + } + preheader->SetFlags(BBF_NONE_QUIRK); // And make sure we insert a pred link for the final fallthrough into the fast preheader. diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 9758a43b5f17f9..1d4dcb9b4a8069 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -14632,15 +14632,24 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) assert(condBlock->bbWeight == remainderBlock->bbWeight); assert(block->KindIs(BBJ_ALWAYS)); - block->SetTarget(condBlock); - condBlock->SetTarget(elseBlock); - elseBlock->SetTarget(remainderBlock); + { + FlowEdge* const newEdge = fgAddRefPred(condBlock, block); + block->SetTargetEdge(newEdge); + } + + { + FlowEdge* const newEdge = fgAddRefPred(elseBlock, condBlock); + condBlock->SetTargetEdge(newEdge); + } + + { + FlowEdge* const newEdge = fgAddRefPred(remainderBlock, elseBlock); + elseBlock->SetTargetEdge(newEdge); + } + assert(condBlock->JumpsToNext()); assert(elseBlock->JumpsToNext()); - fgAddRefPred(condBlock, block); - fgAddRefPred(elseBlock, condBlock); - fgAddRefPred(remainderBlock, elseBlock); condBlock->SetFlags(propagateFlagsToAll | BBF_NONE_QUIRK); elseBlock->SetFlags(propagateFlagsToAll | BBF_NONE_QUIRK); From 7ff571effaa8e46b061da5dfb22a99375bec6470 Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Tue, 20 Feb 2024 21:57:13 -0500 Subject: [PATCH 02/16] Add SetKindAndTargetEdge --- src/coreclr/jit/block.cpp | 6 +-- src/coreclr/jit/block.h | 19 +++----- src/coreclr/jit/fgbasic.cpp | 22 ++++----- src/coreclr/jit/fgehopt.cpp | 28 +++++------ src/coreclr/jit/fginline.cpp | 8 ++-- src/coreclr/jit/fgopt.cpp | 20 ++++---- src/coreclr/jit/flowgraph.cpp | 4 +- src/coreclr/jit/helperexpansion.cpp | 6 +-- src/coreclr/jit/ifconversion.cpp | 2 +- src/coreclr/jit/importer.cpp | 53 ++++++++++----------- src/coreclr/jit/indirectcalltransformer.cpp | 6 +-- src/coreclr/jit/lower.cpp | 13 ++--- src/coreclr/jit/morph.cpp | 19 ++++---- src/coreclr/jit/optimizebools.cpp | 6 +-- src/coreclr/jit/optimizer.cpp | 37 ++++++++------ src/coreclr/jit/patchpoint.cpp | 2 +- 16 files changed, 125 insertions(+), 126 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 273c6f045123df..317deea4568d0c 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -861,7 +861,7 @@ void BasicBlock::TransferTarget(BasicBlock* from) SetCond(from->GetTrueTarget(), from->GetFalseTarget()); break; case BBJ_ALWAYS: - SetKindAndTarget(from->GetKind(), from->GetTarget()); + SetKindAndTargetEdge(BBJ_ALWAYS, from->GetTargetEdge()); CopyFlags(from, BBF_NONE_QUIRK); break; case BBJ_CALLFINALLY: @@ -869,10 +869,10 @@ void BasicBlock::TransferTarget(BasicBlock* from) case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: - SetKindAndTarget(from->GetKind(), from->GetTarget()); + SetKindAndTargetEdge(from->GetKind(), from->GetTargetEdge()); break; default: - SetKindAndTarget(from->GetKind()); // Clear the target + SetKindAndTargetEdge(from->GetKind()); // Clear the target break; } assert(KindIs(from->GetKind())); diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index d9fc1e9894c659..ca7ee9660a321a 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -623,13 +623,6 @@ struct BasicBlock : private LIR::Range 
return bbTargetOffs; } - void SetKindAndTarget(BBKinds kind, unsigned targetOffs) - { - bbKind = kind; - bbTargetOffs = targetOffs; - assert(KindIs(BBJ_ALWAYS, BBJ_COND, BBJ_LEAVE)); - } - bool HasTarget() const { // These block types should always have bbTarget set @@ -710,16 +703,16 @@ struct BasicBlock : private LIR::Range bbFalseTarget = falseTarget; } - // Set both the block kind and target. This can clear `bbTarget` when setting - // block kinds that don't use `bbTarget`. - void SetKindAndTarget(BBKinds kind, BasicBlock* target = nullptr) + // Set both the block kind and target edge. This can clear `bbTargetEdge` when setting + // block kinds that don't use `bbTargetEdge`. + void SetKindAndTargetEdge(BBKinds kind, FlowEdge* targetEdge = nullptr) { bbKind = kind; - bbTarget = target; + bbTargetEdge = targetEdge; - // If bbKind indicates this block has a jump, bbTarget cannot be null. + // If bbKind indicates this block has a jump, bbTargetEdge cannot be null. // You shouldn't use this to set a BBJ_COND, BBJ_SWITCH, or BBJ_EHFINALLYRET. - assert(HasTarget() ? HasInitializedTarget() : (bbTarget == nullptr)); + assert(HasTarget() ? HasInitializedTarget() : (bbTargetEdge == nullptr)); } bool HasInitializedTarget() const diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index a6e637daca23ed..ea373eb154b99f 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -358,7 +358,7 @@ void Compiler::fgConvertBBToThrowBB(BasicBlock* block) fgRemoveBlockAsPred(block); // Update jump kind after the scrub. - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); block->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY // Any block with a throw is rare @@ -2996,10 +2996,10 @@ void Compiler::fgLinkBasicBlocks() assert(!(curBBdesc->IsLast() && jumpsToNext)); BasicBlock* const jumpDest = jumpsToNext ? curBBdesc->Next() : fgLookupBB(curBBdesc->GetTargetOffs()); - // Redundantly use SetKindAndTarget() instead of SetTargetEdge() just this once, + // Redundantly use SetKindAndTargetEdge() instead of SetTargetEdge() just this once, // so we don't break the HasInitializedTarget() invariant of SetTargetEdge(). - curBBdesc->SetKindAndTarget(curBBdesc->GetKind(), jumpDest); - fgAddRefPred(jumpDest, curBBdesc); + FlowEdge* const newEdge = fgAddRefPred(jumpDest, curBBdesc); + curBBdesc->SetKindAndTargetEdge(curBBdesc->GetKind(), newEdge); if (curBBdesc->GetTarget()->bbNum <= curBBdesc->bbNum) { @@ -4228,9 +4228,9 @@ void Compiler::fgFixEntryFlowForOSR() fgEnsureFirstBBisScratch(); assert(fgFirstBB->KindIs(BBJ_ALWAYS) && fgFirstBB->JumpsToNext()); fgRemoveRefPred(fgFirstBB->GetTarget(), fgFirstBB); - fgFirstBB->SetKindAndTarget(BBJ_ALWAYS, fgOSREntryBB); - FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); - edge->setLikelihood(1.0); + FlowEdge* const newEdge = fgAddRefPred(fgOSREntryBB, fgFirstBB); + newEdge->setLikelihood(1.0); + fgFirstBB->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // We don't know the right weight for this block, since // execution of the method was interrupted within the @@ -4824,12 +4824,12 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) newBlock->TransferTarget(curr); // Default to fallthrough, and add the arc for that. 
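+    // Create the fallthrough edge from curr to newBlock up front so it can be installed together with the BBJ_ALWAYS kind.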
- curr->SetKindAndTarget(BBJ_ALWAYS, newBlock); - curr->SetFlags(BBF_NONE_QUIRK); - assert(curr->JumpsToNext()); - FlowEdge* const newEdge = fgAddRefPred(newBlock, curr); newEdge->setLikelihood(1.0); + + curr->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); + curr->SetFlags(BBF_NONE_QUIRK); + assert(curr->JumpsToNext()); return newBlock; } diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 55208dfeeb2301..4cd24d67830417 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -167,12 +167,12 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() fgPrepareCallFinallyRetForRemoval(leaveBlock); fgRemoveBlock(leaveBlock, /* unreachable */ true); - currentBlock->SetKindAndTarget(BBJ_ALWAYS, postTryFinallyBlock); - currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY - // Ref count updates. - fgAddRefPred(postTryFinallyBlock, currentBlock); - fgRemoveRefPred(firstBlock, currentBlock); + fgRemoveRefPred(currentBlock->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(postTryFinallyBlock, currentBlock); + + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); + currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY // Cleanup the postTryFinallyBlock fgCleanupContinuation(postTryFinallyBlock); @@ -524,8 +524,8 @@ PhaseStatus Compiler::fgRemoveEmptyTry() GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(block, finallyRet); - block->SetKindAndTarget(BBJ_ALWAYS, continuation); - fgAddRefPred(continuation, block); + FlowEdge* const newEdge = fgAddRefPred(continuation, block); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } } @@ -1093,9 +1093,9 @@ PhaseStatus Compiler::fgCloneFinally() GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(newBlock, finallyRet); - newBlock->SetKindAndTarget(BBJ_ALWAYS, normalCallFinallyReturn); - fgAddRefPred(normalCallFinallyReturn, newBlock); + FlowEdge* const newEdge = fgAddRefPred(normalCallFinallyReturn, newBlock); + newBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } else { @@ -1135,13 +1135,13 @@ PhaseStatus Compiler::fgCloneFinally() fgPrepareCallFinallyRetForRemoval(leaveBlock); fgRemoveBlock(leaveBlock, /* unreachable */ true); + // Ref count updates. + fgRemoveRefPred(currentBlock->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(firstCloneBlock, currentBlock); + // This call returns to the expected spot, so retarget it to branch to the clone. - currentBlock->SetKindAndTarget(BBJ_ALWAYS, firstCloneBlock); currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY - - // Ref count updates. - fgAddRefPred(firstCloneBlock, currentBlock); - fgRemoveRefPred(firstBlock, currentBlock); + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // Make sure iteration isn't going off the deep end. 
assert(leaveBlock != endCallFinallyRangeBlock); diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 409b07eb7a9ad7..f1673e4c32eef1 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -675,13 +675,13 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorIsIntegralConst(0)) { - m_compiler->fgRemoveRefPred(block->GetTrueTarget(), block); - block->SetKindAndTarget(BBJ_ALWAYS, block->Next()); + m_compiler->fgRemoveRefPred(block->GetTrueEdge()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge()); block->SetFlags(BBF_NONE_QUIRK); } else { - m_compiler->fgRemoveRefPred(block->GetFalseTarget(), block); + m_compiler->fgRemoveRefPred(block->GetFalseEdge()); block->SetKind(BBJ_ALWAYS); } } @@ -1533,9 +1533,9 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) JITDUMP("\nConvert bbKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); - block->SetKindAndTarget(BBJ_ALWAYS, bottomBlock); FlowEdge* const newEdge = fgAddRefPred(bottomBlock, block); newEdge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); if (block == InlineeCompiler->fgLastBB) { diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index e973def8efe1e5..cf61943cbae786 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -132,7 +132,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) block->RemoveFlags(BBF_REMOVED | BBF_INTERNAL); block->SetFlags(BBF_IMPORTED); - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); block->bbSetRunRarely(); } else @@ -643,7 +643,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // plausible flow target. Simplest is to just mark it as a throw. if (bbIsHandlerBeg(newTryEntry->Next())) { - newTryEntry->SetKindAndTarget(BBJ_THROW); + newTryEntry->SetKindAndTargetEdge(BBJ_THROW); } else { @@ -1286,10 +1286,10 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: - block->SetKindAndTarget(bNext->GetKind(), bNext->GetTarget()); - /* Update the predecessor list for 'bNext->bbTarget' */ - fgReplacePred(bNext->GetTarget(), bNext, block); + fgReplacePred(bNext->GetTargetEdge(), block); + + block->SetKindAndTargetEdge(bNext->GetKind(), bNext->GetTargetEdge()); break; case BBJ_COND: @@ -1995,10 +1995,10 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } // Change the switch jump into a BBJ_ALWAYS - block->SetKindAndTarget(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]->getDestinationBlock()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]); for (unsigned i = 1; i < jmpCnt; ++i) { - fgRemoveRefPred(jmpTab[i]->getDestinationBlock(), block); + fgRemoveRefPred(jmpTab[i]); } return true; @@ -5663,13 +5663,13 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) // Fix up the flow. 
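+            // predBlock becomes a BBJ_ALWAYS whose target edge points at the cross-jump target.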
// - predBlock->SetKindAndTarget(BBJ_ALWAYS, crossJumpTarget); - if (commSucc != nullptr) { fgRemoveRefPred(commSucc, predBlock); } - fgAddRefPred(crossJumpTarget, predBlock); + + FlowEdge* const newEdge = fgAddRefPred(crossJumpTarget, predBlock); + predBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } // We changed things diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 88af5490bf3fc2..9bff7439d23e71 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1625,9 +1625,9 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. - block->SetKindAndTarget(BBJ_ALWAYS, genReturnBB); FlowEdge* const newEdge = fgAddRefPred(genReturnBB, block); newEdge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); #ifdef DEBUG if (verbose) @@ -2097,9 +2097,9 @@ class MergedReturns // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); - returnBlock->SetKindAndTarget(BBJ_ALWAYS, constReturnBlock); FlowEdge* const newEdge = comp->fgAddRefPred(constReturnBlock, returnBlock); newEdge->setLikelihood(1.0); + returnBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // Remove GT_RETURN since constReturnBlock returns the constant. assert(returnBlock->lastStmt()->GetRootNode()->OperIs(GT_RETURN)); diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 6ce59a4bfc2e54..e91abebf320386 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -2542,12 +2542,12 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, assert(BasicBlock::sameEHRegion(firstBb, fallbackBb)); // call guarantees that obj is never null, we can drop the nullcheck - // by converting it to a BBJ_ALWAYS to typeCheckBb. + // by converting it to a BBJ_ALWAYS to its false target. if ((call->gtCallMoreFlags & GTF_CALL_M_CAST_OBJ_NONNULL) != 0) { fgRemoveStmt(nullcheckBb, nullcheckBb->lastStmt()); - nullcheckBb->SetKindAndTarget(BBJ_ALWAYS, typeCheckNotNeeded ? fallbackBb : typeChecksBbs[0]); - fgRemoveRefPred(lastBb, nullcheckBb); + fgRemoveRefPred(nullcheckBb->GetTrueEdge()); + nullcheckBb->SetKindAndTargetEdge(BBJ_ALWAYS, nullcheckBb->GetFalseEdge()); } // Bonus step: merge prevBb with nullcheckBb as they are likely to be mergeable diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index f9cb5af17925e7..1e6a573aa7b0de 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -739,7 +739,7 @@ bool OptIfConversionDsc::optIfConvert() // Update the flow from the original block. 
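+        // Drop the false-side flow and keep the true edge as the block's only successor.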
m_comp->fgRemoveAllRefPreds(m_startBlock->GetFalseTarget(), m_startBlock); - m_startBlock->SetKindAndTarget(BBJ_ALWAYS, m_startBlock->GetTrueTarget()); + m_startBlock->SetKindAndTargetEdge(BBJ_ALWAYS, m_startBlock->GetTrueEdge()); #ifdef DEBUG if (m_comp->verbose) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 589e0e9033be8a..b84a34eca7a8f9 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2493,7 +2493,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); block->SetFlags(BBF_FAILED_VERIFICATION); block->RemoveFlags(BBF_IMPORTED); @@ -4404,10 +4404,9 @@ void Compiler::impImportLeave(BasicBlock* block) callBlock = block; assert(callBlock->HasInitializedTarget()); - fgRemoveRefPred(callBlock->GetTarget(), callBlock); + fgRemoveRefPred(callBlock->GetTargetEdge()); - // callBlock will call the finally handler. Convert the BBJ_LEAVE to BBJ_CALLFINALLY. - callBlock->SetKindAndTarget(BBJ_CALLFINALLY, HBtab->ebdHndBeg); + // callBlock will call the finally handler. This will be set up later. if (endCatches) { @@ -4429,8 +4428,8 @@ void Compiler::impImportLeave(BasicBlock* block) // Calling the finally block. - // callBlock will call the finally handler - callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step, HBtab->ebdHndBeg); + // callBlock will call the finally handler. This will be set up later. + callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); // step's jump target shouldn't be set yet assert(!step->HasInitializedTarget()); @@ -4486,10 +4485,10 @@ void Compiler::impImportLeave(BasicBlock* block) unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); - assert(callBlock->KindIs(BBJ_CALLFINALLY)); - assert(callBlock->TargetIs(HBtab->ebdHndBeg)); - FlowEdge* const newEdge = fgAddRefPred(callBlock->GetTarget(), callBlock); + assert(!callBlock->HasInitializedTarget()); + FlowEdge* const newEdge = fgAddRefPred(HBtab->ebdHndBeg, callBlock); newEdge->setLikelihood(1.0); + callBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge); GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); @@ -4730,16 +4729,16 @@ void Compiler::impImportLeave(BasicBlock* block) unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = - fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block, HBtab->ebdHndBeg); + fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. - fgRemoveRefPred(block->GetTarget(), block); - block->SetKindAndTarget(BBJ_ALWAYS, callBlock); + fgRemoveRefPred(block->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(callBlock, block); newEdge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // The new block will inherit this block's weight. 
callBlock->inheritWeight(block); @@ -4759,10 +4758,9 @@ void Compiler::impImportLeave(BasicBlock* block) callBlock = block; assert(callBlock->HasInitializedTarget()); - fgRemoveRefPred(callBlock->GetTarget(), callBlock); + fgRemoveRefPred(callBlock->GetTargetEdge()); - // callBlock will call the finally handler. Convert the BBJ_LEAVE to BBJ_CALLFINALLY - callBlock->SetKindAndTarget(BBJ_CALLFINALLY, HBtab->ebdHndBeg); + // callBlock will call the finally handler. This will be set up later. #ifdef DEBUG if (verbose) @@ -4805,7 +4803,7 @@ void Compiler::impImportLeave(BasicBlock* block) BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); if (step == block) { - fgRemoveRefPred(step->GetTarget(), step); + fgRemoveRefPred(step->GetTargetEdge()); } FlowEdge* const newEdge = fgAddRefPred(step2, step); @@ -4844,7 +4842,7 @@ void Compiler::impImportLeave(BasicBlock* block) // callBlock will call the finally handler callBlock = - fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step, HBtab->ebdHndBeg); + fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); if (step == block) { fgRemoveRefPred(step->GetTargetEdge()); @@ -4887,10 +4885,9 @@ void Compiler::impImportLeave(BasicBlock* block) } #endif - assert(callBlock->KindIs(BBJ_CALLFINALLY)); - assert(callBlock->TargetIs(HBtab->ebdHndBeg)); - FlowEdge* const newEdge = fgAddRefPred(callBlock->GetTarget(), callBlock); + FlowEdge* const newEdge = fgAddRefPred(HBtab->ebdHndBeg, callBlock); newEdge->setLikelihood(1.0); + callBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge); } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) @@ -5105,10 +5102,10 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) fgInitBBLookup(); - fgRemoveRefPred(block->GetTarget(), block); - block->SetKindAndTarget(BBJ_LEAVE, fgLookupBB(jmpAddr)); - FlowEdge* const newEdge = fgAddRefPred(block->GetTarget(), block); + fgRemoveRefPred(block->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(fgLookupBB(jmpAddr), block); newEdge->setLikelihood(1.0); + block->SetKindAndTargetEdge(BBJ_LEAVE, newEdge); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The @@ -6025,7 +6022,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Change block to BBJ_THROW so we won't trigger importation of successors. // - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
@@ -7420,7 +7417,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { JITDUMP("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->GetTrueTarget()->bbNum); - fgRemoveRefPred(block->GetFalseTarget(), block); + fgRemoveRefPred(block->GetFalseEdge()); block->SetKind(BBJ_ALWAYS); } else @@ -7428,8 +7425,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) // TODO-NoFallThrough: Update once bbFalseTarget can diverge from bbNext assert(block->NextIs(block->GetFalseTarget())); JITDUMP("\nThe block jumps to the next " FMT_BB "\n", block->Next()->bbNum); - fgRemoveRefPred(block->GetTrueTarget(), block); - block->SetKindAndTarget(BBJ_ALWAYS, block->Next()); + fgRemoveRefPred(block->GetTrueEdge()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge()); // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense // to set BBF_NONE_QUIRK @@ -7695,7 +7692,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { // transform the basic block into a BBJ_ALWAYS - block->SetKindAndTarget(BBJ_ALWAYS, curEdge->getDestinationBlock()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, curEdge); foundVal = true; } else diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 136db42a4bc50e..f6d89dee1b4508 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -1176,9 +1176,9 @@ class IndirectCallTransformer // Finally, rewire the cold block to jump to the else block, // not fall through to the check block. // - FlowEdge* const oldEdge = compiler->fgRemoveRefPred(checkBlock, coldBlock); - coldBlock->SetKindAndTarget(BBJ_ALWAYS, elseBlock); - compiler->fgAddRefPred(elseBlock, coldBlock, oldEdge); + compiler->fgRemoveRefPred(coldBlock->GetTargetEdge()); + FlowEdge* const newEdge = compiler->fgAddRefPred(elseBlock, coldBlock, coldBlock->GetTargetEdge()); + coldBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } // When the current candidate hads sufficiently high likelihood, scan diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 2d699eb6bf81ce..1635496c9706ac 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -861,7 +861,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) { JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum); noway_assert(comp->opts.OptimizationDisabled()); - originalSwitchBB->SetKindAndTarget(BBJ_ALWAYS, jumpTab[0]->getDestinationBlock()); + originalSwitchBB->SetKindAndTargetEdge(BBJ_ALWAYS, jumpTab[0]); if (originalSwitchBB->JumpsToNext()) { @@ -1014,7 +1014,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) comp->fgRemoveRefPred(uniqueSucc); } - afterDefaultCondBlock->SetKindAndTarget(BBJ_ALWAYS, uniqueSucc->getDestinationBlock()); + afterDefaultCondBlock->SetKindAndTargetEdge(BBJ_ALWAYS, uniqueSucc); if (afterDefaultCondBlock->JumpsToNext()) { @@ -1081,7 +1081,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } // Wire up the predecessor list for the "branch" case. 
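+            // Keep the edge returned by fgAddRefPred; when no compare is needed, it becomes currentBlock's unconditional target edge.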
- comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]); + FlowEdge* const newEdge = comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]); if (!fAnyTargetFollows && (i == jumpCnt - 2)) { @@ -1090,7 +1090,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). - currentBlock->SetKindAndTarget(BBJ_ALWAYS, targetBlock); + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } else { @@ -1118,7 +1118,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // There is a fall-through to the following block. In the loop // above, we deleted all the predecessor edges from the switch. // In this case, we need to add one back. - comp->fgAddRefPred(currentBlock->Next(), currentBlock); + // comp->fgAddRefPred(currentBlock->Next(), currentBlock); } if (!fUsedAfterDefaultCondBlock) @@ -1129,7 +1129,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); assert(currentBlock->KindIs(BBJ_SWITCH)); - currentBlock->SetKindAndTarget(BBJ_ALWAYS, currentBlock->Next()); + FlowEdge* const newEdge = comp->fgAddRefPred(currentBlock->Next(), currentBlock); + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); currentBlock->RemoveFlags(BBF_DONT_REMOVE); comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 1d4dcb9b4a8069..eea95aa83fa007 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6316,7 +6316,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need epilogue. - compCurBB->SetKindAndTarget(BBJ_THROW); + compCurBB->SetKindAndTargetEdge(BBJ_THROW); } if (isRootReplaced) @@ -7463,7 +7463,8 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa { // Todo: this may not look like a viable loop header. // Might need the moral equivalent of a scratch BB. - block->SetKindAndTarget(BBJ_ALWAYS, fgEntryBB); + FlowEdge* const newEdge = fgAddRefPred(fgEntryBB, block); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } else { @@ -7478,11 +7479,11 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa // block removal on it. // fgFirstBB->SetFlags(BBF_DONT_REMOVE); - block->SetKindAndTarget(BBJ_ALWAYS, fgFirstBB->Next()); + FlowEdge* const newEdge = fgAddRefPred(fgFirstBB->Next(), block); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } // Finish hooking things up. 
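+    // The pred edge for block's new target was already added above when its target edge was installed.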
- fgAddRefPred(block->GetTarget(), block); block->RemoveFlags(BBF_HAS_JMP); } @@ -13205,7 +13206,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) // JTRUE 0 - transform the basic block into a BBJ_ALWAYS bTaken = block->GetFalseTarget(); bNotTaken = block->GetTrueTarget(); - block->SetKindAndTarget(BBJ_ALWAYS, bTaken); + block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge()); block->SetFlags(BBF_NONE_QUIRK); } @@ -13373,13 +13374,13 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { - block->SetKindAndTarget(BBJ_ALWAYS, curEdge->getDestinationBlock()); + block->SetKindAndTargetEdge(BBJ_ALWAYS, curEdge); foundVal = true; } else { // Remove 'curEdge' - fgRemoveRefPred(curEdge->getDestinationBlock(), block); + fgRemoveRefPred(curEdge); } } @@ -14129,8 +14130,8 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) else #endif // !TARGET_X86 { - block->SetKindAndTarget(BBJ_ALWAYS, genReturnBB); - fgAddRefPred(genReturnBB, block); + FlowEdge* const newEdge = fgAddRefPred(genReturnBB, block); + block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); fgReturnCount--; } diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index f9616636681b5c..75a016d9034f02 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -1012,8 +1012,8 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() m_comp->fgSetStmtSeq(s2); // Update the flow. - m_comp->fgRemoveRefPred(m_b1->GetTrueTarget(), m_b1); - m_b1->SetKindAndTarget(BBJ_ALWAYS, m_b1->GetFalseTarget()); + m_comp->fgRemoveRefPred(m_b1->GetTrueEdge()); + m_b1->SetKindAndTargetEdge(BBJ_ALWAYS, m_b1->GetFalseEdge()); m_b1->SetFlags(BBF_NONE_QUIRK); // Fixup flags. 
@@ -1307,7 +1307,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() assert(m_b2->KindIs(BBJ_RETURN)); assert(m_b1->FalseTargetIs(m_b2)); assert(m_b3 != nullptr); - m_b1->SetKindAndTarget(BBJ_RETURN); + m_b1->SetKindAndTargetEdge(BBJ_RETURN); } else { diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index c5d168bedf9cff..7959e949198486 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -590,20 +590,24 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo case BBJ_CALLFINALLY: case BBJ_CALLFINALLYRET: case BBJ_LEAVE: + { + FlowEdge* newEdge; + // Determine if newBlk should be redirected to a different target from blk's target if (redirectMap->Lookup(blk->GetTarget(), &newTarget)) { // newBlk needs to be redirected to a new target - newBlk->SetKindAndTarget(blk->GetKind(), newTarget); + newEdge = fgAddRefPred(newTarget, newblk); } else { // newBlk uses the same target as blk - newBlk->SetKindAndTarget(blk->GetKind(), blk->GetTarget()); + newEdge = fgAddRefPred(blk->GetTarget(), newBlk); } - fgAddRefPred(newBlk->GetTarget(), newBlk); + newBlk->SetKindAndTargetEdge(blk->GetKind(), newEdge); break; + } case BBJ_COND: { @@ -701,16 +705,18 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: + { // newBlk's jump target should not need to be redirected assert(!redirectMap->Lookup(blk->GetTarget(), &newTarget)); - newBlk->SetKindAndTarget(blk->GetKind(), blk->GetTarget()); - fgAddRefPred(newBlk->GetTarget(), newBlk); + FlowEdge* newEdge = fgAddRefPred(newBlk->GetTarget(), newBlk); + newBlk->SetKindAndTargetEdge(blk->GetKind(), newEdge); break; + } default: // blk doesn't have a jump destination assert(blk->NumSucc() == 0); - newBlk->SetKindAndTarget(blk->GetKind()); + newBlk->SetKindAndTargetEdge(blk->GetKind()); break; } @@ -1713,12 +1719,12 @@ void Compiler::optRedirectPrevUnrollIteration(FlowGraphNaturalLoop* loop, BasicB testCopyStmt->SetRootNode(sideEffList); } - fgRemoveRefPred(prevTestBlock->GetTrueTarget(), prevTestBlock); - fgRemoveRefPred(prevTestBlock->GetFalseTarget(), prevTestBlock); + fgRemoveRefPred(prevTestBlock->GetTrueEdge()); + fgRemoveRefPred(prevTestBlock->GetFalseEdge()); // Redirect exit edge from previous iteration to new entry. - prevTestBlock->SetKindAndTarget(BBJ_ALWAYS, target); - fgAddRefPred(target, prevTestBlock); + FlowEdge* const newEdge = fgAddRefPred(target, prevTestBlock); + prevTestBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); JITDUMP("Redirecting previously created exiting " FMT_BB " -> " FMT_BB "\n", prevTestBlock->bbNum, target->bbNum); @@ -2140,9 +2146,6 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Create a new block after `block` to put the copied condition code. BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true, bJoin); - block->SetKindAndTarget(BBJ_ALWAYS, bNewCond); - block->SetFlags(BBF_NONE_QUIRK); - assert(block->JumpsToNext()); // Clone each statement in bTest and append to bNewCond. 
for (Statement* const stmt : bTest->Statements()) @@ -2205,8 +2208,12 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) fgAddRefPred(bJoin, bNewCond); fgAddRefPred(bTop, bNewCond); - fgAddRefPred(bNewCond, block); - fgRemoveRefPred(bTest, block); + fgRemoveRefPred(block->GetTargetEdge()); + FlowEdge* const newEdge = fgAddRefPred(bNewCond, block); + + block->SetTargetEdge(newEdge); + block->SetFlags(BBF_NONE_QUIRK); + assert(block->JumpsToNext()); // Move all predecessor edges that look like loop entry edges to point to the new cloned condition // block, not the existing condition block. The idea is that if we only move `block` to point to diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index 27b94470962ef0..d1849a2b41be8a 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -238,7 +238,7 @@ class PatchpointTransformer } // Update flow - block->SetKindAndTarget(BBJ_THROW); + block->SetKindAndTargetEdge(BBJ_THROW); // Add helper call // From a46006aa8eab1f348e78dc8490b3d54d8fae5e3a Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Thu, 22 Feb 2024 17:27:59 -0500 Subject: [PATCH 03/16] Remove bbTarget references --- src/coreclr/jit/block.cpp | 24 ++++++++++++------------ src/coreclr/jit/block.h | 20 +++++++------------- src/coreclr/jit/fgopt.cpp | 4 ++-- src/coreclr/jit/optimizebools.cpp | 2 +- 4 files changed, 22 insertions(+), 28 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 317deea4568d0c..63d2b43e55209d 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -676,11 +676,11 @@ void BasicBlock::dspKind() const break; case BBJ_EHFILTERRET: - printf(" -> %s (fltret)", dspBlockNum(bbTarget)); + printf(" -> %s (fltret)", dspBlockNum(GetTarget())); break; case BBJ_EHCATCHRET: - printf(" -> %s (cret)", dspBlockNum(bbTarget)); + printf(" -> %s (cret)", dspBlockNum(GetTarget())); break; case BBJ_THROW: @@ -694,24 +694,24 @@ void BasicBlock::dspKind() const case BBJ_ALWAYS: if (HasFlag(BBF_KEEP_BBJ_ALWAYS)) { - printf(" -> %s (ALWAYS)", dspBlockNum(bbTarget)); + printf(" -> %s (ALWAYS)", dspBlockNum(GetTarget())); } else { - printf(" -> %s (always)", dspBlockNum(bbTarget)); + printf(" -> %s (always)", dspBlockNum(GetTarget())); } break; case BBJ_LEAVE: - printf(" -> %s (leave)", dspBlockNum(bbTarget)); + printf(" -> %s (leave)", dspBlockNum(GetTarget())); break; case BBJ_CALLFINALLY: - printf(" -> %s (callf)", dspBlockNum(bbTarget)); + printf(" -> %s (callf)", dspBlockNum(GetTarget())); break; case BBJ_CALLFINALLYRET: - printf(" -> %s (callfr)", dspBlockNum(bbTarget)); + printf(" -> %s (callfr)", dspBlockNum(GetTarget())); break; case BBJ_COND: @@ -985,7 +985,7 @@ BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const // BasicBlock* BasicBlock::GetUniqueSucc() const { - return KindIs(BBJ_ALWAYS) ? bbTarget : nullptr; + return KindIs(BBJ_ALWAYS) ? GetTarget() : nullptr; } // Static vars. @@ -1199,7 +1199,7 @@ BasicBlock* BasicBlock::GetSucc(unsigned i) const case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: - return bbTarget; + return GetTarget(); case BBJ_COND: if (i == 0) @@ -1309,8 +1309,8 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp) { case BBJ_EHFILTERRET: // Handler is the (sole) normal successor of the filter. 
- assert(comp->fgFirstBlockOfHandler(this) == bbTarget);
- return bbTarget;
+ assert(comp->fgFirstBlockOfHandler(this) == GetTarget());
+ return GetTarget();
 case BBJ_EHFINALLYRET:
 assert(bbEhfTargets != nullptr);
@@ -1322,7 +1322,7 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp)
 case BBJ_ALWAYS:
 case BBJ_EHCATCHRET:
 case BBJ_LEAVE:
- return bbTarget;
+ return GetTarget();
 case BBJ_COND:
 if (i == 0)
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index ca7ee9660a321a..c0e364aea268a5 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -526,7 +526,7 @@ struct BasicBlock : private LIR::Range
 /* The following union describes the jump target(s) of this block */
 union {
 unsigned bbTargetOffs; // PC offset (temporary only)
- BasicBlock* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc)
+ FlowEdge* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc)
 BasicBlock* bbTrueTarget; // BBJ_COND jump target when its condition is true (alias for bbTarget)
 BBswtDesc* bbSwtTargets; // switch descriptor
 BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor
@@ -625,7 +625,7 @@ struct BasicBlock : private LIR::Range
 bool HasTarget() const
 {
- // These block types should always have bbTarget set
+ // These block types should always have bbTargetEdge set
 return KindIs(BBJ_ALWAYS, BBJ_CALLFINALLY, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET, BBJ_EHFILTERRET, BBJ_LEAVE);
 }
@@ -718,7 +718,7 @@ struct BasicBlock : private LIR::Range
 bool HasInitializedTarget() const
 {
 assert(HasTarget());
- return (bbTarget != nullptr);
+ return (bbTargetEdge != nullptr);
 }
 bool TargetIs(const BasicBlock* target) const
@@ -764,19 +764,13 @@ struct BasicBlock : private LIR::Range
 bbEhfTargets = ehfTarget;
 }
- // BBJ_CALLFINALLYRET uses the `bbTarget` field. However, also treat it specially:
+ // BBJ_CALLFINALLYRET uses the `bbTargetEdge` field. However, also treat it specially:
 // for callers that know they want a continuation, use this function instead of the
 // general `GetTarget()` to allow asserting on the block kind.
 BasicBlock* GetFinallyContinuation() const
 {
 assert(KindIs(BBJ_CALLFINALLYRET));
- return bbTarget;
- }
-
- void SetFinallyContinuation(BasicBlock* finallyContinuation)
- {
- assert(KindIs(BBJ_CALLFINALLYRET));
- bbTarget = finallyContinuation;
+ return GetTarget();
 }
 #ifdef DEBUG
@@ -785,7 +779,7 @@ struct BasicBlock : private LIR::Range
 BasicBlock* GetTargetRaw() const
 {
 assert(HasTarget());
- return bbTarget;
+ return (bbTargetEdge == nullptr) ? nullptr : bbTargetEdge->getDestinationBlock();
 }
 // Return the BBJ_COND true target; it might be null. Only used during dumping.
@@ -1952,7 +1946,7 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: - m_succs[0] = block->bbTarget; + m_succs[0] = block->GetTarget(); m_begin = &m_succs[0]; m_end = &m_succs[1]; break; diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index cf61943cbae786..009d4b0e1916ee 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1286,7 +1286,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: - /* Update the predecessor list for 'bNext->bbTarget' */ + /* Update the predecessor list for bNext's target */ fgReplacePred(bNext->GetTargetEdge(), block); block->SetKindAndTargetEdge(bNext->GetKind(), bNext->GetTargetEdge()); @@ -1642,7 +1642,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) else { // TODO-NoFallThrough: Once BBJ_COND blocks have pointers to their false branches, - // allow removing empty BBJ_ALWAYS and pointing bPrev's false branch to block->bbTarget. + // allow removing empty BBJ_ALWAYS and pointing bPrev's false branch to block's target. if (bPrev->bbFallsThrough() && !block->JumpsToNext()) { break; diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 75a016d9034f02..bb95bfb0546940 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -46,7 +46,7 @@ class OptBoolsDsc private: BasicBlock* m_b1; // The first basic block with the BBJ_COND conditional jump type BasicBlock* m_b2; // The next basic block of m_b1. Either BBJ_COND or BBJ_RETURN type - BasicBlock* m_b3; // m_b1->bbTarget. Null if m_b2 is not a return block. + BasicBlock* m_b3; // m_b1's target block. Null if m_b2 is not a return block. Compiler* m_comp; // The pointer to the Compiler instance From 1ca20c8b59b75db4c435bb5564bb4fb31574b006 Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Fri, 23 Feb 2024 14:28:53 -0500 Subject: [PATCH 04/16] Update fgNewBB variants to not set jump target --- src/coreclr/jit/block.cpp | 7 +- src/coreclr/jit/block.h | 2 +- src/coreclr/jit/compiler.h | 13 +-- src/coreclr/jit/fgbasic.cpp | 44 ++++---- src/coreclr/jit/fgehopt.cpp | 5 +- src/coreclr/jit/fgopt.cpp | 16 +-- src/coreclr/jit/fgprofile.cpp | 6 +- src/coreclr/jit/flowgraph.cpp | 13 ++- src/coreclr/jit/helperexpansion.cpp | 112 ++++++++++++++------ src/coreclr/jit/importer.cpp | 11 +- src/coreclr/jit/indirectcalltransformer.cpp | 21 ++-- src/coreclr/jit/jiteh.cpp | 12 ++- src/coreclr/jit/loopcloning.cpp | 36 ++++--- src/coreclr/jit/lower.cpp | 13 +-- src/coreclr/jit/morph.cpp | 5 +- src/coreclr/jit/optimizer.cpp | 23 ++-- src/coreclr/jit/patchpoint.cpp | 8 +- src/coreclr/jit/switchrecognition.cpp | 6 +- 18 files changed, 205 insertions(+), 148 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 63d2b43e55209d..af630d5c82e764 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1585,15 +1585,10 @@ BasicBlock* BasicBlock::New(Compiler* compiler) return block; } -BasicBlock* BasicBlock::New(Compiler* compiler, BBKinds kind, BasicBlock* target /* = nullptr */) +BasicBlock* BasicBlock::New(Compiler* compiler, BBKinds kind) { BasicBlock* block = BasicBlock::New(compiler); - - // In some cases, we don't know a block's jump target during initialization, so don't check the jump kind/target - // yet. 
- // The checks will be done any time the jump kind/target is read or written to after initialization. block->bbKind = kind; - block->bbTarget = target; if (block->KindIs(BBJ_THROW)) { diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index c0e364aea268a5..fb3c6d9c5a661f 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -537,7 +537,7 @@ struct BasicBlock : private LIR::Range public: static BasicBlock* New(Compiler* compiler); - static BasicBlock* New(Compiler* compiler, BBKinds kind, BasicBlock* target = nullptr); + static BasicBlock* New(Compiler* compiler, BBKinds kind); static BasicBlock* New(Compiler* compiler, BBehfDesc* ehfTargets); static BasicBlock* New(Compiler* compiler, BBswtDesc* swtTargets); static BasicBlock* New(Compiler* compiler, BBKinds kind, unsigned targetOffs); diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 9c2f667a0ed777..28187dc6c13c60 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -5076,34 +5076,31 @@ class Compiler void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); - BasicBlock* fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion, BasicBlock* jumpDest = nullptr); + BasicBlock* fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion); - BasicBlock* fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion, BasicBlock* jumpDest = nullptr); + BasicBlock* fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion); - BasicBlock* fgNewBBFromTreeAfter(BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, BasicBlock* jumpDest = nullptr, bool updateSideEffects = false); + BasicBlock* fgNewBBFromTreeAfter(BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, bool updateSideEffects = false); BasicBlock* fgNewBBinRegion(BBKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, - BasicBlock* jumpDest = nullptr, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBKinds jumpKind, BasicBlock* srcBlk, - BasicBlock* jumpDest = nullptr, bool runRarely = false, bool insertAtEnd = false); - BasicBlock* fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest = nullptr); + BasicBlock* fgNewBBinRegion(BBKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, - bool putInTryRegion, - BasicBlock* jumpDest = nullptr); + bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index ea373eb154b99f..d89c113bde1f62 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -207,7 +207,7 @@ bool Compiler::fgEnsureFirstBBisScratch() assert(fgFirstBBScratch == nullptr); - BasicBlock* block = BasicBlock::New(this, BBJ_ALWAYS, fgFirstBB); + BasicBlock* block = BasicBlock::New(this, BBJ_ALWAYS); block->SetFlags(BBF_NONE_QUIRK); if (fgFirstBB != nullptr) @@ -228,6 +228,7 @@ bool Compiler::fgEnsureFirstBBisScratch() // The new scratch bb will fall through to the old first bb FlowEdge* const edge = fgAddRefPred(fgFirstBB, block); edge->setLikelihood(1.0); + block->SetTargetEdge(edge); fgInsertBBbefore(fgFirstBB, block); } else @@ -5054,14 +5055,14 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // no fall-through path). 
For this case, simply insert a new // fall-through block after 'curr'. // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, this will be unnecessary for BBJ_COND - newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */, /* jumpDest */ succ); + newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */); newBlock->SetFlags(BBF_NONE_QUIRK); assert(newBlock->JumpsToNext()); } else { // The new block always jumps to 'succ' - newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, /* jumpDest */ succ, /* isRunRarely */ curr->isRunRarely()); + newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, /* isRunRarely */ curr->isRunRarely()); } newBlock->CopyFlags(curr, succ->GetFlagsRaw() & BBF_BACKWARD_JUMP); @@ -5074,6 +5075,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // And 'succ' has 'newBlock' as a new predecessor. FlowEdge* const newEdge = fgAddRefPred(succ, newBlock); newEdge->setLikelihood(1.0); + newBlock->SetTargetEdge(newEdge); // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time. @@ -5452,7 +5454,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->KindIs(BBJ_COND) && bSrc->FalseTargetIs(bDst) && !bSrc->NextIs(bDst)) { // Add a new block after bSrc which jumps to 'bDst' - jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true, bDst); + jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); bSrc->SetFalseTarget(jmpBlk); fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc)); @@ -5501,6 +5503,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) fgReplacePred(bDst, bSrc, jmpBlk); + JITDUMP("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n", jmpBlk->GetTarget()->bbNum, bSrc->bbNum); } @@ -6059,12 +6062,11 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, - bool extendRegion, - BasicBlock* jumpDest /* = nullptr */) + bool extendRegion) { // Create a new BasicBlock and chain it in - BasicBlock* newBlk = BasicBlock::New(this, jumpKind, jumpDest); + BasicBlock* newBlk = BasicBlock::New(this, jumpKind); newBlk->SetFlags(BBF_INTERNAL); fgInsertBBbefore(block, newBlk); @@ -6101,12 +6103,11 @@ BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, BasicBlock* block, - bool extendRegion, - BasicBlock* jumpDest /* = nullptr */) + bool extendRegion) { // Create a new BasicBlock and chain it in - BasicBlock* newBlk = BasicBlock::New(this, jumpKind, jumpDest); + BasicBlock* newBlk = BasicBlock::New(this, jumpKind); newBlk->SetFlags(BBF_INTERNAL); fgInsertBBafter(block, newBlk); @@ -6146,7 +6147,6 @@ BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, // tree - tree that will be wrapped into a statement and // inserted in the new block. // debugInfo - debug info to propagate into the new statement. -// jumpDest - the jump target of the new block. Defaults to nullptr. // updateSideEffects - update side effects for the whole statement. 
// // Return Value: @@ -6159,10 +6159,9 @@ BasicBlock* Compiler::fgNewBBFromTreeAfter(BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, - BasicBlock* jumpDest /* = nullptr */, bool updateSideEffects /* = false */) { - BasicBlock* newBlock = fgNewBBafter(jumpKind, block, true, jumpDest); + BasicBlock* newBlock = fgNewBBafter(jumpKind, block, true); newBlock->SetFlags(BBF_INTERNAL); Statement* stmt = fgNewStmtFromTree(tree, debugInfo); fgInsertStmtAtEnd(newBlock, stmt); @@ -6584,7 +6583,6 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, // [0..compHndBBtabCount]. // nearBlk - insert the new block closely after this block, if possible. If nullptr, put the new block anywhere // in the requested region. -// jumpDest - the jump target of the new block. Defaults to nullptr. // putInFilter - put the new block in the filter region given by hndIndex, as described above. // runRarely - 'true' if the new block is run rarely. // insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only @@ -6597,7 +6595,6 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, - BasicBlock* jumpDest /* = nullptr */, bool putInFilter /* = false */, bool runRarely /* = false */, bool insertAtEnd /* = false */) @@ -6723,7 +6720,7 @@ _FoundAfterBlk:; bbKindNames[jumpKind], tryIndex, hndIndex, dspBool(putInFilter), dspBool(runRarely), dspBool(insertAtEnd), afterBlk->bbNum); - return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion, jumpDest); + return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion); } //------------------------------------------------------------------------ @@ -6734,7 +6731,6 @@ _FoundAfterBlk:; // Arguments: // jumpKind - the jump kind of the new block to create. // srcBlk - insert the new block in the same EH region as this block, and closely after it if possible. -// jumpDest - the jump target of the new block. Defaults to nullptr. // runRarely - 'true' if the new block is run rarely. // insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only // implemented when inserting into the main function (not into any EH region). @@ -6744,7 +6740,6 @@ _FoundAfterBlk:; BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* srcBlk, - BasicBlock* jumpDest /* = nullptr */, bool runRarely /* = false */, bool insertAtEnd /* = false */) { @@ -6763,7 +6758,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, putInFilter = ehGetDsc(hndIndex - 1)->InFilterRegionBBRange(srcBlk); } - return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, jumpDest, putInFilter, runRarely, insertAtEnd); + return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, putInFilter, runRarely, insertAtEnd); } //------------------------------------------------------------------------ @@ -6773,14 +6768,13 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, // // Arguments: // jumpKind - the jump kind of the new block to create. -// jumpDest - the jump target of the new block. Defaults to nullptr. // // Return Value: // The new block. 
-BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /* = nullptr */) +BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind) { - return fgNewBBinRegion(jumpKind, 0, 0, nullptr, jumpDest, /* putInFilter */ false, /* runRarely */ false, + return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, /* insertAtEnd */ true); } @@ -6799,7 +6793,6 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /* // set its handler index to the most nested handler region enclosing that 'try' region. // Otherwise, put the block in the handler region specified by 'regionIndex', and set its 'try' // index to the most nested 'try' region enclosing that handler region. -// jumpDest - the jump target of the new block. Defaults to nullptr. // // Return Value: // The new block. @@ -6807,13 +6800,12 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /* BasicBlock* Compiler::fgNewBBinRegionWorker(BBKinds jumpKind, BasicBlock* afterBlk, unsigned regionIndex, - bool putInTryRegion, - BasicBlock* jumpDest /* = nullptr */) + bool putInTryRegion) { /* Insert the new block */ BasicBlock* afterBlkNext = afterBlk->Next(); (void)afterBlkNext; // prevent "unused variable" error from GCC - BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false, jumpDest); + BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false); if (putInTryRegion) { diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 4cd24d67830417..35a1027b7b85a3 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -2103,7 +2103,7 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, assert(predBlock->KindIs(BBJ_COND)); assert(predBlock->FalseTargetIs(nonCanonicalBlock)); - BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true, canonicalBlock); + BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true); predBlock->SetFalseTarget(newBlock); JITDUMP("*** " FMT_BB " now falling through to empty " FMT_BB " and then to " FMT_BB "\n", predBlock->bbNum, @@ -2115,7 +2115,8 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, // Wire up the new flow fgAddRefPred(newBlock, predBlock, predEdge); - fgAddRefPred(canonicalBlock, newBlock, predEdge); + FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, newBlock, predEdge); + newBlock->SetTargetEdge(newEdge); // If nonCanonicalBlock has only one pred, all its flow transfers. // If it has multiple preds, then we need edge counts or likelihoods diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 009d4b0e1916ee..45c1549f99ba00 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -623,8 +623,8 @@ PhaseStatus Compiler::fgPostImportationCleanup() // What follows is similar to fgNewBBInRegion, but we can't call that // here as the oldTryEntry is no longer in the main bb list. - newTryEntry = BasicBlock::New(this, BBJ_ALWAYS, tryEntryPrev->Next()); - newTryEntry->SetFlags(BBF_IMPORTED | BBF_INTERNAL | BBF_NONE_QUIRK); + newTryEntry = BasicBlock::New(this); + newTryEntry->SetFlags(BBF_IMPORTED | BBF_INTERNAL); newTryEntry->bbRefs = 0; // Set the right EH region indices on this new block. 
@@ -649,6 +649,8 @@ PhaseStatus Compiler::fgPostImportationCleanup() { FlowEdge* const newEdge = fgAddRefPred(newTryEntry->Next(), newTryEntry); newEdge->setLikelihood(1.0); + newTryEntry->SetFlags(BBF_NONE_QUIRK); + newTryEntry->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n", @@ -2470,7 +2472,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // add an unconditional block after this block to jump to the target block's fallthrough block // assert(!target->IsLast()); - BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true, target->GetFalseTarget()); + BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true); // Fix up block's flow // @@ -2482,7 +2484,8 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // next->inheritWeight(block); fgAddRefPred(next, block); - fgAddRefPred(next->GetTarget(), next); + FlowEdge* const newEdge = fgAddRefPred(target->GetFalseTarget(), next); + next->SetTargetEdge(newEdge); JITDUMP("fgOptimizeUncondBranchToSimpleCond(from " FMT_BB " to cond " FMT_BB "), created new uncond " FMT_BB "\n", block->bbNum, target->bbNum, next->bbNum); @@ -4981,13 +4984,14 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh if (bDest->KindIs(BBJ_COND) && !bDest->NextIs(bDest->GetFalseTarget())) { BasicBlock* const bDestFalseTarget = bDest->GetFalseTarget(); - BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true, bDestFalseTarget); + BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); bDest->SetFalseTarget(bFixup); bFixup->inheritWeight(bDestFalseTarget); fgRemoveRefPred(bDestFalseTarget, bDest); fgAddRefPred(bFixup, bDest); - fgAddRefPred(bDestFalseTarget, bFixup); + FlowEdge* const newEdge = fgAddRefPred(bDestFalseTarget, bFixup); + bFixup->SetTargetEdge(newEdge); } } } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 02a4c22d0aade1..470cf35ebf1204 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -508,11 +508,12 @@ void BlockCountInstrumentor::RelocateProbes() if (criticalPreds.Height() > 0) { BasicBlock* const intermediary = - m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true, /* jumpDest */ block); + m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); intermediary->SetFlags(BBF_IMPORTED | BBF_MARKED | BBF_NONE_QUIRK); intermediary->inheritWeight(block); FlowEdge* const newEdge = m_comp->fgAddRefPred(block, intermediary); newEdge->setLikelihood(1.0); + intermediary->SetTargetEdge(newEdge); SetModifiedFlow(); while (criticalPreds.Height() > 0) @@ -1680,11 +1681,12 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() if (criticalPreds.Height() > 0) { BasicBlock* intermediary = - m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true, /* jumpDest */ block); + m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); intermediary->SetFlags(BBF_IMPORTED | BBF_NONE_QUIRK); intermediary->inheritWeight(block); FlowEdge* const newEdge = m_comp->fgAddRefPred(block, intermediary); newEdge->setLikelihood(1.0); + intermediary->SetTargetEdge(newEdge); NewRelocatedProbe(intermediary, probe->source, probe->target, &leader); SetModifiedFlow(); diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 9bff7439d23e71..aa361908584050 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -2758,13 +2758,11 @@ void 
Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
 /* Allocate a new basic block */
- BasicBlock* newHead = BasicBlock::New(this, BBJ_ALWAYS);
- newHead->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK);
+ BasicBlock* newHead = BasicBlock::New(this);
 newHead->inheritWeight(block);
 newHead->bbRefs = 0;
 fgInsertBBbefore(block, newHead); // insert the new block in the block list
- assert(newHead->JumpsToNext());
 fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH
 // block.
@@ -2797,11 +2795,12 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
 }
 }
- assert(nullptr == fgGetPredForBlock(block, newHead));
+ assert(fgGetPredForBlock(block, newHead) == nullptr);
 FlowEdge* const newEdge = fgAddRefPred(block, newHead);
- newHead->SetTargetEdge(newEdge);
+ newHead->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
- assert(newHead->HasFlag(BBF_INTERNAL));
+ assert(newHead->JumpsToNext());
+ newHead->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK);
 }
 //------------------------------------------------------------------------
@@ -3375,7 +3374,7 @@ PhaseStatus Compiler::fgCreateThrowHelperBlocks()
 assert((add->acdKind == SCK_FAIL_FAST) || (bbThrowIndex(srcBlk) == add->acdData));
 assert(add->acdKind != SCK_NONE);
- BasicBlock* const newBlk = fgNewBBinRegion(jumpKinds[add->acdKind], srcBlk, /* jumpDest */ nullptr,
+ BasicBlock* const newBlk = fgNewBBinRegion(jumpKinds[add->acdKind], srcBlk,
 /* runRarely */ true, /* insertAtEnd */ true);
 // Update the descriptor
diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp
index e91abebf320386..aa8ad8b2d0271d 100644
--- a/src/coreclr/jit/helperexpansion.cpp
+++ b/src/coreclr/jit/helperexpansion.cpp
@@ -320,7 +320,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm
 // Fallback basic block
 GenTree* fallbackValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), call);
 BasicBlock* fallbackBb =
- fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, nullcheckBb->Next(), true);
+ fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, true);
 assert(fallbackBb->JumpsToNext());
 fallbackBb->SetFlags(BBF_NONE_QUIRK);
@@ -330,7 +330,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm
 // Fast-path basic block
 GenTree* fastpathValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), fastPathValueClone);
- BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fastpathValueDef, debugInfo, block);
+ BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fastpathValueDef, debugInfo);
 // Set nullcheckBb's false jump target
 nullcheckBb->SetFalseTarget(fastPathBb);
@@ -375,16 +375,24 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm
 GenTree* jtrue = gtNewOperNode(GT_JTRUE, TYP_VOID, sizeCheck);
 // sizeCheckBb fails - jump to fallbackBb
- sizeCheckBb = fgNewBBFromTreeAfter(BBJ_COND, prevBb, jtrue, debugInfo, fallbackBb);
- sizeCheckBb->SetFalseTarget(nullcheckBb);
+ sizeCheckBb = fgNewBBFromTreeAfter(BBJ_COND, prevBb, jtrue, debugInfo);
 }
 //
 // Update preds in all new blocks
 //
 fgRemoveRefPred(block, prevBb);
- fgAddRefPred(block, fastPathBb);
- fgAddRefPred(block, fallbackBb);
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb);
+ fastPathBb->SetTargetEdge(newEdge);
+ }
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb);
+ fallbackBb->SetTargetEdge(newEdge);
+ }
+
 assert(prevBb->KindIs(BBJ_ALWAYS));
 if (needsSizeCheck)
 {
@@ -394,10 +402,15 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm
 prevBb->SetTargetEdge(newEdge);
 // sizeCheckBb flows into nullcheckBb in case if the size check passes
- fgAddRefPred(nullcheckBb, sizeCheckBb);
+ {
+ FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, sizeCheckBb);
+ FlowEdge* const falseEdge = fgAddRefPred(nullcheckBb, sizeCheckBb);
+ sizeCheckBb->SetTrueEdge(trueEdge);
+ sizeCheckBb->SetFalseEdge(falseEdge);
+ }
+
 // fallbackBb is reachable from both nullcheckBb and sizeCheckBb
 fgAddRefPred(fallbackBb, nullcheckBb);
- fgAddRefPred(fallbackBb, sizeCheckBb);
 // fastPathBb is only reachable from successful nullcheckBb
 fgAddRefPred(fastPathBb, nullcheckBb);
 }
@@ -702,10 +715,10 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St
 // fallbackBb
 GenTree* fallbackValueDef = gtNewStoreLclVarNode(finalLclNum, slowHelper);
 BasicBlock* fallbackBb =
- fgNewBBFromTreeAfter(BBJ_ALWAYS, tlsRootNullCondBB, fallbackValueDef, debugInfo, block, true);
+ fgNewBBFromTreeAfter(BBJ_ALWAYS, tlsRootNullCondBB, fallbackValueDef, debugInfo, true);
 GenTree* fastPathValueDef = gtNewStoreLclVarNode(finalLclNum, gtCloneExpr(finalLcl));
- BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, block, true);
+ BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true);
 *callUse = finalLcl;
@@ -718,8 +731,15 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St
 fgAddRefPred(fallbackBb, tlsRootNullCondBB);
 fgAddRefPred(fastPathBb, tlsRootNullCondBB);
- fgAddRefPred(block, fallbackBb);
- fgAddRefPred(block, fastPathBb);
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb);
+ fallbackBb->SetTargetEdge(newEdge);
+ }
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb);
+ fastPathBb->SetTargetEdge(newEdge);
+ }
 tlsRootNullCondBB->SetTrueTarget(fastPathBb);
 tlsRootNullCondBB->SetFalseTarget(fallbackBb);
@@ -1058,7 +1078,7 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement*
 // fallbackBb
 GenTree* fallbackValueDef = gtNewStoreLclVarNode(threadStaticBlockLclNum, call);
 BasicBlock* fallbackBb =
- fgNewBBFromTreeAfter(BBJ_ALWAYS, threadStaticBlockNullCondBB, fallbackValueDef, debugInfo, block, true);
+ fgNewBBFromTreeAfter(BBJ_ALWAYS, threadStaticBlockNullCondBB, fallbackValueDef, debugInfo, true);
 // fastPathBb
 if (isGCThreadStatic)
 {
@@ -1073,7 +1093,7 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement*
 GenTree* fastPathValueDef =
 gtNewStoreLclVarNode(threadStaticBlockLclNum, gtCloneExpr(threadStaticBlockBaseLclValueUse));
- BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, block, true);
+ BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true);
 // Set maxThreadStaticBlocksCondBB's jump targets
 maxThreadStaticBlocksCondBB->SetTrueTarget(fallbackBb);
@@ -1097,8 +1117,15 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement*
 fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB);
 fgAddRefPred(fallbackBb, threadStaticBlockNullCondBB);
- fgAddRefPred(block, fastPathBb);
- fgAddRefPred(block, fallbackBb);
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb);
+ fastPathBb->SetTargetEdge(newEdge);
+ }
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block,
fallbackBb); + fallbackBb->SetTargetEdge(newEdge); + } // Inherit the weights block->inheritWeight(prevBb); @@ -1378,12 +1405,12 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G GenTree* isInitedCmp = gtNewOperNode(GT_EQ, TYP_INT, isInitedActualValueNode, isInitedExpectedValue); isInitedCmp->gtFlags |= GTF_RELOP_JMP_USED; BasicBlock* isInitedBb = - fgNewBBFromTreeAfter(BBJ_COND, prevBb, gtNewOperNode(GT_JTRUE, TYP_VOID, isInitedCmp), debugInfo, block); + fgNewBBFromTreeAfter(BBJ_COND, prevBb, gtNewOperNode(GT_JTRUE, TYP_VOID, isInitedCmp), debugInfo); // Fallback basic block // TODO-CQ: for JIT we can replace the original call with CORINFO_HELP_INITCLASS // that only accepts a single argument - BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, isInitedBb, call, debugInfo, isInitedBb->Next(), true); + BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, isInitedBb, call, debugInfo, true); assert(helperCallBb->JumpsToNext()); helperCallBb->SetFlags(BBF_NONE_QUIRK); @@ -1448,8 +1475,15 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G fgRemoveRefPred(prevBb->GetTargetEdge()); // Block has two preds now: either isInitedBb or helperCallBb - fgAddRefPred(block, isInitedBb); - fgAddRefPred(block, helperCallBb); + { + FlowEdge* const newEdge = fgAddRefPred(block, isInitedBb); + isInitedBb->SetTargetEdge(newEdge); + } + + { + FlowEdge* const newEdge = fgAddRefPred(block, helperCallBb); + helperCallBb->SetTargetEdge(newEdge); + } // prevBb always flows into isInitedBb assert(prevBb->KindIs(BBJ_ALWAYS)); @@ -1689,7 +1723,7 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // // Block 1: lengthCheckBb (we check that dstLen < srcLen) // - BasicBlock* lengthCheckBb = fgNewBBafter(BBJ_COND, prevBb, true, block); + BasicBlock* lengthCheckBb = fgNewBBafter(BBJ_COND, prevBb, true); lengthCheckBb->SetFlags(BBF_INTERNAL); // Set bytesWritten -1 by default, if the fast path is not taken we'll return it as the result. @@ -1711,7 +1745,7 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // In theory, we could just emit the const U8 data to the data section and use GT_BLK here // but that would be a bit less efficient since we would have to load the data from memory. 
 //
- BasicBlock* fastpathBb = fgNewBBafter(BBJ_ALWAYS, lengthCheckBb, true, lengthCheckBb->Next());
+ BasicBlock* fastpathBb = fgNewBBafter(BBJ_ALWAYS, lengthCheckBb, true);
 assert(fastpathBb->JumpsToNext());
 fastpathBb->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK);
@@ -1770,8 +1804,12 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock,
 // prevBb flows into lengthCheckBb
 assert(prevBb->KindIs(BBJ_ALWAYS));
- FlowEdge* const newEdge = fgAddRefPred(lengthCheckBb, prevBb);
- prevBb->SetTargetEdge(newEdge);
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(lengthCheckBb, prevBb);
+ prevBb->SetTargetEdge(newEdge);
+ }
+
 prevBb->SetFlags(BBF_NONE_QUIRK);
 assert(prevBb->JumpsToNext());
@@ -1779,8 +1817,12 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock,
 lengthCheckBb->SetFalseTarget(fastpathBb);
 fgAddRefPred(fastpathBb, lengthCheckBb);
 fgAddRefPred(block, lengthCheckBb);
- // fastpathBb flows into block
- fgAddRefPred(block, fastpathBb);
+
+ {
+ // fastpathBb flows into block
+ FlowEdge* const newEdge = fgAddRefPred(block, fastpathBb);
+ fastpathBb->SetTargetEdge(newEdge);
+ }
 //
 // Re-distribute weights
@@ -2349,7 +2391,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
 GenTree* nullcheckOp = gtNewOperNode(GT_EQ, TYP_INT, tmpNode, gtNewNull());
 nullcheckOp->gtFlags |= GTF_RELOP_JMP_USED;
 BasicBlock* nullcheckBb = fgNewBBFromTreeAfter(BBJ_COND, firstBb, gtNewOperNode(GT_JTRUE, TYP_VOID, nullcheckOp),
- debugInfo, lastBb, true);
+ debugInfo, true);
 // The very first statement in the whole expansion is to assign obj to tmp.
 // We assume it's the value we're going to return in most cases.
@@ -2389,7 +2431,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
 GenTree* mtCheck = gtNewOperNode(GT_EQ, TYP_INT, gtNewMethodTableLookup(gtCloneExpr(tmpNode)), expectedClsNode);
 mtCheck->gtFlags |= GTF_RELOP_JMP_USED;
 GenTree* jtrue = gtNewOperNode(GT_JTRUE, TYP_VOID, mtCheck);
- typeChecksBbs[candidateId] = fgNewBBFromTreeAfter(BBJ_COND, lastTypeCheckBb, jtrue, debugInfo, lastBb, true);
+ typeChecksBbs[candidateId] = fgNewBBFromTreeAfter(BBJ_COND, lastTypeCheckBb, jtrue, debugInfo, true);
 lastTypeCheckBb = typeChecksBbs[candidateId];
 // Insert the CSE node as the first statement in the block
@@ -2411,13 +2453,13 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
 {
 // fallback call is used only to throw InvalidCastException
 call->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
- fallbackBb = fgNewBBFromTreeAfter(BBJ_THROW, lastTypeCheckBb, call, debugInfo, nullptr, true);
+ fallbackBb = fgNewBBFromTreeAfter(BBJ_THROW, lastTypeCheckBb, call, debugInfo, true);
 }
 else if (typeCheckFailedAction == TypeCheckFailedAction::ReturnNull)
 {
 // if fallback call is not needed, we just assign null to tmp
 GenTree* fallbackTree = gtNewTempStore(tmpNum, gtNewNull());
- fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, lastBb, true);
+ fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true);
 }
 else
 {
@@ -2428,7 +2470,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
 call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_CHKCASTCLASS_SPECIAL);
 }
 GenTree* fallbackTree = gtNewTempStore(tmpNum, call);
- fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, lastBb, true);
+ fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb,
fallbackTree, debugInfo, true); } // Block 4: typeCheckSucceedBb @@ -2444,7 +2486,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, } BasicBlock* typeCheckSucceedBb = typeCheckNotNeeded ? nullptr - : fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, typeCheckSucceedTree, debugInfo, lastBb); + : fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, typeCheckSucceedTree, debugInfo); // // Wire up the blocks @@ -2488,13 +2530,15 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, else { fgAddRefPred(typeChecksBbs[0], nullcheckBb); - fgAddRefPred(lastBb, typeCheckSucceedBb); + FlowEdge* const newEdge = fgAddRefPred(lastBb, typeCheckSucceedBb); + typeCheckSucceedBb->SetTargetEdge(newEdge); } if (!fallbackBb->KindIs(BBJ_THROW)) { // if fallbackBb is BBJ_THROW then it has no successors - fgAddRefPred(lastBb, fallbackBb); + FlowEdge* const newEdge = fgAddRefPred(lastBb, fallbackBb); + fallbackBb->SetTargetEdge(newEdge); } // diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index b84a34eca7a8f9..bc4278215f8643 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2020,13 +2020,14 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H { // Create extra basic block for the spill // - BasicBlock* newBlk = fgNewBBbefore(BBJ_ALWAYS, hndBlk, /* extendRegion */ true, /* jumpDest */ hndBlk); + BasicBlock* newBlk = fgNewBBbefore(BBJ_ALWAYS, hndBlk, /* extendRegion */ true); newBlk->SetFlags(BBF_IMPORTED | BBF_DONT_REMOVE | BBF_NONE_QUIRK); newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; FlowEdge* const newEdge = fgAddRefPred(hndBlk, newBlk); newEdge->setLikelihood(1.0); + newBlk->SetTargetEdge(newEdge); // Spill into a temp. unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); @@ -4531,7 +4532,7 @@ void Compiler::impImportLeave(BasicBlock* block) // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. - BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step, leaveTarget); + BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->SetFlags(BBF_KEEP_BBJ_ALWAYS); // step's jump target shouldn't be set yet @@ -4573,6 +4574,7 @@ void Compiler::impImportLeave(BasicBlock* block) { FlowEdge* const newEdge = fgAddRefPred(leaveTarget, finalStep); newEdge->setLikelihood(1.0); + finalStep->SetTargetEdge(newEdge); } // Queue up the jump target for importing @@ -5070,10 +5072,11 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // will be treated as pair and handled correctly. 
if (block->KindIs(BBJ_CALLFINALLY)) { - BasicBlock* dupBlock = BasicBlock::New(this, BBJ_CALLFINALLY, block->GetTarget()); + BasicBlock* dupBlock = BasicBlock::New(this); dupBlock->CopyFlags(block); - FlowEdge* const newEdge = fgAddRefPred(dupBlock->GetTarget(), dupBlock); + FlowEdge* const newEdge = fgAddRefPred(block->GetTarget(), dupBlock); newEdge->setLikelihood(1.0); + dupBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge); dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index f6d89dee1b4508..883590af49d3ff 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -222,9 +222,9 @@ class IndirectCallTransformer // // Return Value: // new basic block. - BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter, BasicBlock* jumpDest = nullptr) + BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter) { - BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true, jumpDest); + BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true); block->SetFlags(BBF_IMPORTED); return block; } @@ -376,7 +376,7 @@ class IndirectCallTransformer { assert(checkIdx == 0); - checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, currBlock, currBlock->Next()); + checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, currBlock); checkBlock->SetFlags(BBF_NONE_QUIRK); GenTree* fatPointerMask = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, FAT_POINTER_MASK); GenTree* fptrAddressCopy = compiler->gtCloneExpr(fptrAddress); @@ -395,7 +395,7 @@ class IndirectCallTransformer virtual void CreateThen(uint8_t checkIdx) { assert(remainderBlock != nullptr); - thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock, remainderBlock); + thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock); Statement* copyOfOriginalStmt = compiler->gtCloneStmt(stmt); compiler->fgInsertStmtAtEnd(thenBlock, copyOfOriginalStmt); } @@ -405,7 +405,7 @@ class IndirectCallTransformer // virtual void CreateElse() { - elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock, thenBlock->Next()); + elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock); elseBlock->SetFlags(BBF_NONE_QUIRK); GenTree* fixedFptrAddress = GetFixedFptrAddress(); @@ -1021,7 +1021,7 @@ class IndirectCallTransformer weight_t elseLikelihoodWt = max(1.0 - thenLikelihoodWt, 0.0); // thenBlock always jumps to remainderBlock - thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock, remainderBlock); + thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock); thenBlock->CopyFlags(currBlock, BBF_SPLIT_GAINED); thenBlock->inheritWeightPercentage(currBlock, thenLikelihood); @@ -1029,11 +1029,13 @@ class IndirectCallTransformer assert(checkBlock->KindIs(BBJ_ALWAYS)); FlowEdge* const thenEdge = compiler->fgAddRefPred(thenBlock, checkBlock); thenEdge->setLikelihood(thenLikelihoodWt); - FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, thenBlock); - elseEdge->setLikelihood(elseLikelihoodWt); checkBlock->SetTargetEdge(thenEdge); checkBlock->SetFlags(BBF_NONE_QUIRK); assert(checkBlock->JumpsToNext()); + + FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, thenBlock); + elseEdge->setLikelihood(elseLikelihoodWt); + thenBlock->SetTargetEdge(elseEdge); DevirtualizeCall(thenBlock, checkIdx); } @@ -1054,7 +1056,7 @@ class IndirectCallTransformer assert(elseLikelihood <= 100); weight_t 
elseLikelihoodDbl = ((weight_t)elseLikelihood) / 100.0; - elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock, thenBlock->Next()); + elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock); elseBlock->CopyFlags(currBlock, BBF_SPLIT_GAINED); elseBlock->SetFlags(BBF_NONE_QUIRK); @@ -1077,6 +1079,7 @@ class IndirectCallTransformer // elseBlock always flows into remainderBlock FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, elseBlock); elseEdge->setLikelihood(1.0); + elseBlock->SetTargetEdge(elseEdge); // Remove everything related to inlining from the original call origCall->ClearInlineInfo(); diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 354ecaae0012ff..3e122308ce7e8f 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1984,10 +1984,11 @@ bool Compiler::fgNormalizeEHCase1() { // ...then we want to insert an empty, non-removable block outside the try to be the new first block of the // handler. - BasicBlock* newHndStart = BasicBlock::New(this, BBJ_ALWAYS, handlerStart); + BasicBlock* newHndStart = BasicBlock::New(this); fgInsertBBbefore(handlerStart, newHndStart); FlowEdge* newEdge = fgAddRefPred(handlerStart, newHndStart); newEdge->setLikelihood(1.0); + newHndStart->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // Handler begins have an extra implicit ref count. // BasicBlock::New has already handled this for newHndStart. @@ -2154,11 +2155,12 @@ bool Compiler::fgNormalizeEHCase2() // We've got multiple 'try' blocks starting at the same place! // Add a new first 'try' block for 'ehOuter' that will be outside 'eh'. - BasicBlock* newTryStart = BasicBlock::New(this, BBJ_ALWAYS, insertBeforeBlk); + BasicBlock* newTryStart = BasicBlock::New(this); newTryStart->bbRefs = 0; fgInsertBBbefore(insertBeforeBlk, newTryStart); FlowEdge* const newEdge = fgAddRefPred(insertBeforeBlk, newTryStart); newEdge->setLikelihood(1.0); + newTryStart->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // It's possible for a try to start at the beginning of a method. 
If so, we need // to adjust the implicit ref counts as we've just created a new first bb @@ -2346,7 +2348,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions() // Create a new bb for the fake filter BasicBlock* handlerBb = eh->ebdHndBeg; - BasicBlock* filterBb = BasicBlock::New(this, BBJ_EHFILTERRET, handlerBb); + BasicBlock* filterBb = BasicBlock::New(this); // Now we need to spill CATCH_ARG (it should be the first thing evaluated) GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); @@ -2376,6 +2378,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions() fgInsertBBbefore(handlerBb, filterBb); FlowEdge* const newEdge = fgAddRefPred(handlerBb, filterBb); newEdge->setLikelihood(1.0); + filterBb->SetKindAndTargetEdge(BBJ_EHFILTERRET, newEdge); fgNewStmtAtEnd(filterBb, retFilt, handlerBb->firstStmt()->GetDebugInfo()); filterBb->bbCatchTyp = BBCT_FILTER; @@ -2632,7 +2635,7 @@ bool Compiler::fgNormalizeEHCase3() // Add a new last block for 'ehOuter' that will be outside the EH region with which it encloses and // shares a 'last' pointer - BasicBlock* newLast = BasicBlock::New(this, BBJ_ALWAYS, insertAfterBlk->Next()); + BasicBlock* newLast = BasicBlock::New(this); newLast->bbRefs = 0; assert(insertAfterBlk != nullptr); fgInsertBBafter(insertAfterBlk, newLast); @@ -2683,6 +2686,7 @@ bool Compiler::fgNormalizeEHCase3() newLast->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK); FlowEdge* const newEdge = fgAddRefPred(newLast, insertAfterBlk); newEdge->setLikelihood(1.0); + insertAfterBlk->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); // Move the insert pointer. More enclosing equivalent 'last' blocks will be inserted after this. insertAfterBlk = newLast; diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 4d6f051b3b3e3b..41638141361c81 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -860,17 +860,18 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* { for (unsigned i = 0; i < conds.Size(); ++i) { - BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true, slowPreheader); + BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true); newBlk->inheritWeight(insertAfter); - JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->GetTrueTarget()->bbNum); - comp->fgAddRefPred(newBlk->GetTrueTarget(), newBlk); + JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, slowPreheader->bbNum); + FlowEdge* const trueEdge = comp->fgAddRefPred(slowPreheader, newBlk); + newBlk->SetTrueEdge(trueEdge); if (insertAfter->KindIs(BBJ_COND)) { JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); - insertAfter->SetFalseTarget(newBlk); - comp->fgAddRefPred(newBlk, insertAfter); + FlowEdge* const falseEdge = comp->fgAddRefPred(newBlk, insertAfter); + insertAfter->SetFalseEdge(falseEdge); } JITDUMP("Adding conditions %u to " FMT_BB "\n", i, newBlk->bbNum); @@ -894,16 +895,18 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* } else { - BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true, slowPreheader); + BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true); newBlk->inheritWeight(insertAfter); - JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->GetTrueTarget()->bbNum); - comp->fgAddRefPred(newBlk->GetTrueTarget(), newBlk); + JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, slowPreheader->bbNum); + FlowEdge* const trueEdge = 
comp->fgAddRefPred(slowPreheader, newBlk); + newBlk->SetTrueEdge(trueEdge); - if (insertAfter->bbFallsThrough()) + if (insertAfter->KindIs(BBJ_COND)) { JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); - comp->fgAddRefPred(newBlk, insertAfter); + FlowEdge* const falseEdge = comp->fgAddRefPred(newBlk, insertAfter); + insertAfter->SetFalseEdge(falseEdge); } JITDUMP("Adding conditions to " FMT_BB "\n", newBlk->bbNum); @@ -1960,11 +1963,11 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex JITDUMP("Create new preheader block for fast loop\n"); BasicBlock* fastPreheader = - fgNewBBafter(BBJ_ALWAYS, preheader, /*extendRegion*/ true, /*jumpDest*/ loop->GetHeader()); + fgNewBBafter(BBJ_ALWAYS, preheader, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", fastPreheader->bbNum, preheader->bbNum); fastPreheader->bbWeight = fastPreheader->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; - if (fastPreheader->JumpsToNext()) + if (fastPreheader->NextIs(loop->GetHeader())) { fastPreheader->SetFlags(BBF_NONE_QUIRK); } @@ -1972,7 +1975,10 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex assert(preheader->KindIs(BBJ_ALWAYS)); assert(preheader->TargetIs(loop->GetHeader())); - fgReplacePred(loop->GetHeader(), preheader, fastPreheader); + FlowEdge* const oldEdge = preheader->GetTargetEdge(); + fgReplacePred(oldEdge, fastPreheader); + fastPreheader->SetTargetEdge(oldEdge); + JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", preheader->bbNum, loop->GetHeader()->bbNum, fastPreheader->bbNum, loop->GetHeader()->bbNum); @@ -2062,8 +2068,8 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // And make sure we insert a pred link for the final fallthrough into the fast preheader. assert(condLast->NextIs(fastPreheader)); - condLast->SetFalseTarget(fastPreheader); - fgAddRefPred(fastPreheader, condLast); + FlowEdge* const falseEdge = fgAddRefPred(fastPreheader, condLast); + condLast->SetFalseEdge(falseEdge); } //------------------------------------------------------------------------- diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 1635496c9706ac..b31946c4e5f74b 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -1054,7 +1054,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // Remove the switch from the predecessor list of this case target's block. // We'll add the proper new predecessor edge later. - comp->fgRemoveRefPred(jumpTab[i]); + FlowEdge* const oldEdge = comp->fgRemoveRefPred(jumpTab[i]); if (targetBlock == followingBB) { @@ -1067,10 +1067,10 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // If we haven't used the afterDefaultCondBlock yet, then use that. if (fUsedAfterDefaultCondBlock) { - BasicBlock* newBlock = comp->fgNewBBafter(BBJ_ALWAYS, currentBlock, true, currentBlock->Next()); + BasicBlock* newBlock = comp->fgNewBBafter(BBJ_ALWAYS, currentBlock, true); newBlock->SetFlags(BBF_NONE_QUIRK); - currentBlock->SetFalseTarget(newBlock); - comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor. + FlowEdge* const falseEdge = comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor. + currentBlock->SetFalseEdge(falseEdge); currentBlock = newBlock; currentBBRange = &LIR::AsRange(currentBlock); } @@ -1081,7 +1081,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } // Wire up the predecessor list for the "branch" case. 
- FlowEdge* const newEdge = comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]); + FlowEdge* const newEdge = comp->fgAddRefPred(targetBlock, currentBlock, oldEdge); if (!fAnyTargetFollows && (i == jumpCnt - 2)) { @@ -1096,7 +1096,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. - currentBlock->SetCond(targetBlock, currentBlock->Next()); + FlowEdge* const edgeToNext = comp->fgAddRefPred(currentBlock->Next(), currentBlock); + currentBlock->SetCond(newEdge, edgeToNext); // Now, build the conditional statement for the current case that is // being evaluated: diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index eea95aa83fa007..d36bf14f999a43 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -14668,7 +14668,7 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) // gtReverseCond(condExpr); - thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true, remainderBlock); + thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->SetFlags(propagateFlagsToAll); condBlock->SetCond(elseBlock, thenBlock); if (!block->HasFlag(BBF_INTERNAL)) @@ -14678,7 +14678,8 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) } fgAddRefPred(thenBlock, condBlock); - fgAddRefPred(remainderBlock, thenBlock); + FlowEdge* const newEdge = fgAddRefPred(remainderBlock, thenBlock); + thenBlock->SetTargetEdge(newEdge); thenBlock->inheritWeightPercentage(condBlock, qmark->ThenNodeLikelihood()); elseBlock->inheritWeightPercentage(condBlock, qmark->ElseNodeLikelihood()); diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 7959e949198486..5b3a699bd09e66 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -2145,7 +2145,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. - BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true, bJoin); + BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); // Clone each statement in bTest and append to bNewCond. 
for (Statement* const stmt : bTest->Statements()) @@ -2204,9 +2204,10 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Update pred info // - bNewCond->SetFalseTarget(bTop); - fgAddRefPred(bJoin, bNewCond); - fgAddRefPred(bTop, bNewCond); + FlowEdge* const trueEdge = fgAddRefPred(bJoin, bNewCond); + FlowEdge* const falseEdge = fgAddRefPred(bTop, bNewCond); + bNewCond->SetTrueEdge(trueEdge); + bNewCond->SetFalseEdge(falseEdge); fgRemoveRefPred(block->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(bNewCond, block); @@ -2987,7 +2988,7 @@ bool Compiler::optCreatePreheader(FlowGraphNaturalLoop* loop) insertBefore = header; } - BasicBlock* preheader = fgNewBBbefore(BBJ_ALWAYS, insertBefore, false, header); + BasicBlock* preheader = fgNewBBbefore(BBJ_ALWAYS, insertBefore, false); preheader->SetFlags(BBF_INTERNAL); fgSetEHRegionForNewPreheaderOrExit(preheader); @@ -3000,7 +3001,8 @@ bool Compiler::optCreatePreheader(FlowGraphNaturalLoop* loop) JITDUMP("Created new preheader " FMT_BB " for " FMT_LP "\n", preheader->bbNum, loop->GetIndex()); - fgAddRefPred(header, preheader); + FlowEdge* const newEdge = fgAddRefPred(header, preheader); + preheader->SetTargetEdge(newEdge); for (FlowEdge* enterEdge : loop->EntryEdges()) { @@ -3103,26 +3105,27 @@ bool Compiler::optCanonicalizeExit(FlowGraphNaturalLoop* loop, BasicBlock* exit) BasicBlock* bottom = loop->GetLexicallyBottomMostBlock(); if (bottom->hasTryIndex() && (bottom->getTryIndex() == finallyBlock->getHndIndex()) && !bottom->hasHndIndex()) { - newExit = fgNewBBafter(BBJ_ALWAYS, bottom, true, exit); + newExit = fgNewBBafter(BBJ_ALWAYS, bottom, true); } else { // Otherwise just do the heavy-handed thing and insert it anywhere in the right region. - newExit = fgNewBBinRegion(BBJ_ALWAYS, finallyBlock->bbHndIndex, 0, nullptr, exit, /* putInFilter */ false, + newExit = fgNewBBinRegion(BBJ_ALWAYS, finallyBlock->bbHndIndex, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, /* insertAtEnd */ true); } } else #endif { - newExit = fgNewBBbefore(BBJ_ALWAYS, exit, false, exit); + newExit = fgNewBBbefore(BBJ_ALWAYS, exit, false); newExit->SetFlags(BBF_NONE_QUIRK); fgSetEHRegionForNewPreheaderOrExit(newExit); } newExit->SetFlags(BBF_INTERNAL); - fgAddRefPred(exit, newExit); + FlowEdge* const newEdge = fgAddRefPred(exit, newExit); + newExit->SetTargetEdge(newEdge); newExit->bbCodeOffs = exit->bbCodeOffs; diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index d1849a2b41be8a..f41b0b50979478 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -101,13 +101,12 @@ class PatchpointTransformer // Arguments: // jumpKind - jump kind for the new basic block // insertAfter - basic block, after which compiler has to insert the new one. - // jumpDest - jump target for the new basic block. Defaults to nullptr. // // Return Value: // new basic block. 
- BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter, BasicBlock* jumpDest = nullptr) + BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter) { - BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true, jumpDest); + BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true); block->SetFlags(BBF_IMPORTED); return block; } @@ -143,7 +142,7 @@ class PatchpointTransformer // Current block now becomes the test block BasicBlock* remainderBlock = compiler->fgSplitBlockAtBeginning(block); - BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, block, block->Next()); + BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, block); // Update flow and flags block->SetCond(remainderBlock, helperBlock); @@ -158,6 +157,7 @@ class PatchpointTransformer FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, helperBlock); newEdge->setLikelihood(1.0); + helperBlock->SetTargetEdge(newEdge); // Update weights remainderBlock->inheritWeight(block); diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 1008b81194f8d5..1a523bcfbeec7a 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -363,16 +363,18 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* assert(lastBlock->FalseTargetIs(blockIfTrue)); fgRemoveRefPred(blockIfTrue, firstBlock); BasicBlock* targetBlock = blockIfTrue; - blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, targetBlock); + blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true); FlowEdge* const newEdge = fgAddRefPred(targetBlock, blockIfTrue); skipPredRemoval = true; + blockIfTrue->SetTargetEdge(newEdge); } else { assert(lastBlock->FalseTargetIs(blockIfFalse)); BasicBlock* targetBlock = blockIfFalse; - blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, targetBlock); + blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true); FlowEdge* const newEdge = fgAddRefPred(targetBlock, blockIfFalse); + blockIfFalse->SetTargetEdge(newEdge); } } From 0ae2c0a8a86f77a499813bff7a83e17f4d7f1f41 Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Fri, 23 Feb 2024 18:17:29 -0500 Subject: [PATCH 05/16] Add SetTrueEdge, SetFalseEdge --- src/coreclr/jit/block.h | 31 +++++--- src/coreclr/jit/fgbasic.cpp | 30 ++++---- src/coreclr/jit/fgehopt.cpp | 7 +- src/coreclr/jit/fgopt.cpp | 36 ++++----- src/coreclr/jit/helperexpansion.cpp | 115 +++++++++++++++------------- src/coreclr/jit/optimizebools.cpp | 43 ++++++----- 6 files changed, 143 insertions(+), 119 deletions(-) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index fb3c6d9c5a661f..7b4fd8118aee1c 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -527,12 +527,12 @@ struct BasicBlock : private LIR::Range union { unsigned bbTargetOffs; // PC offset (temporary only) FlowEdge* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc) - BasicBlock* bbTrueTarget; // BBJ_COND jump target when its condition is true (alias for bbTarget) + FlowEdge* bbTrueEdge; // BBJ_COND successor edge when its condition is true (alias for bbTargetEdge) BBswtDesc* bbSwtTargets; // switch descriptor BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor }; - // Points to the successor of a BBJ_COND block if bbTrueTarget is not taken + // Successor edge of a BBJ_COND block if bbTrueEdge is not taken BasicBlock* bbFalseTarget; public: @@ -654,17 +654,26 @@ struct 
BasicBlock : private LIR::Range } BasicBlock* GetTrueTarget() const + { + return GetTrueEdge()->getDestinationBlock(); + } + + FlowEdge* GetTrueEdge() const { assert(KindIs(BBJ_COND)); - assert(bbTrueTarget != nullptr); - return bbTrueTarget; + assert(bbTrueEdge != nullptr); + assert(bbTrueEdge->getSourceBlock() == this); + assert(bbTrueEdge->getDestinationBlock() != nullptr); + return bbTrueEdge; } - void SetTrueTarget(BasicBlock* target) + void SetTrueEdge(FlowEdge* trueEdge) { assert(KindIs(BBJ_COND)); - assert(target != nullptr); - bbTrueTarget = target; + bbTrueEdge = trueEdge; + assert(bbTrueEdge != nullptr); + assert(bbTrueEdge->getSourceBlock() == this); + assert(bbTrueEdge->getDestinationBlock() != nullptr); } bool TrueTargetIs(const BasicBlock* target) const @@ -681,11 +690,13 @@ struct BasicBlock : private LIR::Range return bbFalseTarget; } - void SetFalseTarget(BasicBlock* target) + void SetFalseEdge(FlowEdge* falseEdge) { assert(KindIs(BBJ_COND)); - assert(target != nullptr); - bbFalseTarget = target; + bbFalseEdge = falseEdge; + assert(bbFalseEdge != nullptr); + assert(bbFalseEdge->getSourceBlock() == this); + assert(bbFalseEdge->getDestinationBlock() != nullptr); } bool FalseTargetIs(const BasicBlock* target) const diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index d89c113bde1f62..f53dbf07e02b90 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -207,8 +207,7 @@ bool Compiler::fgEnsureFirstBBisScratch() assert(fgFirstBBScratch == nullptr); - BasicBlock* block = BasicBlock::New(this, BBJ_ALWAYS); - block->SetFlags(BBF_NONE_QUIRK); + BasicBlock* block; if (fgFirstBB != nullptr) { @@ -226,14 +225,16 @@ bool Compiler::fgEnsureFirstBBisScratch() fgFirstBB->bbRefs--; // The new scratch bb will fall through to the old first bb + block = BasicBlock::New(this); FlowEdge* const edge = fgAddRefPred(fgFirstBB, block); edge->setLikelihood(1.0); - block->SetTargetEdge(edge); + block->SetKindAndTargetEdge(BBJ_ALWAYS, edge); fgInsertBBbefore(fgFirstBB, block); } else { noway_assert(fgLastBB == nullptr); + block = BasicBlock::New(this, BBJ_ALWAYS); fgFirstBB = block; fgLastBB = block; } @@ -241,7 +242,7 @@ bool Compiler::fgEnsureFirstBBisScratch() noway_assert(fgLastBB != nullptr); // Set the expected flags - block->SetFlags(BBF_INTERNAL | BBF_IMPORTED); + block->SetFlags(BBF_INTERNAL | BBF_IMPORTED | BBF_NONE_QUIRK); // This new first BB has an implicit ref, and no others. 
// @@ -2971,10 +2972,10 @@ void Compiler::fgLinkBasicBlocks() { BasicBlock* const trueTarget = fgLookupBB(curBBdesc->GetTargetOffs()); BasicBlock* const falseTarget = curBBdesc->Next(); - curBBdesc->SetTrueTarget(trueTarget); - curBBdesc->SetFalseTarget(falseTarget); - fgAddRefPred(trueTarget, curBBdesc); - fgAddRefPred(falseTarget, curBBdesc); + FlowEdge* const trueEdge = fgAddRefPred(trueTarget, curBBdesc); + FlowEdge* const falseEdge = fgAddRefPred(falseTarget, curBBdesc); + curBBdesc->SetTrueEdge(trueEdge); + curBBdesc->SetFalseEdge(falseEdge); if (trueTarget->bbNum <= curBBdesc->bbNum) { @@ -5455,15 +5456,17 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) { // Add a new block after bSrc which jumps to 'bDst' jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); - bSrc->SetFalseTarget(jmpBlk); - fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc)); + FlowEdge* oldEdge = bSrc->GetFalseEdge(); + fgReplacePred(oldEdge, jmpBlk); + jmpBlk->SetTargetEdge(oldEdge); + assert(jmpBlk->TargetIs(bDst)); + + FlowEdge* newEdge = fgAddRefPred(jmpBlk, bSrc, oldEdge); + bSrc->SetFalseEdge(newEdge); // When adding a new jmpBlk we will set the bbWeight and bbFlags // if (fgHaveValidEdgeWeights && fgHaveProfileWeights()) { - FlowEdge* const newEdge = fgGetPredForBlock(jmpBlk, bSrc); - jmpBlk->bbWeight = (newEdge->edgeWeightMin() + newEdge->edgeWeightMax()) / 2; if (bSrc->bbWeight == BB_ZERO_WEIGHT) { @@ -5501,9 +5504,6 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) } } - fgReplacePred(bDst, bSrc, jmpBlk); - - JITDUMP("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n", jmpBlk->GetTarget()->bbNum, bSrc->bbNum); } diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 35a1027b7b85a3..f2b73ff0e162a5 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -2104,16 +2104,17 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, assert(predBlock->FalseTargetIs(nonCanonicalBlock)); BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true); - predBlock->SetFalseTarget(newBlock); JITDUMP("*** " FMT_BB " now falling through to empty " FMT_BB " and then to " FMT_BB "\n", predBlock->bbNum, newBlock->bbNum, canonicalBlock->bbNum); // Remove the old flow - fgRemoveRefPred(nonCanonicalBlock, predBlock); + fgRemoveRefPred(predEdge); // Wire up the new flow - fgAddRefPred(newBlock, predBlock, predEdge); + FlowEdge* const falseEdge = fgAddRefPred(newBlock, predBlock, predEdge); + predBlock->SetFalseEdge(falseEdge); + assert(predBlock->FalseTargetIs(newBlock)); FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, newBlock, predEdge); newBlock->SetTargetEdge(newEdge); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 45c1549f99ba00..90a8def1639097 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1572,12 +1572,12 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc if (block->TrueTargetIs(bDest)) { assert(!block->FalseTargetIs(bDest)); - block->SetTrueTarget(bDest->GetTarget()); + block->SetTrueEdge(newEdge); } else { assert(block->FalseTargetIs(bDest)); - block->SetFalseTarget(bDest->GetTarget()); + block->SetFalseEdge(newEdge); } break; @@ -3517,11 +3517,11 @@ bool Compiler::fgReorderBlocks(bool useProfile) assert(test->OperIsConditionalJump()); test->AsOp()->gtOp1 = gtReverseCond(test->AsOp()->gtOp1); - BasicBlock* newFalseTarget = block->GetTrueTarget(); - BasicBlock* newTrueTarget = block->GetFalseTarget(); -
block->SetTrueTarget(newTrueTarget); - block->SetFalseTarget(newFalseTarget); - assert(block->CanRemoveJumpToTarget(newFalseTarget, this)); + FlowEdge* const newFalseEdge = block->GetTrueEdge(); + FlowEdge* const newTrueEdge = block->GetFalseEdge(); + block->SetTrueEdge(newTrueEdge); + block->SetFalseEdge(newFalseEdge); + assert(block->CanRemoveJumpToTarget(block->GetFalseTarget(), this)); } else { @@ -4578,10 +4578,10 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(condTest->gtOper == GT_JTRUE); condTest->AsOp()->gtOp1 = gtReverseCond(condTest->AsOp()->gtOp1); - BasicBlock* trueTarget = bPrev->GetTrueTarget(); - BasicBlock* falseTarget = bPrev->GetFalseTarget(); - bPrev->SetTrueTarget(falseTarget); - bPrev->SetFalseTarget(trueTarget); + FlowEdge* const trueEdge = bPrev->GetTrueEdge(); + FlowEdge* const falseEdge = bPrev->GetFalseEdge(); + bPrev->SetTrueEdge(falseEdge); + bPrev->SetFalseEdge(trueEdge); // may need to rethread // @@ -4985,11 +4985,12 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh { BasicBlock* const bDestFalseTarget = bDest->GetFalseTarget(); BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); - bDest->SetFalseTarget(bFixup); bFixup->inheritWeight(bDestFalseTarget); fgRemoveRefPred(bDestFalseTarget, bDest); - fgAddRefPred(bFixup, bDest); + FlowEdge* const falseEdge = fgAddRefPred(bFixup, bDest); + bDest->SetFalseEdge(falseEdge); + FlowEdge* const newEdge = fgAddRefPred(bDestFalseTarget, bFixup); bFixup->SetTargetEdge(newEdge); } @@ -5019,10 +5020,10 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh } // Optimize the Conditional JUMP to go to the new target - block->SetTrueTarget(bNext->GetTarget()); - block->SetFalseTarget(bNext->Next()); - - fgAddRefPred(bNext->GetTarget(), block, fgRemoveRefPred(bNext->GetTarget(), bNext)); + fgRemoveRefPred(block->GetFalseEdge()); + block->SetFalseEdge(block->GetTrueEdge()); + FlowEdge* const newEdge = fgAddRefPred(bNext->GetTarget(), block, fgRemoveRefPred(bNext->GetTargetEdge())); + block->SetTrueEdge(newEdge); /* Unlink bNext from the BasicBlock list; note that we can @@ -5034,7 +5035,6 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh to the final target by the time we're done here. 
*/ - fgRemoveRefPred(bNext, block); fgUnlinkBlockForRemoval(bNext); /* Mark the block as removed */ diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index aa8ad8b2d0271d..8af6eab676409c 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -325,16 +325,10 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm assert(fallbackBb->JumpsToNext()); fallbackBb->SetFlags(BBF_NONE_QUIRK); - // Set nullcheckBb's true jump target - nullcheckBb->SetTrueTarget(fallbackBb); - // Fast-path basic block GenTree* fastpathValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), fastPathValueClone); BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fastpathValueDef, debugInfo); - // Set nullcheckBb's false jump target - nullcheckBb->SetFalseTarget(fastPathBb); - BasicBlock* sizeCheckBb = nullptr; if (needsSizeCheck) { @@ -410,9 +404,12 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm } // fallbackBb is reachable from both nullcheckBb and sizeCheckBb - fgAddRefPred(fallbackBb, nullcheckBb); + FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, nullcheckBb); + nullcheckBb->SetTrueEdge(trueEdge); + // fastPathBb is only reachable from successful nullcheckBb - fgAddRefPred(fastPathBb, nullcheckBb); + FlowEdge* const falseEdge = fgAddRefPred(fastPathBb, nullcheckBb); + nullcheckBb->SetFalseEdge(falseEdge); } else { @@ -728,8 +725,10 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St // // Update preds in all new blocks // - fgAddRefPred(fallbackBb, tlsRootNullCondBB); - fgAddRefPred(fastPathBb, tlsRootNullCondBB); + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, tlsRootNullCondBB); + FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, tlsRootNullCondBB); + tlsRootNullCondBB->SetTrueEdge(trueEdge); + tlsRootNullCondBB->SetFalseEdge(falseEdge); { FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb); @@ -741,9 +740,6 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St fastPathBb->SetTargetEdge(newEdge); } - tlsRootNullCondBB->SetTrueTarget(fastPathBb); - tlsRootNullCondBB->SetFalseTarget(fallbackBb); - // Inherit the weights block->inheritWeight(prevBb); tlsRootNullCondBB->inheritWeight(prevBb); @@ -1095,27 +1091,30 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* gtNewStoreLclVarNode(threadStaticBlockLclNum, gtCloneExpr(threadStaticBlockBaseLclValueUse)); BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true); - // Set maxThreadStaticBlocksCondBB's jump targets - maxThreadStaticBlocksCondBB->SetTrueTarget(fallbackBb); - maxThreadStaticBlocksCondBB->SetFalseTarget(threadStaticBlockNullCondBB); - - // Set threadStaticBlockNullCondBB's jump targets - threadStaticBlockNullCondBB->SetTrueTarget(fastPathBb); - threadStaticBlockNullCondBB->SetFalseTarget(fallbackBb); - // // Update preds in all new blocks // assert(prevBb->KindIs(BBJ_ALWAYS)); fgRemoveRefPred(prevBb->GetTargetEdge()); - FlowEdge* const newEdge = fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb); - prevBb->SetTargetEdge(newEdge); + + { + FlowEdge* const newEdge = fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb); + prevBb->SetTargetEdge(newEdge); + } - fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB); - fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB); + { + FlowEdge* const trueEdge =
fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB); + FlowEdge* const falseEdge = fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB); + maxThreadStaticBlocksCondBB->SetTrueEdge(trueEdge); + maxThreadStaticBlocksCondBB->SetFalseEdge(falseEdge); + } - fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB); - fgAddRefPred(fallbackBb, threadStaticBlockNullCondBB); + { + FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB); + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, threadStaticBlockNullCondBB); + threadStaticBlockNullCondBB->SetTrueEdge(trueEdge); + threadStaticBlockNullCondBB->SetFalseEdge(falseEdge); + } { FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb); @@ -1493,8 +1492,8 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G assert(prevBb->JumpsToNext()); // Both fastPathBb and helperCallBb have a single common pred - isInitedBb - isInitedBb->SetFalseTarget(helperCallBb); - fgAddRefPred(helperCallBb, isInitedBb); + FlowEdge* const falseEdge = fgAddRefPred(helperCallBb, isInitedBb); + isInitedBb->SetFalseEdge(falseEdge); // // Re-distribute weights // @@ -1813,10 +1812,13 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, prevBb->SetFlags(BBF_NONE_QUIRK); assert(prevBb->JumpsToNext()); - // lengthCheckBb has two successors: block and fastpathBb - lengthCheckBb->SetFalseTarget(fastpathBb); - fgAddRefPred(fastpathBb, lengthCheckBb); - fgAddRefPred(block, lengthCheckBb); + { + // lengthCheckBb has two successors: block and fastpathBb + FlowEdge* const trueEdge = fgAddRefPred(block, lengthCheckBb); + FlowEdge* const falseEdge = fgAddRefPred(fastpathBb, lengthCheckBb); + lengthCheckBb->SetTrueEdge(trueEdge); + lengthCheckBb->SetFalseEdge(falseEdge); + } { // fastpathBb flows into block @@ -2491,8 +2493,6 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, // // Wire up the blocks // - nullcheckBb->SetTrueTarget(lastBb); - nullcheckBb->SetFalseTarget(typeCheckNotNeeded ? fallbackBb : typeChecksBbs[0]); // Tricky case - wire up multiple type check blocks (in most cases there is only one) for (int candidateId = 0; candidateId < numOfCandidates; candidateId++) { @@ -2500,47 +2500,50 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, BasicBlock* curTypeCheckBb = typeChecksBbs[candidateId]; // All type checks jump straight to the typeCheckSucceedBb on success - curTypeCheckBb->SetTrueTarget(typeCheckSucceedBb); - fgAddRefPred(typeCheckSucceedBb, curTypeCheckBb); + FlowEdge* const trueEdge = fgAddRefPred(typeCheckSucceedBb, curTypeCheckBb); + curTypeCheckBb->SetTrueEdge(trueEdge); // or ... if (candidateId == numOfCandidates - 1) { // ... jump to the fallbackBb on last type check's failure - curTypeCheckBb->SetFalseTarget(fallbackBb); - fgAddRefPred(fallbackBb, curTypeCheckBb); + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, curTypeCheckBb); + curTypeCheckBb->SetFalseEdge(falseEdge); } else { // ...
jump to the next type check on failure - curTypeCheckBb->SetFalseTarget(typeChecksBbs[candidateId + 1]); - fgAddRefPred(typeChecksBbs[candidateId + 1], curTypeCheckBb); + FlowEdge* const falseEdge = fgAddRefPred(typeChecksBbs[candidateId + 1], curTypeCheckBb); + curTypeCheckBb->SetFalseEdge(falseEdge); } } fgRemoveRefPred(firstBb->GetTargetEdge()); - FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, firstBb); - firstBb->SetTargetEdge(newEdge); + + { + FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, firstBb); + firstBb->SetTargetEdge(newEdge); + } + + { + FlowEdge* const trueEdge = fgAddRefPred(lastBb, nullcheckBb); + nullcheckBb->SetTrueEdge(trueEdge); + } - fgAddRefPred(lastBb, nullcheckBb); if (typeCheckNotNeeded) { - fgAddRefPred(fallbackBb, nullcheckBb); + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, nullcheckBb); + nullcheckBb->SetFalseEdge(falseEdge); } else { - fgAddRefPred(typeChecksBbs[0], nullcheckBb); + FlowEdge* const falseEdge = fgAddRefPred(typeChecksBbs[0], nullcheckBb); + nullcheckBb->SetFalseEdge(falseEdge); + FlowEdge* const newEdge = fgAddRefPred(lastBb, typeCheckSucceedBb); typeCheckSucceedBb->SetTargetEdge(newEdge); } - if (!fallbackBb->KindIs(BBJ_THROW)) - { - // if fallbackBb is BBJ_THROW then it has no successors - FlowEdge* const newEdge = fgAddRefPred(lastBb, fallbackBb); - fallbackBb->SetTargetEdge(newEdge); - } - // // Re-distribute weights // @@ -2570,12 +2573,18 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, } else { + assert(fallbackBb->KindIs(BBJ_ALWAYS)); + FlowEdge* const newEdge = fgAddRefPred(lastBb, fallbackBb); + fallbackBb->SetTargetEdge(newEdge); + fallbackBb->inheritWeightPercentage(lastTypeCheckBb, 100 - totalLikelihood); } + if (!typeCheckNotNeeded) { typeCheckSucceedBb->inheritWeightPercentage(typeChecksBbs[0], totalLikelihood); } + lastBb->inheritWeight(firstBb); // diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index bb95bfb0546940..79ef602d69626b 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -807,22 +807,28 @@ bool OptBoolsDsc::optOptimizeRangeTests() return false; } + // Remove the 2nd condition block as we no longer need it + m_comp->fgRemoveRefPred(m_b1->GetFalseEdge()); + m_comp->fgRemoveBlock(m_b2, true); + // Re-direct firstBlock to jump to inRangeBb - m_comp->fgAddRefPred(inRangeBb, m_b1); + FlowEdge* const newEdge = m_comp->fgAddRefPred(inRangeBb, m_b1); + if (!cmp2IsReversed) { - m_b1->SetTrueTarget(inRangeBb); - m_b1->SetFalseTarget(notInRangeBb); + m_b1->SetFalseEdge(m_b1->GetTrueEdge()); + m_b1->SetTrueEdge(newEdge); + assert(m_b1->TrueTargetIs(inRangeBb)); + assert(m_b1->FalseTargetIs(notInRangeBb)); } else { - m_b1->SetFalseTarget(inRangeBb); + m_b1->SetFalseEdge(newEdge); + assert(m_b1->TrueTargetIs(notInRangeBb)); + assert(m_b1->FalseTargetIs(inRangeBb)); } - // Remove the 2nd condition block as we no longer need it - m_comp->fgRemoveRefPred(m_b2, m_b1); - m_comp->fgRemoveBlock(m_b2, true); - Statement* stmt = m_b1->lastStmt(); m_comp->gtSetStmtInfo(stmt); m_comp->fgSetStmtSeq(stmt); @@ -1266,22 +1272,19 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() { // Update edges if m_b1: BBJ_COND and m_b2: BBJ_COND - FlowEdge* edge1 = m_comp->fgGetPredForBlock(m_b1->GetTrueTarget(), m_b1); + FlowEdge* edge1 = m_b1->GetTrueEdge(); FlowEdge* edge2; if (m_sameTarget) { - edge2 = m_comp->fgGetPredForBlock(m_b2->GetTrueTarget(), m_b2); + edge2 = m_b2->GetTrueEdge(); }
else { - edge2 = m_comp->fgGetPredForBlock(m_b2->GetFalseTarget(), m_b2); - - m_comp->fgRemoveRefPred(m_b1->GetTrueTarget(), m_b1); - - m_b1->SetTrueTarget(m_b2->GetTrueTarget()); - - m_comp->fgAddRefPred(m_b2->GetTrueTarget(), m_b1); + edge2 = m_b2->GetFalseEdge(); + m_comp->fgRemoveRefPred(m_b1->GetTrueEdge()); + FlowEdge* const newEdge = m_comp->fgAddRefPred(m_b2->GetTrueTarget(), m_b1); + m_b1->SetTrueEdge(newEdge); } assert(edge1 != nullptr); @@ -1324,9 +1327,9 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() // // Replace pred 'm_b2' for 'm_b2->bbFalseTarget' with 'm_b1' // Remove pred 'm_b2' for 'm_b2->bbTrueTarget' - m_comp->fgReplacePred(m_b2->GetFalseTarget(), m_b2, m_b1); - m_comp->fgRemoveRefPred(m_b2->GetTrueTarget(), m_b2); - m_b1->SetFalseTarget(m_b2->GetFalseTarget()); + m_comp->fgReplacePred(m_b2->GetFalseEdge(), m_b1); + m_comp->fgRemoveRefPred(m_b2->GetTrueEdge()); + m_b1->SetFalseEdge(m_b2->GetFalseEdge()); } // Get rid of the second block From 9cbded54f167c2252753acb51b2331f7f956a3fa Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Sat, 24 Feb 2024 16:23:42 -0500 Subject: [PATCH 06/16] Update SetCond --- src/coreclr/jit/block.cpp | 2 +- src/coreclr/jit/block.h | 11 +++--- src/coreclr/jit/fgopt.cpp | 43 ++++++++++----------- src/coreclr/jit/flowgraph.cpp | 6 +-- src/coreclr/jit/indirectcalltransformer.cpp | 6 +-- src/coreclr/jit/lower.cpp | 18 ++++----- src/coreclr/jit/morph.cpp | 18 ++++++--- src/coreclr/jit/optimizer.cpp | 6 +-- src/coreclr/jit/patchpoint.cpp | 3 +- 9 files changed, 59 insertions(+), 54 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index af630d5c82e764..7327aeb9b8397f 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -858,7 +858,7 @@ void BasicBlock::TransferTarget(BasicBlock* from) from->bbEhfTargets = nullptr; // Make sure nobody uses the descriptor after this. break; case BBJ_COND: - SetCond(from->GetTrueTarget(), from->GetFalseTarget()); + SetCond(from->GetTrueEdge(), from->GetFalseEdge()); break; case BBJ_ALWAYS: SetKindAndTargetEdge(BBJ_ALWAYS, from->GetTargetEdge()); diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 7b4fd8118aee1c..6758c2f28f1ec4 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -706,12 +706,13 @@ struct BasicBlock : private LIR::Range return (bbFalseTarget == target); } - void SetCond(BasicBlock* trueTarget, BasicBlock* falseTarget) + void SetCond(FlowEdge* trueEdge, FlowEdge* falseEdge) { - assert(trueTarget != nullptr); - bbKind = BBJ_COND; - bbTrueTarget = trueTarget; - bbFalseTarget = falseTarget; + assert(trueEdge != nullptr); + assert(falseEdge != nullptr); + bbKind = BBJ_COND; + bbTrueEdge = trueEdge; + bbFalseEdge = falseEdge; } // Set both the block kind and target edge. 
This can clear `bbTargetEdge` when setting diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 90a8def1639097..3f75effcaac9b8 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -776,7 +776,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() fromBlock->SetFlags(BBF_INTERNAL); newBlock->RemoveFlags(BBF_DONT_REMOVE); addedBlocks++; - FlowEdge* const normalTryEntryEdge = fgGetPredForBlock(newBlock, fromBlock); + FlowEdge* const normalTryEntryEdge = fromBlock->GetTargetEdge(); GenTree* const entryStateLcl = gtNewLclvNode(entryStateVar, TYP_INT); GenTree* const compareEntryStateToZero = @@ -784,9 +784,9 @@ PhaseStatus Compiler::fgPostImportationCleanup() GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); - fromBlock->SetCond(toBlock, newBlock); FlowEdge* const osrTryEntryEdge = fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); + fromBlock->SetCond(osrTryEntryEdge, normalTryEntryEdge); // Not sure what the correct edge likelihoods are just yet; // for now we'll say the OSR path is the likely one. @@ -1295,16 +1295,16 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) break; case BBJ_COND: - block->SetCond(bNext->GetTrueTarget(), bNext->GetFalseTarget()); + /* Update the predecessor list for bNext's true target */ + fgReplacePred(bNext->GetTrueEdge(), block); - /* Update the predecessor list for 'bNext->bbTrueTarget' */ - fgReplacePred(bNext->GetTrueTarget(), bNext, block); - - /* Update the predecessor list for 'bNext->bbFalseTarget' if it is different than 'bNext->bbTrueTarget' */ - if (!bNext->TrueTargetIs(bNext->GetFalseTarget())) + /* Update the predecessor list for bNext's false target if it is different from the true target */ + if (!bNext->TrueEdgeIs(bNext->GetFalseEdge())) { - fgReplacePred(bNext->GetFalseTarget(), bNext, block); + fgReplacePred(bNext->GetFalseEdge(), block); } + + block->SetCond(bNext->GetTrueEdge(), bNext->GetFalseEdge()); break; case BBJ_EHFINALLYRET: @@ -2059,9 +2059,9 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) fgSetStmtSeq(switchStmt); } - BasicBlock* const trueTarget = block->GetSwitchTargets()->bbsDstTab[0]->getDestinationBlock(); - BasicBlock* const falseTarget = block->GetSwitchTargets()->bbsDstTab[1]->getDestinationBlock(); - block->SetCond(trueTarget, falseTarget); + FlowEdge* const trueEdge = block->GetSwitchTargets()->bbsDstTab[0]; + FlowEdge* const falseEdge = block->GetSwitchTargets()->bbsDstTab[1]; + block->SetCond(trueEdge, falseEdge); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -2476,14 +2476,15 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // Fix up block's flow // - block->SetCond(target->GetTrueTarget(), next); - fgAddRefPred(block->GetTrueTarget(), block); fgRemoveRefPred(target, block); + + FlowEdge* const trueEdge = fgAddRefPred(target->GetTrueTarget(), block); + FlowEdge* const falseEdge = fgAddRefPred(next, block); + block->SetCond(trueEdge, falseEdge); // The new block 'next' will inherit its weight from 'block' // next->inheritWeight(block); - fgAddRefPred(next, block); FlowEdge* const newEdge = fgAddRefPred(target->GetFalseTarget(), next); next->SetTargetEdge(newEdge); @@ -2884,13 +2885,11 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // We need to update the following flags of the bJump block if they were set in the bDest block bJump->CopyFlags(bDest, BBF_COPY_PROPAGATE); -
bJump->SetCond(bDestNormalTarget, bJump->Next()); - /* Update bbRefs and bbPreds */ // bJump now falls through into the next block // - fgAddRefPred(bJump->GetFalseTarget(), bJump); + FlowEdge* const falseEdge = fgAddRefPred(bJump->Next(), bJump); // bJump no longer jumps to bDest // @@ -2898,7 +2897,9 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // bJump now jumps to bDest's normal jump target // - fgAddRefPred(bDestNormalTarget, bJump); + FlowEdge* const trueEdge = fgAddRefPred(bDestNormalTarget, bJump); + + bJump->SetCond(trueEdge, falseEdge); if (weightJump > 0) { @@ -3044,11 +3045,9 @@ bool Compiler::fgOptimizeSwitchJumps() // Wire up the new control flow. // - block->SetCond(dominantTarget, newBlock); FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds; - assert(blockToNewBlockEdge->getSourceBlock() == block); - assert(blockToTargetEdge->getSourceBlock() == block); + block->SetCond(blockToTargetEdge, blockToNewBlockEdge); // Update profile data // diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index aa361908584050..6c11def93d6e64 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -361,10 +361,10 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) } #endif - top->SetCond(bottom, poll); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. - fgAddRefPred(bottom, top); - fgAddRefPred(poll, top); + FlowEdge* const trueEdge = fgAddRefPred(bottom, top); + FlowEdge* const falseEdge = fgAddRefPred(poll, top); + top->SetCond(trueEdge, falseEdge); FlowEdge* const newEdge = fgAddRefPred(bottom, poll); poll->SetTargetEdge(newEdge); diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 883590af49d3ff..f8b58880c3a152 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -281,11 +281,11 @@ class IndirectCallTransformer // Todo: get likelihoods right // assert(checkBlock->KindIs(BBJ_ALWAYS)); - checkBlock->SetCond(elseBlock, thenBlock); FlowEdge* const thenEdge = compiler->fgAddRefPred(thenBlock, checkBlock); thenEdge->setLikelihood(0.5); FlowEdge* const elseEdge = compiler->fgAddRefPred(elseBlock, checkBlock); elseEdge->setLikelihood(0.5); + checkBlock->SetCond(elseEdge, thenEdge); // thenBlock assert(thenBlock->TargetIs(remainderBlock)); @@ -614,10 +614,10 @@ class IndirectCallTransformer weight_t checkLikelihoodWt = ((weight_t)checkLikelihood) / 100.0; // prevCheckBlock is expected to jump to this new check (if its type check doesn't succeed) - prevCheckBlock->SetCond(checkBlock, prevCheckBlock->Next()); FlowEdge* const checkEdge = compiler->fgAddRefPred(checkBlock, prevCheckBlock); checkEdge->setLikelihood(checkLikelihoodWt); checkBlock->inheritWeightPercentage(currBlock, checkLikelihood); + prevCheckBlock->SetCond(checkEdge, prevCheckBlock->GetFalseEdge()); } // Find last arg with a side effect. 
All args with any effect @@ -1064,9 +1064,9 @@ class IndirectCallTransformer // where we know the last check is always true (in case of "exact" GDV) if (!checkFallsThrough) { - checkBlock->SetCond(elseBlock, checkBlock->Next()); FlowEdge* const checkEdge = compiler->fgAddRefPred(elseBlock, checkBlock); checkEdge->setLikelihood(elseLikelihoodDbl); + checkBlock->SetCond(checkEdge, checkBlock->GetFalseEdge()); } else { diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index b31946c4e5f74b..8c7d22a85ea775 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -951,7 +951,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // originalSwitchBB is now a BBJ_ALWAYS, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. assert(originalSwitchBB->KindIs(BBJ_ALWAYS)); - assert(originalSwitchBB->NextIs(afterDefaultCondBlock)); + assert(originalSwitchBB->TargetEdgeIs(afterDefaultCondBlock)); assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->GetSwitchTargets()->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. @@ -962,10 +962,10 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // as a predecessor, but the fgSplitBlockAfterStatement() moved all predecessors to point // to afterDefaultCondBlock. comp->fgRemoveRefPred(jumpTab[jumpCnt - 1]); - comp->fgAddRefPred(defaultBB, originalSwitchBB, jumpTab[jumpCnt - 1]); + FlowEdge* const trueEdge = comp->fgAddRefPred(defaultBB, originalSwitchBB, jumpTab[jumpCnt - 1]); // Turn originalSwitchBB into a BBJ_COND. - originalSwitchBB->SetCond(defaultBB, afterDefaultCondBlock); + originalSwitchBB->SetCond(trueEdge, originalSwitchBB->GetTargetEdge()); bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt; @@ -1309,11 +1309,15 @@ bool Lowering::TryLowerSwitchToBitTest( comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); + // TODO: Use old edges to influence new edge likelihoods? + FlowEdge* const case0Edge = comp->fgAddRefPred(bbCase0, bbSwitch); + FlowEdge* const case1Edge = comp->fgAddRefPred(bbCase1, bbSwitch); + if (bbSwitch->NextIs(bbCase0)) { // GenCondition::C generates JC so we jump to bbCase1 when the bit is set bbSwitchCondition = GenCondition::C; - bbSwitch->SetCond(bbCase1, bbCase0); + bbSwitch->SetCond(case1Edge, case0Edge); } else { @@ -1321,13 +1325,9 @@ bool Lowering::TryLowerSwitchToBitTest( // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set bbSwitchCondition = GenCondition::NC; - bbSwitch->SetCond(bbCase0, bbCase1); + bbSwitch->SetCond(case0Edge, case1Edge); } - // TODO: Use old edges to influence new edge likelihoods? - comp->fgAddRefPred(bbCase0, bbSwitch); - comp->fgAddRefPred(bbCase1, bbSwitch); - var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? 
TYP_INT : TYP_LONG; GenTree* bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index d36bf14f999a43..54f61fe64fc55f 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -14670,16 +14670,18 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->SetFlags(propagateFlagsToAll); - condBlock->SetCond(elseBlock, thenBlock); if (!block->HasFlag(BBF_INTERNAL)) { thenBlock->RemoveFlags(BBF_INTERNAL); thenBlock->SetFlags(BBF_IMPORTED); } - fgAddRefPred(thenBlock, condBlock); FlowEdge* const newEdge = fgAddRefPred(remainderBlock, thenBlock); thenBlock->SetTargetEdge(newEdge); + + assert(condBlock->TargetIs(elseBlock)); + FlowEdge* const falseEdge = fgAddRefPred(thenBlock, condBlock); + condBlock->SetCond(condBlock->GetTargetEdge(), falseEdge); thenBlock->inheritWeightPercentage(condBlock, qmark->ThenNodeLikelihood()); elseBlock->inheritWeightPercentage(condBlock, qmark->ElseNodeLikelihood()); @@ -14693,8 +14695,11 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) // bbj_cond(true) // gtReverseCond(condExpr); - condBlock->SetCond(remainderBlock, elseBlock); - fgAddRefPred(remainderBlock, condBlock); + + assert(condBlock->TargetIs(elseBlock)); + FlowEdge* const trueEdge = fgAddRefPred(remainderBlock, condBlock); + condBlock->SetCond(trueEdge, condBlock->GetTargetEdge()); + // Since we have no false expr, use the one we'd already created. thenBlock = elseBlock; elseBlock = nullptr; @@ -14709,8 +14714,9 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) // +-->------------+ // bbj_cond(true) // - condBlock->SetCond(remainderBlock, elseBlock); - fgAddRefPred(remainderBlock, condBlock); + assert(condBlock->TargetIs(elseBlock)); + FlowEdge* const trueEdge = fgAddRefPred(remainderBlock, condBlock); + condBlock->SetCond(trueEdge, condBlock->GetTargetEdge()); elseBlock->inheritWeightPercentage(condBlock, qmark->ElseNodeLikelihood()); } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 5b3a699bd09e66..90d9eae7e2502f 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -636,9 +636,9 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo falseTarget = blk->GetFalseTarget(); } - fgAddRefPred(trueTarget, newBlk); - fgAddRefPred(falseTarget, newBlk); - newBlk->SetCond(trueTarget, falseTarget); + FlowEdge* const trueEdge = fgAddRefPred(trueTarget, newBlk); + FlowEdge* const falseEdge = fgAddRefPred(falseTarget, newBlk); + newBlk->SetCond(trueEdge, falseEdge); break; } diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index f41b0b50979478..a22534c9fe2941 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -145,15 +145,14 @@ class PatchpointTransformer BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, block); // Update flow and flags - block->SetCond(remainderBlock, helperBlock); block->SetFlags(BBF_INTERNAL); - helperBlock->SetFlags(BBF_BACKWARD_JUMP | BBF_NONE_QUIRK); FlowEdge* const falseEdge = compiler->fgAddRefPred(helperBlock, block); FlowEdge* const trueEdge = compiler->fgGetPredForBlock(remainderBlock, block); trueEdge->setLikelihood(HIGH_PROBABILITY / 100.0); falseEdge->setLikelihood((100 - HIGH_PROBABILITY) / 100.0); + block->SetCond(trueEdge, falseEdge); FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, 
helperBlock); newEdge->setLikelihood(1.0); From 6e807aea7623d621d49918964b672fd3f4e6d919 Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Sat, 24 Feb 2024 16:47:36 -0500 Subject: [PATCH 07/16] Clean up bbTrueTarget/bbFalseTarget usage --- src/coreclr/jit/block.cpp | 18 +++++++++--------- src/coreclr/jit/clrjit.natvis | 2 +- src/coreclr/jit/codegenxarch.cpp | 2 +- src/coreclr/jit/compiler.hpp | 20 ++++++++++---------- src/coreclr/jit/fgbasic.cpp | 6 +++--- src/coreclr/jit/fgopt.cpp | 2 +- src/coreclr/jit/fgprofile.cpp | 2 +- src/coreclr/jit/fgprofilesynthesis.cpp | 2 +- src/coreclr/jit/flowgraph.cpp | 2 +- src/coreclr/jit/gentree.h | 2 +- src/coreclr/jit/importer.cpp | 8 ++++---- src/coreclr/jit/jiteh.cpp | 8 ++++---- src/coreclr/jit/optimizebools.cpp | 17 +++++++++-------- src/coreclr/jit/optimizer.cpp | 4 ++-- 14 files changed, 48 insertions(+), 47 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 7327aeb9b8397f..355921da105b9e 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -715,7 +715,7 @@ void BasicBlock::dspKind() const break; case BBJ_COND: - printf(" -> %s,%s (cond)", dspBlockNum(bbTrueTarget), dspBlockNum(bbFalseTarget)); + printf(" -> %s,%s (cond)", dspBlockNum(GetTrueTarget()), dspBlockNum(GetFalseTarget())); break; case BBJ_SWITCH: @@ -1145,7 +1145,7 @@ unsigned BasicBlock::NumSucc() const return 1; case BBJ_COND: - if (bbTrueTarget == bbFalseTarget) + if (TrueEdgeIs(GetFalseEdge())) { return 1; } @@ -1204,13 +1204,13 @@ BasicBlock* BasicBlock::GetSucc(unsigned i) const case BBJ_COND: if (i == 0) { - return bbFalseTarget; + return GetFalseTarget(); } else { assert(i == 1); - assert(bbFalseTarget != bbTrueTarget); - return bbTrueTarget; + assert(!TrueEdgeIs(GetFalseEdge())); + return GetTrueTarget(); } case BBJ_EHFINALLYRET: @@ -1270,7 +1270,7 @@ unsigned BasicBlock::NumSucc(Compiler* comp) return 1; case BBJ_COND: - if (bbTrueTarget == bbFalseTarget) + if (TrueEdgeIs(GetFalseEdge())) { return 1; } @@ -1327,13 +1327,13 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp) case BBJ_COND: if (i == 0) { - return bbFalseTarget; + return GetFalseTarget(); } else { assert(i == 1); - assert(bbFalseTarget != bbTrueTarget); - return bbTrueTarget; + assert(!TrueEdgeIs(GetFalseEdge())); + return GetTrueTarget(); } case BBJ_SWITCH: diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis index 95dd3dc305689b..f413fd316c3fa6 100644 --- a/src/coreclr/jit/clrjit.natvis +++ b/src/coreclr/jit/clrjit.natvis @@ -21,7 +21,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u - BB{bbNum,d}->BB{bbTarget->bbNum,d}; {bbKind,en} + BB{bbNum,d}->BB{bbTargetEdge->getDestinationBlock()->bbNum,d}; {bbKind,en} BB{bbNum,d}; {bbKind,en}; {bbSwtTargets->bbsCount} cases BB{bbNum,d}; {bbKind,en}; {bbEhfTargets->bbeCount} succs BB{bbNum,d}; {bbKind,en} diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 2aba181486435b..900612d392e43e 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -332,7 +332,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) #endif // !FEATURE_EH_FUNCLETS // The BBJ_CALLFINALLYRET is used because the BBJ_CALLFINALLY can't point to the - // jump target using bbTarget - that is already used to point + // jump target using bbTargetEdge - that is already used to point // to the finally block. So just skip past the BBJ_CALLFINALLYRET unless the // block is RETLESS. 
if (!block->HasFlag(BBF_RETLESS_CALL)) diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 67f5c59d93265e..99c8ad5076d346 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -664,27 +664,27 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) return VisitEHSuccs(comp, func); case BBJ_CALLFINALLY: - RETURN_ON_ABORT(func(bbTarget)); + RETURN_ON_ABORT(func(GetTarget())); return ::VisitEHSuccs(comp, this, func); case BBJ_CALLFINALLYRET: // These are "pseudo-blocks" and control never actually flows into them // (codegen directly jumps to its successor after finally calls). - return func(bbTarget); + return func(GetTarget()); case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: case BBJ_ALWAYS: - RETURN_ON_ABORT(func(bbTarget)); + RETURN_ON_ABORT(func(GetTarget())); return VisitEHSuccs(comp, func); case BBJ_COND: - RETURN_ON_ABORT(func(bbFalseTarget)); + RETURN_ON_ABORT(func(GetFalseTarget())); - if (bbTrueTarget != bbFalseTarget) + if (!TrueEdgeIs(GetFalseEdge())) { - RETURN_ON_ABORT(func(bbTrueTarget)); + RETURN_ON_ABORT(func(GetTrueTarget())); } return VisitEHSuccs(comp, func); @@ -744,14 +744,14 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) case BBJ_EHFILTERRET: case BBJ_LEAVE: case BBJ_ALWAYS: - return func(bbTarget); + return func(GetTarget()); case BBJ_COND: - RETURN_ON_ABORT(func(bbFalseTarget)); + RETURN_ON_ABORT(func(GetFalseTarget())); - if (bbTrueTarget != bbFalseTarget) + if (!TrueEdgeIs(GetFalseEdge())) { - RETURN_ON_ABORT(func(bbTrueTarget)); + RETURN_ON_ABORT(func(GetTrueTarget())); } return BasicBlockVisit::Continue; diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index f53dbf07e02b90..f0e35243610d95 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -3576,7 +3576,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F noway_assert(codeAddr == codeEndp); - /* Finally link up the bbTarget of the blocks together */ + /* Finally link up the targets of the blocks together */ fgLinkBasicBlocks(); @@ -4821,7 +4821,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) curr->RemoveFlags(BBF_HAS_JMP | BBF_RETLESS_CALL); // Transfer the kind and target. Do this after the code above, to avoid null-ing out the old targets used by the - // above code (and so newBlock->bbNext is valid, so SetCond() can initialize bbFalseTarget if newBlock is a + // above code (and so newBlock->bbNext is valid, so SetCond() can initialize the false target if newBlock is a // BBJ_COND). newBlock->TransferTarget(curr); @@ -5055,7 +5055,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // an immediately following block of a BBJ_SWITCH (which has // no fall-through path). For this case, simply insert a new // fall-through block after 'curr'. 
- // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, this will be unnecessary for BBJ_COND + // TODO-NoFallThrough: Once false target can diverge from bbNext, this will be unnecessary for BBJ_COND newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */); newBlock->SetFlags(BBF_NONE_QUIRK); assert(newBlock->JumpsToNext()); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 3f75effcaac9b8..439cf35731135f 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -4821,7 +4821,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh bDest = block->GetTrueTarget(); bNext = block->GetFalseTarget(); - // TODO-NoFallThrough: Adjust the above logic once bbFalseTarget can diverge from bbNext + // TODO-NoFallThrough: Adjust the above logic once false target can diverge from bbNext assert(block->NextIs(bNext)); } } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 470cf35ebf1204..b2bdeb87a8bfeb 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -4032,7 +4032,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf // // This can happen because some BBJ_LEAVE blocks may have been missed during // our spanning tree walk since we don't know where all the finallies can return - // to just yet (specially, in WalkSpanningTree, we may not add the bbTarget of + // to just yet (specially, in WalkSpanningTree, we may not add the target of // a BBJ_LEAVE to the worklist). // // Worst case those missed blocks dominate other blocks so we can't limit diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index e315e33015e138..7683f3b8d07521 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -190,7 +190,7 @@ void ProfileSynthesis::AssignLikelihoodNext(BasicBlock* block) //------------------------------------------------------------------------ // AssignLikelihoodJump: update edge likelihood for a block that always -// transfers control to bbTarget +// transfers control to its target block // // Arguments: // block -- block in question diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 6c11def93d6e64..55769509d5237b 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -3438,7 +3438,7 @@ PhaseStatus Compiler::fgCreateThrowHelperBlocks() #endif // DEBUG // Mark the block as added by the compiler and not removable by future flow - // graph optimizations. Note that no bbTarget points to these blocks. + // graph optimizations. Note that no target block points to these blocks.
// newBlk->SetFlags(BBF_IMPORTED | BBF_DONT_REMOVE); diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index ff99a9e7202af6..ce40951426b2b1 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -194,7 +194,7 @@ inline AssertionIndex GetAssertionIndex(unsigned index) class AssertionInfo { - // true if the assertion holds on the bbNext edge instead of the bbTarget edge (for GT_JTRUE nodes) + // true if the assertion holds on the false edge instead of the true edge (for GT_JTRUE nodes) unsigned short m_isNextEdgeAssertion : 1; // 1-based index of the assertion unsigned short m_assertionIndex : 15; diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index bc4278215f8643..39a37ad8582f9a 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -7348,7 +7348,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) fgRemoveRefPred(block->GetFalseTarget(), block); block->SetKind(BBJ_ALWAYS); - // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense to + // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense to // set BBF_NONE_QUIRK block->SetFlags(BBF_NONE_QUIRK); @@ -7425,13 +7425,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) } else { - // TODO-NoFallThrough: Update once bbFalseTarget can diverge from bbNext + // TODO-NoFallThrough: Update once false target can diverge from bbNext assert(block->NextIs(block->GetFalseTarget())); JITDUMP("\nThe block jumps to the next " FMT_BB "\n", block->Next()->bbNum); fgRemoveRefPred(block->GetTrueEdge()); block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge()); - // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense + // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense // to set BBF_NONE_QUIRK block->SetFlags(BBF_NONE_QUIRK); } @@ -7610,7 +7610,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) fgRemoveRefPred(block->GetFalseTarget(), block); block->SetKind(BBJ_ALWAYS); - // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense to + // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense to // set BBF_NONE_QUIRK block->SetFlags(BBF_NONE_QUIRK); diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 3e122308ce7e8f..d2223f46150a06 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -4329,8 +4329,8 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) #endif // FEATURE_EH_FUNCLETS // If this is a handler for a filter, the last block of the filter will end with - // a BBJ_EHFILTERRET block that has a bbTarget that jumps to the first block of - // its handler. So we need to update it to keep things in sync. + // a BBJ_EHFILTERRET block that jumps to the first block of its handler. + // So we need to update it to keep things in sync. 
// if (HBtab->HasFilter()) { @@ -4341,11 +4341,11 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) #ifdef DEBUG if (verbose) { - printf("EH#%u: Updating bbTarget for filter ret block: " FMT_BB " => " FMT_BB "\n", + printf("EH#%u: Updating target for filter ret block: " FMT_BB " => " FMT_BB "\n", ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum); } #endif // DEBUG - // Change the bbTarget for bFilterLast from the old first 'block' to the new first 'bPrev' + // Change the target for bFilterLast from the old first 'block' to the new first 'bPrev' fgRemoveRefPred(bFilterLast->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(bPrev, bFilterLast); newEdge->setLikelihood(1.0); diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 79ef602d69626b..44a3db6e840b59 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -89,7 +89,7 @@ class OptBoolsDsc // Notes: // m_b1 and m_b2 are set on entry. // -// Case 1: if b1.bbTarget == b2.bbTarget, it transforms +// Case 1: if b1->TargetIs(b2->GetTarget()), it transforms // B1 : brtrue(t1, Bx) // B2 : brtrue(t2, Bx) // B3 : @@ -107,7 +107,7 @@ class OptBoolsDsc // B3: GT_RETURN (BBJ_RETURN) // B4: GT_RETURN (BBJ_RETURN) // -// Case 2: if B2->FalseTargetIs(B1.bbTarget), it transforms +// Case 2: if B2->FalseTargetIs(B1->GetTarget()), it transforms // B1 : brtrue(t1, B3) // B2 : brtrue(t2, Bx) // B3 : @@ -123,7 +123,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock() m_t3 = nullptr; - // Check if m_b1 and m_b2 have the same bbTarget + // Check if m_b1 and m_b2 have the same target if (m_b1->TrueTargetIs(m_b2->GetTrueTarget())) { @@ -1325,11 +1325,12 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() { // Update bbRefs and bbPreds // - // Replace pred 'm_b2' for 'm_b2->bbFalseTarget' with 'm_b1' - // Remove pred 'm_b2' for 'm_b2->bbTrueTarget' - m_comp->fgReplacePred(m_b2->GetFalseEdge(), m_b1); + // Replace pred 'm_b2' for m_b2's false target with 'm_b1' + // Remove pred 'm_b2' for m_b2's true target + FlowEdge* falseEdge = m_b2->GetFalseEdge(); + m_comp->fgReplacePred(falseEdge, m_b1); m_comp->fgRemoveRefPred(m_b2->GetTrueEdge()); - m_b1->SetFalseEdge(m_b2->GetFalseEdge()); + m_b1->SetFalseEdge(falseEdge); } // Get rid of the second block @@ -1364,7 +1365,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() // Notes: // m_b1, m_b2 and m_b3 of OptBoolsDsc are set on entry. // -// if B1.bbTarget == b3, it transforms +// if B1->TargetIs(b3), it transforms // B1 : brtrue(t1, B3) // B2 : ret(t2) // B3 : ret(0) diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 90d9eae7e2502f..6ce307a7f0413b 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -1933,7 +1933,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) return false; } - // Since bTest is a BBJ_COND it will have a bbFalseTarget + // Since bTest is a BBJ_COND it will have a false target // BasicBlock* const bJoin = bTest->GetFalseTarget(); noway_assert(bJoin != nullptr); @@ -1955,7 +1955,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) } // It has to be a forward jump. Defer this check until after all the cheap checks - // are done, since it iterates forward in the block list looking for bbTarget. + // are done, since it iterates forward in the block list looking for block's target. // TODO-CQ: Check if we can also optimize the backwards jump as well. 
// if (!fgIsForwardBranch(block, block->GetTarget())) From 141f263223fd0f71b909c301bf56fc939fbbe7fa Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Sat, 24 Feb 2024 16:53:40 -0500 Subject: [PATCH 08/16] Clean up TrueEdgeIs/GetFalseEdge usage --- src/coreclr/jit/block.h | 2 +- src/coreclr/jit/fgbasic.cpp | 2 +- src/coreclr/jit/fgdiagnostic.cpp | 2 +- src/coreclr/jit/fgflow.cpp | 2 +- src/coreclr/jit/fgopt.cpp | 6 +++--- src/coreclr/jit/importer.cpp | 4 ++-- src/coreclr/jit/valuenum.cpp | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 6758c2f28f1ec4..ddf33835192b2b 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -1969,7 +1969,7 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) // If both fall-through and branch successors are identical, then only include // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()). - if (block->TrueTargetIs(block->GetFalseTarget())) + if (block->TrueEdgeIs(block->GetFalseEdge())) { m_end = &m_succs[1]; } diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index f0e35243610d95..170729ab8db634 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -5379,7 +5379,7 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) assert(!bPrev->FalseTargetIs(block)); /* Check if both sides of the BBJ_COND now jump to the same block */ - if (bPrev->TrueTargetIs(bPrev->GetFalseTarget())) + if (bPrev->TrueEdgeIs(bPrev->GetFalseEdge())) { fgRemoveConditionalJump(bPrev); } diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 6a2068169e1a54..4e545a649f990d 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -134,7 +134,7 @@ void Compiler::fgDebugCheckUpdate() // Check for an unnecessary jumps to the next block. // A conditional branch should never jump to the next block as it can be folded into a BBJ_ALWAYS. - if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget())) + if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge())) { noway_assert(!"Unnecessary jump to the next block!"); } diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index 4359449fde46fc..a5c5071ab19d9d 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -151,7 +151,7 @@ FlowEdge* Compiler::fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, FlowE // their successor edge should never have a duplicate count over 1. 
// assert(blockPred->KindIs(BBJ_COND)); - assert(blockPred->TrueTargetIs(blockPred->GetFalseTarget())); + assert(blockPred->TrueEdgeIs(blockPred->GetFalseEdge())); flow->setLikelihood(1.0); } else diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 439cf35731135f..ed1dc45503c14f 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -2505,7 +2505,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* void Compiler::fgRemoveConditionalJump(BasicBlock* block) { assert(block->KindIs(BBJ_COND)); - assert(block->TrueTargetIs(block->GetFalseTarget())); + assert(block->TrueEdgeIs(block->GetFalseEdge())); BasicBlock* target = block->GetTrueTarget(); @@ -2625,7 +2625,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) * block are counted twice so we have to remove one of them */ noway_assert(target->countOfInEdges() > 1); - fgRemoveRefPred(target, block); + fgRemoveRefPred(block->GetTargetEdge()); } //------------------------------------------------------------- @@ -5841,7 +5841,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. - if (!block->KindIs(BBJ_COND) || block->TrueTargetIs(block->GetFalseTarget())) + if (!block->KindIs(BBJ_COND) || block->TrueEdgeIs(block->GetFalseEdge())) { return false; } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 39a37ad8582f9a..a9a1e82522edc8 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -7341,7 +7341,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // We may have already modified `block`'s jump kind, if this is a re-importation. // bool jumpToNextOptimization = false; - if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget())) + if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge())) { JITDUMP(FMT_BB " always branches to " FMT_BB ", changing to BBJ_ALWAYS\n", block->bbNum, block->GetFalseTarget()->bbNum); @@ -7603,7 +7603,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // We may have already modified `block`'s jump kind, if this is a re-importation. 
// bool jumpToNextOptimization = false; - if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget())) + if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge())) { JITDUMP(FMT_BB " always branches to " FMT_BB ", changing to BBJ_ALWAYS\n", block->bbNum, block->GetFalseTarget()->bbNum); diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 5b2af35004cb6e..a523434e107f97 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -9805,7 +9805,7 @@ class ValueNumberState return false; } - if (!predBlock->KindIs(BBJ_COND) || predBlock->TrueTargetIs(predBlock->GetFalseTarget())) + if (!predBlock->KindIs(BBJ_COND) || predBlock->TrueEdgeIs(predBlock->GetFalseEdge())) { return true; } From 5f062e41bbd7b98a250dc5ab20406d06f6f8d9cf Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Sat, 24 Feb 2024 17:03:11 -0500 Subject: [PATCH 09/16] Remove BasicBlock iterator quirks --- src/coreclr/jit/block.h | 84 +++++++++++------------------------------ 1 file changed, 23 insertions(+), 61 deletions(-) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index ddf33835192b2b..53cf65a387d57d 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -314,22 +314,10 @@ class PredBlockList // class BBArrayIterator { - // Quirk: Some BasicBlock kinds refer to their successors with BasicBlock pointers, - // while others use FlowEdge pointers. Eventually, every type will use FlowEdge pointers. - // For now, support iterating with both types. - union { - BasicBlock* const* m_bbEntry; - FlowEdge* const* m_edgeEntry; - }; - - bool iterateEdges; + FlowEdge* const* m_edgeEntry; public: - BBArrayIterator(BasicBlock* const* bbEntry) : m_bbEntry(bbEntry), iterateEdges(false) - { - } - - BBArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry), iterateEdges(true) + BBArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry) { } @@ -337,14 +325,14 @@ class BBArrayIterator BBArrayIterator& operator++() { - assert(m_bbEntry != nullptr); - ++m_bbEntry; + assert(m_edgeEntry != nullptr); + ++m_edgeEntry; return *this; } bool operator!=(const BBArrayIterator& i) const { - return m_bbEntry != i.m_bbEntry; + return m_edgeEntry != i.m_edgeEntry; } }; @@ -1585,21 +1573,8 @@ struct BasicBlock : private LIR::Range // points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at // `m_succs` or at the switch table successor array. BasicBlock* m_succs[2]; - - // Quirk: Some BasicBlock kinds refer to their successors with BasicBlock pointers, - // while others use FlowEdge pointers. Eventually, every type will use FlowEdge pointers. - // For now, support iterating with both types. 
- union { - BasicBlock* const* m_begin; - FlowEdge* const* m_beginEdge; - }; - - union { - BasicBlock* const* m_end; - FlowEdge* const* m_endEdge; - }; - - bool iterateEdges; + FlowEdge* const* m_begin; + FlowEdge* const* m_end; public: BBSuccList(const BasicBlock* block); @@ -1940,7 +1915,6 @@ inline BBArrayIterator BBEhfSuccList::end() const inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); - iterateEdges = false; switch (block->bbKind) { @@ -1958,13 +1932,13 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: - m_succs[0] = block->GetTarget(); + m_succs[0] = block->GetTargetEdge(); m_begin = &m_succs[0]; m_end = &m_succs[1]; break; case BBJ_COND: - m_succs[0] = block->bbFalseTarget; + m_succs[0] = block->GetFalseEdge(); m_begin = &m_succs[0]; // If both fall-through and branch successors are identical, then only include @@ -1975,7 +1949,7 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) } else { - m_succs[1] = block->bbTrueTarget; + m_succs[1] = block->GetTrueEdge(); m_end = &m_succs[2]; } break; @@ -1986,26 +1960,22 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) // been computed. if (block->GetEhfTargets() == nullptr) { - m_beginEdge = nullptr; - m_endEdge = nullptr; + m_begin = nullptr; + m_end = nullptr; } else { - m_beginEdge = block->GetEhfTargets()->bbeSuccs; - m_endEdge = block->GetEhfTargets()->bbeSuccs + block->GetEhfTargets()->bbeCount; + m_begin = block->GetEhfTargets()->bbeSuccs; + m_end = block->GetEhfTargets()->bbeSuccs + block->GetEhfTargets()->bbeCount; } - - iterateEdges = true; break; case BBJ_SWITCH: // We don't use the m_succs in-line data for switches; use the existing jump table in the block. assert(block->bbSwtTargets != nullptr); assert(block->bbSwtTargets->bbsDstTab != nullptr); - m_beginEdge = block->bbSwtTargets->bbsDstTab; - m_endEdge = block->bbSwtTargets->bbsDstTab + block->bbSwtTargets->bbsCount; - - iterateEdges = true; + m_begin = block->bbSwtTargets->bbsDstTab; + m_end = block->bbSwtTargets->bbsDstTab + block->bbSwtTargets->bbsCount; break; default: @@ -2017,12 +1987,12 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) inline BBArrayIterator BasicBlock::BBSuccList::begin() const { - return (iterateEdges ? BBArrayIterator(m_beginEdge) : BBArrayIterator(m_begin)); + return BBArrayIterator(m_begin); } inline BBArrayIterator BasicBlock::BBSuccList::end() const { - return (iterateEdges ? 
BBArrayIterator(m_endEdge) : BBArrayIterator(m_end)); + return BBArrayIterator(m_end); } // We have a simpler struct, BasicBlockList, which is simply a singly-linked @@ -2229,19 +2199,11 @@ struct FlowEdge inline BasicBlock* BBArrayIterator::operator*() const { - if (iterateEdges) - { - assert(m_edgeEntry != nullptr); - FlowEdge* edgeTarget = *m_edgeEntry; - assert(edgeTarget != nullptr); - assert(edgeTarget->getDestinationBlock() != nullptr); - return edgeTarget->getDestinationBlock(); - } - - assert(m_bbEntry != nullptr); - BasicBlock* bTarget = *m_bbEntry; - assert(bTarget != nullptr); - return bTarget; + assert(m_edgeEntry != nullptr); + FlowEdge* edgeTarget = *m_edgeEntry; + assert(edgeTarget != nullptr); + assert(edgeTarget->getDestinationBlock() != nullptr); + return edgeTarget->getDestinationBlock(); } // Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock and FlowEdge) From 0573ada596ff48ef697308823663694b340d9014 Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Mon, 26 Feb 2024 09:47:27 -0500 Subject: [PATCH 10/16] Fix comment --- src/coreclr/jit/block.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 53cf65a387d57d..ed8174155ae171 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -309,7 +309,7 @@ class PredBlockList // BBArrayIterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab. // It is an error (with assert) to yield a nullptr BasicBlock* in this array. -// `m_bbEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr +// `m_edgeEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr // (meaning, no actual iteration will happen). 
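With the union gone, every successor table (the in-line pair, the BBJ_EHFINALLYRET descriptor, and the switch table alike) is an array of FlowEdge*, and dereferencing always goes through the edge to its destination block. A small sketch of a consumer of such a range, assuming only the FlowEdge accessors shown in this patch; the function is illustrative, not part of the change:

    // Sketch: walk a [begin, end) range of successor edges the way BBArrayIterator
    // does. A null begin/end pair simply means there is nothing to iterate.
    void VisitSuccEdges(FlowEdge* const* begin, FlowEdge* const* end)
    {
        for (FlowEdge* const* cur = begin; cur != end; ++cur)
        {
            FlowEdge* const   edge = *cur;
            BasicBlock* const succ = edge->getDestinationBlock();
            assert(succ != nullptr); // operator* relies on the same invariant
            // ... visit `succ`, and consult edge->getLikelihood() when needed ...
        }
    }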
// class BBArrayIterator From 950970699c66ded09c396c4683aa6c1e0d683dac Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Mon, 26 Feb 2024 09:55:45 -0500 Subject: [PATCH 11/16] Remove remaining bbTrueTarget/bbFalseTarget usage --- src/coreclr/jit/block.h | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index ed8174155ae171..929a4a9b90c1a0 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -521,7 +521,7 @@ struct BasicBlock : private LIR::Range }; // Successor edge of a BBJ_COND block if bbTrueEdge is not taken - BasicBlock* bbFalseTarget; + FlowEdge* bbFalseEdge; public: static BasicBlock* New(Compiler* compiler); @@ -666,16 +666,26 @@ struct BasicBlock : private LIR::Range bool TrueTargetIs(const BasicBlock* target) const { - assert(KindIs(BBJ_COND)); - assert(bbTrueTarget != nullptr); - return (bbTrueTarget == target); + return (GetTrueTarget() == target); + } + + bool TrueEdgeIs(const FlowEdge* targetEdge) const + { + return (GetTrueEdge() == targetEdge); } BasicBlock* GetFalseTarget() const + { + return GetFalseEdge()->getDestinationBlock(); + } + + FlowEdge* GetFalseEdge() const { assert(KindIs(BBJ_COND)); - assert(bbFalseTarget != nullptr); - return bbFalseTarget; + assert(bbFalseEdge != nullptr); + assert(bbFalseEdge->getSourceBlock() == this); + assert(bbFalseEdge->getDestinationBlock() != nullptr); + return bbFalseEdge; } void SetFalseEdge(FlowEdge* falseEdge) @@ -689,9 +699,7 @@ struct BasicBlock : private LIR::Range bool FalseTargetIs(const BasicBlock* target) const { - assert(KindIs(BBJ_COND)); - assert(bbFalseTarget != nullptr); - return (bbFalseTarget == target); + return (GetFalseTarget() == target); } void SetCond(FlowEdge* trueEdge, FlowEdge* falseEdge) @@ -786,14 +794,14 @@ struct BasicBlock : private LIR::Range BasicBlock* GetTrueTargetRaw() const { assert(KindIs(BBJ_COND)); - return bbTrueTarget; + return (bbTrueEdge == nullptr) ? nullptr : bbTrueEdge->getDestinationBlock(); } // Return the BBJ_COND false target; it might be null. Only used during dumping. BasicBlock* GetFalseTargetRaw() const { assert(KindIs(BBJ_COND)); - return bbFalseTarget; + return (bbFalseEdge == nullptr) ? nullptr : bbFalseEdge->getDestinationBlock(); } #endif // DEBUG From 4d092c103bcc85df3966216f0743a54799aa9474 Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Mon, 26 Feb 2024 13:52:26 -0500 Subject: [PATCH 12/16] Clean replay --- src/coreclr/jit/block.cpp | 14 +- src/coreclr/jit/block.h | 375 ++++++++++---------- src/coreclr/jit/fgbasic.cpp | 27 +- src/coreclr/jit/fgehopt.cpp | 1 - src/coreclr/jit/fgopt.cpp | 24 +- src/coreclr/jit/flowgraph.cpp | 26 +- src/coreclr/jit/helperexpansion.cpp | 77 ++-- src/coreclr/jit/indirectcalltransformer.cpp | 14 +- src/coreclr/jit/lower.cpp | 10 +- src/coreclr/jit/optimizebools.cpp | 11 +- src/coreclr/jit/optimizer.cpp | 2 +- 11 files changed, 302 insertions(+), 279 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 355921da105b9e..35a73bfa950bd3 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -858,10 +858,10 @@ void BasicBlock::TransferTarget(BasicBlock* from) from->bbEhfTargets = nullptr; // Make sure nobody uses the descriptor after this. 
break; case BBJ_COND: - SetCond(from->GetTrueEdge(), from->GetFalseEdge()); + SetCond(from->bbTrueEdge, from->bbFalseEdge); break; case BBJ_ALWAYS: - SetKindAndTargetEdge(BBJ_ALWAYS, from->GetTargetEdge()); + SetKindAndTargetEdge(BBJ_ALWAYS, from->bbTargetEdge); CopyFlags(from, BBF_NONE_QUIRK); break; case BBJ_CALLFINALLY: @@ -869,7 +869,7 @@ void BasicBlock::TransferTarget(BasicBlock* from) case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: - SetKindAndTargetEdge(from->GetKind(), from->GetTargetEdge()); + SetKindAndTargetEdge(from->GetKind(), from->bbTargetEdge); break; default: SetKindAndTargetEdge(from->GetKind()); // Clear the target @@ -1145,7 +1145,7 @@ unsigned BasicBlock::NumSucc() const return 1; case BBJ_COND: - if (TrueEdgeIs(GetFalseEdge())) + if (bbTrueEdge == bbFalseEdge) { return 1; } @@ -1209,7 +1209,7 @@ BasicBlock* BasicBlock::GetSucc(unsigned i) const else { assert(i == 1); - assert(!TrueEdgeIs(GetFalseEdge())); + assert(bbTrueEdge != bbFalseEdge); return GetTrueTarget(); } @@ -1270,7 +1270,7 @@ unsigned BasicBlock::NumSucc(Compiler* comp) return 1; case BBJ_COND: - if (TrueEdgeIs(GetFalseEdge())) + if (bbTrueEdge == bbFalseEdge) { return 1; } @@ -1332,7 +1332,7 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp) else { assert(i == 1); - assert(!TrueEdgeIs(GetFalseEdge())); + assert(bbTrueEdge != bbFalseEdge); return GetTrueTarget(); } diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 929a4a9b90c1a0..3ea13e2fe1438d 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -494,6 +494,185 @@ enum class BasicBlockVisit // clang-format on +//------------------------------------------------------------------------- +// FlowEdge -- control flow edge +// +// In compiler terminology the control flow between two BasicBlocks +// is typically referred to as an "edge". Most well known are the +// backward branches for loops, which are often called "back-edges". +// +// "struct FlowEdge" is the type that represents our control flow edges. +// This type is a linked list of zero or more "edges". +// (The list of zero edges is represented by NULL.) +// Every BasicBlock has a field called bbPreds of this type. This field +// represents the list of "edges" that flow into this BasicBlock. +// The FlowEdge type only stores the BasicBlock* of the source for the +// control flow edge. The destination block for the control flow edge +// is implied to be the block which contained the bbPreds field. +// +// For a switch branch target there may be multiple "edges" that have +// the same source block (and destination block). We need to count the +// number of these edges so that during optimization we will know when +// we have zero of them. Rather than have extra FlowEdge entries we +// track this via the DupCount property. +// +// When we have Profile weight for the BasicBlocks we can usually compute +// the number of times each edge was executed by examining the adjacent +// BasicBlock weights. As we are doing for BasicBlocks, we call the number +// of times that a control flow edge was executed the "edge weight". +// In order to compute the edge weights we need to use a bounded range +// for every edge weight. These two fields, 'flEdgeWeightMin' and 'flEdgeWeightMax' +// are used to hold a bounded range. Most often these will converge such +// that both values are the same and that value is the exact edge weight. +// Sometimes we are left with a rage of possible values between [Min..Max] +// which represents an inexact edge weight. 
+// +// The bbPreds list is initially created by Compiler::fgLinkBasicBlocks() +// and is incrementally kept up to date. +// +// The edge weight are computed by Compiler::fgComputeEdgeWeights() +// the edge weights are used to straighten conditional branches +// by Compiler::fgReorderBlocks() +// +struct FlowEdge +{ +private: + // The next predecessor edge in the list, nullptr for end of list. + FlowEdge* m_nextPredEdge; + + // The source of the control flow + BasicBlock* m_sourceBlock; + + // The destination of the control flow + BasicBlock* m_destBlock; + + // Edge weights + weight_t m_edgeWeightMin; + weight_t m_edgeWeightMax; + + // Likelihood that m_sourceBlock transfers control along this edge. + // Values in range [0..1] + weight_t m_likelihood; + + // The count of duplicate "edges" (used for switch stmts or degenerate branches) + unsigned m_dupCount; + + // True if likelihood has been set + bool m_likelihoodSet; + +public: + FlowEdge(BasicBlock* sourceBlock, BasicBlock* destBlock, FlowEdge* rest) + : m_nextPredEdge(rest) + , m_sourceBlock(sourceBlock) + , m_destBlock(destBlock) + , m_edgeWeightMin(0) + , m_edgeWeightMax(0) + , m_likelihood(0) + , m_dupCount(0) + , m_likelihoodSet(false) + { + } + + FlowEdge* getNextPredEdge() const + { + return m_nextPredEdge; + } + + FlowEdge** getNextPredEdgeRef() + { + return &m_nextPredEdge; + } + + void setNextPredEdge(FlowEdge* newEdge) + { + m_nextPredEdge = newEdge; + } + + BasicBlock* getSourceBlock() const + { + assert(m_sourceBlock != nullptr); + return m_sourceBlock; + } + + void setSourceBlock(BasicBlock* newBlock) + { + assert(newBlock != nullptr); + m_sourceBlock = newBlock; + } + + BasicBlock* getDestinationBlock() const + { + assert(m_destBlock != nullptr); + return m_destBlock; + } + + void setDestinationBlock(BasicBlock* newBlock) + { + assert(newBlock != nullptr); + m_destBlock = newBlock; + } + + weight_t edgeWeightMin() const + { + return m_edgeWeightMin; + } + + weight_t edgeWeightMax() const + { + return m_edgeWeightMax; + } + + // These two methods are used to set new values for edge weights. + // They return false if the newWeight is not between the current [min..max] + // when slop is non-zero we allow for the case where our weights might be off by 'slop' + // + bool setEdgeWeightMinChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); + bool setEdgeWeightMaxChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); + void setEdgeWeights(weight_t newMinWeight, weight_t newMaxWeight, BasicBlock* bDst); + + weight_t getLikelihood() const + { + return m_likelihood; + } + + void setLikelihood(weight_t likelihood) + { + assert(likelihood >= 0.0); + assert(likelihood <= 1.0); + m_likelihoodSet = true; + m_likelihood = likelihood; + } + + void clearLikelihood() + { + m_likelihood = 0.0; + m_likelihoodSet = false; + } + + bool hasLikelihood() const + { + return m_likelihoodSet; + } + + weight_t getLikelyWeight() const; + + unsigned getDupCount() const + { + return m_dupCount; + } + + void incrementDupCount() + { + m_dupCount++; + } + + void decrementDupCount() + { + assert(m_dupCount >= 1); + m_dupCount--; + } +}; + //------------------------------------------------------------------------ // BasicBlock: describes a basic block in the flowgraph. 
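Before the BasicBlock half of this change, the likelihood contract on the relocated FlowEdge is worth seeing in use. A small sketch, with the helper name and the 70/30 split chosen purely for illustration:

    // Sketch: record a 70/30 split on a two-way branch. Likelihoods live on the
    // edges, must lie in [0..1], and hasLikelihood() reports whether a value has
    // been assigned yet; the outgoing likelihoods of a block normally sum to 1.0.
    void SetBranchSplit(FlowEdge* trueEdge, FlowEdge* falseEdge)
    {
        trueEdge->setLikelihood(0.7);
        falseEdge->setLikelihood(0.3);

        assert(trueEdge->hasLikelihood());
        assert(falseEdge->hasLikelihood());
    }

getLikelyWeight(), declared here and defined out of line later in this patch, then scales the source block's weight by that fraction.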
// @@ -702,6 +881,11 @@ struct BasicBlock : private LIR::Range return (GetFalseTarget() == target); } + bool FalseEdgeIs(const FlowEdge* targetEdge) const + { + return (GetFalseEdge() == targetEdge); + } + void SetCond(FlowEdge* trueEdge, FlowEdge* falseEdge) { assert(trueEdge != nullptr); @@ -778,7 +962,7 @@ struct BasicBlock : private LIR::Range BasicBlock* GetFinallyContinuation() const { assert(KindIs(BBJ_CALLFINALLYRET)); - return ; + return GetTarget(); } #ifdef DEBUG @@ -1580,7 +1764,7 @@ struct BasicBlock : private LIR::Range // need to call a function or execute another `switch` to get them. Also, pre-compute the begin and end // points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at // `m_succs` or at the switch table successor array. - BasicBlock* m_succs[2]; + FlowEdge* m_succs[2]; FlowEdge* const* m_begin; FlowEdge* const* m_end; @@ -2020,188 +2204,13 @@ struct BasicBlockList } }; -//------------------------------------------------------------------------- -// FlowEdge -- control flow edge -// -// In compiler terminology the control flow between two BasicBlocks -// is typically referred to as an "edge". Most well known are the -// backward branches for loops, which are often called "back-edges". -// -// "struct FlowEdge" is the type that represents our control flow edges. -// This type is a linked list of zero or more "edges". -// (The list of zero edges is represented by NULL.) -// Every BasicBlock has a field called bbPreds of this type. This field -// represents the list of "edges" that flow into this BasicBlock. -// The FlowEdge type only stores the BasicBlock* of the source for the -// control flow edge. The destination block for the control flow edge -// is implied to be the block which contained the bbPreds field. -// -// For a switch branch target there may be multiple "edges" that have -// the same source block (and destination block). We need to count the -// number of these edges so that during optimization we will know when -// we have zero of them. Rather than have extra FlowEdge entries we -// track this via the DupCount property. -// -// When we have Profile weight for the BasicBlocks we can usually compute -// the number of times each edge was executed by examining the adjacent -// BasicBlock weights. As we are doing for BasicBlocks, we call the number -// of times that a control flow edge was executed the "edge weight". -// In order to compute the edge weights we need to use a bounded range -// for every edge weight. These two fields, 'flEdgeWeightMin' and 'flEdgeWeightMax' -// are used to hold a bounded range. Most often these will converge such -// that both values are the same and that value is the exact edge weight. -// Sometimes we are left with a rage of possible values between [Min..Max] -// which represents an inexact edge weight. -// -// The bbPreds list is initially created by Compiler::fgLinkBasicBlocks() -// and is incrementally kept up to date. -// -// The edge weight are computed by Compiler::fgComputeEdgeWeights() -// the edge weights are used to straighten conditional branches -// by Compiler::fgReorderBlocks() -// -struct FlowEdge -{ -private: - // The next predecessor edge in the list, nullptr for end of list. 
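The two-edge SetCond shown earlier in this hunk makes wiring a fresh two-way branch a three-line affair. A sketch using only calls that appear elsewhere in this series; the helper name is illustrative:

    // Sketch: turn `block` into a BBJ_COND over the given targets. fgAddRefPred
    // returns the FlowEdge it creates, which is exactly what SetCond consumes,
    // and the edge accessors then assert that each edge's source is `block` and
    // its destination is non-null.
    void MakeConditional(Compiler* comp, BasicBlock* block, BasicBlock* trueTarget, BasicBlock* falseTarget)
    {
        FlowEdge* const trueEdge  = comp->fgAddRefPred(trueTarget, block);
        FlowEdge* const falseEdge = comp->fgAddRefPred(falseTarget, block);
        block->SetCond(trueEdge, falseEdge);
    }

fgCreateGCPoll and fgOptimizeUncondBranchToSimpleCond later in the patch follow this shape.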
- FlowEdge* m_nextPredEdge; - - // The source of the control flow - BasicBlock* m_sourceBlock; - - // The destination of the control flow - BasicBlock* m_destBlock; - - // Edge weights - weight_t m_edgeWeightMin; - weight_t m_edgeWeightMax; - - // Likelihood that m_sourceBlock transfers control along this edge. - // Values in range [0..1] - weight_t m_likelihood; - - // The count of duplicate "edges" (used for switch stmts or degenerate branches) - unsigned m_dupCount; - - // True if likelihood has been set - bool m_likelihoodSet; - -public: - FlowEdge(BasicBlock* sourceBlock, BasicBlock* destBlock, FlowEdge* rest) - : m_nextPredEdge(rest) - , m_sourceBlock(sourceBlock) - , m_destBlock(destBlock) - , m_edgeWeightMin(0) - , m_edgeWeightMax(0) - , m_likelihood(0) - , m_dupCount(0) - , m_likelihoodSet(false) - { - } - - FlowEdge* getNextPredEdge() const - { - return m_nextPredEdge; - } - - FlowEdge** getNextPredEdgeRef() - { - return &m_nextPredEdge; - } - - void setNextPredEdge(FlowEdge* newEdge) - { - m_nextPredEdge = newEdge; - } - - BasicBlock* getSourceBlock() const - { - assert(m_sourceBlock != nullptr); - return m_sourceBlock; - } - - void setSourceBlock(BasicBlock* newBlock) - { - assert(newBlock != nullptr); - m_sourceBlock = newBlock; - } - - BasicBlock* getDestinationBlock() const - { - assert(m_destBlock != nullptr); - return m_destBlock; - } - - void setDestinationBlock(BasicBlock* newBlock) - { - assert(newBlock != nullptr); - m_destBlock = newBlock; - } - - weight_t edgeWeightMin() const - { - return m_edgeWeightMin; - } - - weight_t edgeWeightMax() const - { - return m_edgeWeightMax; - } - - // These two methods are used to set new values for edge weights. - // They return false if the newWeight is not between the current [min..max] - // when slop is non-zero we allow for the case where our weights might be off by 'slop' - // - bool setEdgeWeightMinChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); - bool setEdgeWeightMaxChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); - void setEdgeWeights(weight_t newMinWeight, weight_t newMaxWeight, BasicBlock* bDst); +// FlowEdge implementations (that are required to be defined after the declaration of BasicBlock) - weight_t getLikelihood() const - { - return m_likelihood; - } - - void setLikelihood(weight_t likelihood) - { - assert(likelihood >= 0.0); - assert(likelihood <= 1.0); - m_likelihoodSet = true; - m_likelihood = likelihood; - } - - void clearLikelihood() - { - m_likelihood = 0.0; - m_likelihoodSet = false; - } - - bool hasLikelihood() const - { - return m_likelihoodSet; - } - - weight_t getLikelyWeight() const - { - assert(m_likelihoodSet); - return m_likelihood * m_sourceBlock->bbWeight; - } - - unsigned getDupCount() const - { - return m_dupCount; - } - - void incrementDupCount() - { - m_dupCount++; - } - - void decrementDupCount() - { - assert(m_dupCount >= 1); - m_dupCount--; - } -}; +inline weight_t FlowEdge::getLikelyWeight() const +{ + assert(m_likelihoodSet); + return m_likelihood * m_sourceBlock->bbWeight; +} // BasicBlock iterator implementations (that are required to be defined after the declaration of FlowEdge) diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 170729ab8db634..b14927b61c7c21 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -211,12 +211,6 @@ bool Compiler::fgEnsureFirstBBisScratch() if (fgFirstBB != nullptr) { - // If we have profile data the new block will inherit fgFirstBlock's weight - 
if (fgFirstBB->hasProfileWeight()) - { - block->inheritWeight(fgFirstBB); - } - // The first block has an implicit ref count which we must // remove. Note the ref count could be greater than one, if // the first block is not scratch and is targeted by a @@ -224,8 +218,15 @@ bool Compiler::fgEnsureFirstBBisScratch() assert(fgFirstBB->bbRefs >= 1); fgFirstBB->bbRefs--; - // The new scratch bb will fall through to the old first bb block = BasicBlock::New(this); + + // If we have profile data the new block will inherit fgFirstBlock's weight + if (fgFirstBB->hasProfileWeight()) + { + block->inheritWeight(fgFirstBB); + } + + // The new scratch bb will fall through to the old first bb FlowEdge* const edge = fgAddRefPred(fgFirstBB, block); edge->setLikelihood(1.0); block->SetKindAndTargetEdge(BBJ_ALWAYS, edge); @@ -660,7 +661,7 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas { FlowEdge* const oldEdge = block->GetTrueEdge(); - if (block->TrueEdgeIs(oldEdge)) + if (block->FalseEdgeIs(oldEdge)) { // fgRemoveRefPred returns nullptr for BBJ_COND blocks with two flow edges to target fgRemoveConditionalJump(block); @@ -4820,14 +4821,14 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) // Remove flags from the old block that are no longer possible. curr->RemoveFlags(BBF_HAS_JMP | BBF_RETLESS_CALL); + // Default to fallthrough, and add the arc for that. + FlowEdge* const newEdge = fgAddRefPred(newBlock, curr); + newEdge->setLikelihood(1.0); + // Transfer the kind and target. Do this after the code above, to avoid null-ing out the old targets used by the // above code (and so newBlock->bbNext is valid, so SetCond() can initialize the false target if newBlock is a // BBJ_COND). newBlock->TransferTarget(curr); - - // Default to fallthrough, and add the arc for that. 
- FlowEdge* const newEdge = fgAddRefPred(newBlock, curr); - newEdge->setLikelihood(1.0); curr->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); curr->SetFlags(BBF_NONE_QUIRK); @@ -5058,7 +5059,6 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // TODO-NoFallThrough: Once false target can diverge from bbNext, this will be unnecessary for BBJ_COND newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */); newBlock->SetFlags(BBF_NONE_QUIRK); - assert(newBlock->JumpsToNext()); } else { @@ -5458,6 +5458,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); FlowEdge* oldEdge = bSrc->GetFalseEdge(); fgReplacePred(oldEdge, jmpBlk); + jmpBlk->SetTargetEdge(oldEdge); assert(jmpBlk->TargetIs(bDst)); FlowEdge* newEdge = fgAddRefPred(jmpBlk, bSrc, oldEdge); diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index f2b73ff0e162a5..18eb96c754dbc3 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -2114,7 +2114,6 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, // Wire up the new flow FlowEdge* const falseEdge = fgAddRefPred(newBlock, predBlock, predEdge); predBlock->SetFalseEdge(falseEdge); - assert(predBlock->FalseTargetIs(canonicalBlock)); FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, newBlock, predEdge); newBlock->SetTargetEdge(newEdge); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index ed1dc45503c14f..25b051d4ff72ee 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -776,7 +776,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() fromBlock->SetFlags(BBF_INTERNAL); newBlock->RemoveFlags(BBF_DONT_REMOVE); addedBlocks++; - FlowEdge* const normalTryEntryEdge = fromBlock->GetFalseEdge(); + FlowEdge* const normalTryEntryEdge = fromBlock->GetTargetEdge(); GenTree* const entryStateLcl = gtNewLclvNode(entryStateVar, TYP_INT); GenTree* const compareEntryStateToZero = @@ -1288,24 +1288,31 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: + { /* Update the predecessor list for bNext's target */ - fgReplacePred(bNext->GetTargetEdge(), block); + FlowEdge* const targetEdge = bNext->GetTargetEdge(); + fgReplacePred(targetEdge, block); - block->SetKindAndTargetEdge(bNext->GetKind(), bNext->GetTargetEdge()); + block->SetKindAndTargetEdge(bNext->GetKind(), targetEdge); break; + } case BBJ_COND: + { /* Update the predecessor list for bNext's true target */ - fgReplacePred(bNext->GetTrueEdge(), block); + FlowEdge* const trueEdge = bNext->GetTrueEdge(); + FlowEdge* const falseEdge = bNext->GetFalseEdge(); + fgReplacePred(trueEdge, block); /* Update the predecessor list for bNext's false target if it is different from the true target */ - if (!bNext->TrueEdgeIs(bNext->GetFalseEdge())) + if (trueEdge != falseEdge) { - fgReplacePred(bNext->GetFalseEdge(), block); + fgReplacePred(falseEdge, block); } - block->SetCond(bNext->GetTrueEdge(), bNext->GetFalseEdge()); + block->SetCond(trueEdge, falseEdge); break; + } case BBJ_EHFINALLYRET: block->SetEhf(bNext->GetEhfTargets()); @@ -5020,8 +5027,9 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh // Optimize the Conditional JUMP to go to the new target fgRemoveRefPred(block->GetFalseEdge()); + fgRemoveRefPred(bNext->GetTargetEdge()); block->SetFalseEdge(block->GetTrueEdge()); - FlowEdge* const newEdge = fgAddRefPred(bNext->GetTarget(), 
block, fgRemoveRefPred(bNext->GetTargetEdge())); + FlowEdge* const newEdge = fgAddRefPred(bNext->GetTarget(), block, bNext->GetTargetEdge()); block->SetTrueEdge(newEdge); /* diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 55769509d5237b..b9cd454693fe36 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -267,13 +267,10 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) // top -> poll -> bottom (lexically) // so that we jump over poll to get to bottom. BasicBlock* top = block; - BBKinds oldJumpKind = top->GetKind(); BasicBlock* poll = fgNewBBafter(BBJ_ALWAYS, top, true); bottom = fgNewBBafter(top->GetKind(), poll, true); - bottom->TransferTarget(top); - // Update block flags const BasicBlockFlags originalFlags = top->GetFlagsRaw() | BBF_GC_SAFE_POINT; @@ -297,7 +294,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) } // Remove the last statement from Top and add it to Bottom if necessary. - if ((oldJumpKind == BBJ_COND) || (oldJumpKind == BBJ_RETURN) || (oldJumpKind == BBJ_THROW)) + if (top->KindIs(BBJ_COND, BBJ_RETURN, BBJ_THROW)) { Statement* stmt = top->firstStmt(); while (stmt->GetNextStmt() != nullptr) @@ -364,7 +361,6 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. FlowEdge* const trueEdge = fgAddRefPred(bottom, top); FlowEdge* const falseEdge = fgAddRefPred(poll, top); - top->SetCond(trueEdge, falseEdge); FlowEdge* const newEdge = fgAddRefPred(bottom, poll); poll->SetTargetEdge(newEdge); @@ -372,30 +368,37 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) // Replace Top with Bottom in the predecessor list of all outgoing edges from Bottom // (1 for unconditional branches, 2 for conditional branches, N for switches). - switch (oldJumpKind) + switch (top->GetKind()) { case BBJ_RETURN: case BBJ_THROW: // no successors break; + case BBJ_COND: // replace predecessor in true/false successors. 
noway_assert(!bottom->IsLast()); - fgReplacePred(bottom->GetFalseTarget(), top, bottom); - fgReplacePred(bottom->GetTrueTarget(), top, bottom); + fgReplacePred(top->GetFalseEdge(), bottom); + fgReplacePred(top->GetTrueEdge(), bottom); break; case BBJ_ALWAYS: case BBJ_CALLFINALLY: - fgReplacePred(bottom->GetTarget(), top, bottom); + fgReplacePred(top->GetTargetEdge(), bottom); break; + case BBJ_SWITCH: NO_WAY("SWITCH should be a call rather than an inlined poll."); break; + default: NO_WAY("Unknown block type for updating predecessor lists."); + break; } + bottom->TransferTarget(top); + top->SetCond(trueEdge, falseEdge); + if (compCurBB == top) { compCurBB = bottom; @@ -2759,6 +2762,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) /* Allocate a new basic block */ BasicBlock* newHead = BasicBlock::New(this); + newHead->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK); newHead->inheritWeight(block); newHead->bbRefs = 0; @@ -2780,11 +2784,13 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) switch (predBlock->GetKind()) { case BBJ_CALLFINALLY: + { noway_assert(predBlock->TargetIs(block)); fgRemoveRefPred(predBlock->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(newHead, predBlock); predBlock->SetTargetEdge(newEdge); break; + } default: // The only way into the handler is via a BBJ_CALLFINALLY (to a finally handler), or @@ -2798,9 +2804,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) assert(fgGetPredForBlock(block, newHead) == nullptr); FlowEdge* const newEdge = fgAddRefPred(block, newHead); newHead->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); - assert(newHead->JumpsToNext()); - newHead->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 8af6eab676409c..d9801afbdafc82 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -322,9 +322,6 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm BasicBlock* fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, true); - assert(fallbackBb->JumpsToNext()); - fallbackBb->SetFlags(BBF_NONE_QUIRK); - // Fast-path basic block GenTree* fastpathValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), fastPathValueClone); BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fastpathValueDef, debugInfo); @@ -379,12 +376,14 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm { FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb); - fasthPathBb->SetTargetEdge(newEdge); + fastPathBb->SetTargetEdge(newEdge); } { FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb); fallbackBb->SetTargetEdge(newEdge); + assert(fallbackBb->JumpsToNext()); + fallbackBb->SetFlags(BBF_NONE_QUIRK); } assert(prevBb->KindIs(BBJ_ALWAYS)); @@ -404,12 +403,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm } // fallbackBb is reachable from both nullcheckBb and sizeCheckBb - FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, nullcheckBb); - nullcheckBb->SetTrueEdge(trueEdge); - // fastPathBb is only reachable from successful nullcheckBb - FlowEdge* const falseEdge = fgAddRefPred(fastPathBb, nullcheckBb); - nullcheckBb->SetFalseEdge(falseEdge); } else { @@ -418,11 +412,14 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm prevBb->SetTargetEdge(newEdge); // No 
size check, nullcheckBb jumps to fast path - fgAddRefPred(fastPathBb, nullcheckBb); // fallbackBb is only reachable from nullcheckBb (jump destination) - fgAddRefPred(fallbackBb, nullcheckBb); } + FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, nullcheckBb); + FlowEdge* const falseEdge = fgAddRefPred(fastPathBb, nullcheckBb); + nullcheckBb->SetTrueEdge(trueEdge); + nullcheckBb->SetFalseEdge(falseEdge); + // // Re-distribute weights (see '[weight: X]' on the diagrams above) // TODO: consider marking fallbackBb as rarely-taken @@ -725,8 +722,8 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St // // Update preds in all new blocks // - FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, tlsRootNullCondBB); - FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, tlsRootNullCondBB); + FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, tlsRootNullCondBB); + FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, tlsRootNullCondBB); tlsRootNullCondBB->SetTrueEdge(trueEdge); tlsRootNullCondBB->SetFalseEdge(falseEdge); @@ -1118,7 +1115,7 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* { FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb); - fasthPathBb->SetTargetEdge(newEdge); + fastPathBb->SetTargetEdge(newEdge); } { @@ -1410,8 +1407,6 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G // TODO-CQ: for JIT we can replace the original call with CORINFO_HELP_INITCLASS // that only accepts a single argument BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, isInitedBb, call, debugInfo, true); - assert(helperCallBb->JumpsToNext()); - helperCallBb->SetFlags(BBF_NONE_QUIRK); GenTree* replacementNode = nullptr; if (retValKind == SHRV_STATIC_BASE_PTR) @@ -1473,27 +1468,30 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G // Unlink block and prevBb fgRemoveRefPred(prevBb->GetTargetEdge()); - // Block has two preds now: either isInitedBb or helperCallBb - { - FlowEdge* const newEdge = fgAddRefPred(block, isInitedBb); - isInitedBb->SetTargetEdge(newEdge); - } - { + // Block has two preds now: either isInitedBb or helperCallBb FlowEdge* const newEdge = fgAddRefPred(block, helperCallBb); helperCallBb->SetTargetEdge(newEdge); + assert(helperCallBb->JumpsToNext()); + helperCallBb->SetFlags(BBF_NONE_QUIRK); } - // prevBb always flows into isInitedBb - assert(prevBb->KindIs(BBJ_ALWAYS)); - FlowEdge* const newEdge = fgAddRefPred(isInitedBb, prevBb); - prevBb->SetTargetEdge(newEdge); - prevBb->SetFlags(BBF_NONE_QUIRK); - assert(prevBb->JumpsToNext()); + { + // prevBb always flows into isInitedBb + assert(prevBb->KindIs(BBJ_ALWAYS)); + FlowEdge* const newEdge = fgAddRefPred(isInitedBb, prevBb); + prevBb->SetTargetEdge(newEdge); + prevBb->SetFlags(BBF_NONE_QUIRK); + assert(prevBb->JumpsToNext()); + } - // Both fastPathBb and helperCallBb have a single common pred - isInitedBb - FlowEdge* const falseEdge = fgAddRefPred(helperCallBb, isInitedBb); - isInitedBb->SetFalseEdge(falseEdge); + { + // Both fastPathBb and helperCallBb have a single common pred - isInitedBb + FlowEdge* const trueEdge = fgAddRefPred(block, isInitedBb); + FlowEdge* const falseEdge = fgAddRefPred(helperCallBb, isInitedBb); + isInitedBb->SetTrueEdge(trueEdge); + isInitedBb->SetFalseEdge(falseEdge); + } // // Re-distribute weights @@ -1745,8 +1743,7 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // but that would be a bit less efficient since we would have to load the 
data from memory. // BasicBlock* fastpathBb = fgNewBBafter(BBJ_ALWAYS, lengthCheckBb, true); - assert(fastpathBb->JumpsToNext()); - fastpathBb->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK); + fastpathBb->SetFlags(BBF_INTERNAL); // The widest type we can use for loads const var_types maxLoadType = roundDownMaxType(srcLenU8); @@ -1801,17 +1798,15 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // block is no longer a predecessor of prevBb fgRemoveRefPred(prevBb->GetTargetEdge()); - // prevBb flows into lengthCheckBb - assert(prevBb->KindIs(BBJ_ALWAYS)); - { + // prevBb flows into lengthCheckBb + assert(prevBb->KindIs(BBJ_ALWAYS)); FlowEdge* const newEdge = fgAddRefPred(lengthCheckBb, prevBb); prevBb->SetTargetEdge(newEdge); + prevBb->SetFlags(BBF_NONE_QUIRK); + assert(prevBb->JumpsToNext()); } - prevBb->SetFlags(BBF_NONE_QUIRK); - assert(prevBb->JumpsToNext()); - { // lengthCheckBb has two successors: block and fastpathBb FlowEdge* const trueEdge = fgAddRefPred(block, lengthCheckBb); @@ -1823,7 +1818,9 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, { // fastpathBb flows into block FlowEdge* const newEdge = fgAddRefPred(block, fastpathBb); - fastPathBb->SetTargetEdge(newEdge); + fastpathBb->SetTargetEdge(newEdge); + assert(fastpathBb->JumpsToNext()); + fastpathBb->SetFlags(BBF_NONE_QUIRK); } // diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index f8b58880c3a152..a6221664925082 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -218,7 +218,6 @@ class IndirectCallTransformer // Arguments: // jumpKind - jump kind for the new basic block // insertAfter - basic block, after which compiler has to insert the new one. - // jumpDest - jump target for the new basic block. Defaults to nullptr. // // Return Value: // new basic block. @@ -288,16 +287,19 @@ class IndirectCallTransformer checkBlock->SetCond(elseEdge, thenEdge); // thenBlock - assert(thenBlock->TargetIs(remainderBlock)); { + assert(thenBlock->KindIs(BBJ_ALWAYS)); FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, thenBlock); newEdge->setLikelihood(1.0); + thenBlock->SetTargetEdge(newEdge); } // elseBlock { + assert(elseBlock->KindIs(BBJ_ALWAYS)); FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, elseBlock); newEdge->setLikelihood(1.0); + elseBlock->SetTargetEdge(newEdge); } } @@ -614,10 +616,12 @@ class IndirectCallTransformer weight_t checkLikelihoodWt = ((weight_t)checkLikelihood) / 100.0; // prevCheckBlock is expected to jump to this new check (if its type check doesn't succeed) + assert(prevCheckBlock->KindIs(BBJ_ALWAYS)); + assert(prevCheckBlock->JumpsToNext()); FlowEdge* const checkEdge = compiler->fgAddRefPred(checkBlock, prevCheckBlock); checkEdge->setLikelihood(checkLikelihoodWt); checkBlock->inheritWeightPercentage(currBlock, checkLikelihood); - prevCheckBlock->SetCond(checkEdge, prevCheckBlock->GetFalseEdge()); + prevCheckBlock->SetCond(checkEdge, prevCheckBlock->GetTargetEdge()); } // Find last arg with a side effect. 
All args with any effect @@ -1064,9 +1068,11 @@ class IndirectCallTransformer // where we know the last check is always true (in case of "exact" GDV) if (!checkFallsThrough) { + assert(checkBlock->KindIs(BBJ_ALWAYS)); + assert(checkBlock->JumpsToNext()); FlowEdge* const checkEdge = compiler->fgAddRefPred(elseBlock, checkBlock); checkEdge->setLikelihood(elseLikelihoodDbl); - checkBlock->SetCond(checkEdge, checkBlock->GetFalseEdge()); + checkBlock->SetCond(checkEdge, checkBlock->GetTargetEdge()); } else { diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 8c7d22a85ea775..8716b6997ba243 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -951,7 +951,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // originalSwitchBB is now a BBJ_ALWAYS, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. assert(originalSwitchBB->KindIs(BBJ_ALWAYS)); - assert(originalSwitchBB->TargetEdgeIs(afterDefaultCondBlock)); + assert(originalSwitchBB->TargetIs(afterDefaultCondBlock)); assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->GetSwitchTargets()->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. @@ -1054,7 +1054,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // Remove the switch from the predecessor list of this case target's block. // We'll add the proper new predecessor edge later. - FlowEdge* const oldEdge = comp->fgRemoveRefPred(jumpTab[i]); + comp->fgRemoveRefPred(jumpTab[i]); if (targetBlock == followingBB) { @@ -1081,7 +1081,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } // Wire up the predecessor list for the "branch" case. - FlowEdge* const newEdge = comp->fgAddRefPred(targetBlock, currentBlock, oldEdge); + FlowEdge* const newEdge = comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]); if (!fAnyTargetFollows && (i == jumpCnt - 2)) { @@ -1310,8 +1310,8 @@ bool Lowering::TryLowerSwitchToBitTest( comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); // TODO: Use old edges to influence new edge likelihoods? 
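The lines that follow recreate those two edges; gathered into one place, the bit-test rewiring looks like the sketch below. The helper name is illustrative, the second fgRemoveAllRefPreds call is assumed to happen nearby, and the true/false ordering depends on block layout, as the surrounding code decides:

    // Sketch: collapse a two-target switch into a conditional branch. All
    // duplicate switch edges into each case block are removed first, then one
    // fresh edge per case is created and handed to SetCond.
    void RewireSwitchAsBitTest(Compiler* comp, BasicBlock* bbSwitch, BasicBlock* bbCase0, BasicBlock* bbCase1)
    {
        comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
        comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);

        FlowEdge* const case0Edge = comp->fgAddRefPred(bbCase0, bbSwitch);
        FlowEdge* const case1Edge = comp->fgAddRefPred(bbCase1, bbSwitch);

        // Assumption for this sketch: bbCase0 lexically follows bbSwitch, so the
        // emitted JCC branches to bbCase1 and falls through to bbCase0.
        bbSwitch->SetCond(case1Edge, case0Edge);
    }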
- FlowEdge* const case0Edge = comp->fgAddRefPred(bbCase0, bbSwitch); - FlowEdge* const case1Edge = comp->fgAddRefPred(bbCase1, bbSwitch); + case0Edge = comp->fgAddRefPred(bbCase0, bbSwitch); + case1Edge = comp->fgAddRefPred(bbCase1, bbSwitch); if (bbSwitch->NextIs(bbCase0)) { diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 44a3db6e840b59..283a1cee47295b 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -806,14 +806,10 @@ bool OptBoolsDsc::optOptimizeRangeTests() { return false; } - - // Remove the 2nd condition block as we no longer need it - m_comp->fgRemoveRefPred(m_b1->GetFalseEdge()); - m_comp->fgRemoveBlock(m_b2, true); // Re-direct firstBlock to jump to inRangeBb FlowEdge* const newEdge = m_comp->fgAddRefPred(inRangeBb, m_b1); - + if (!cmp2IsReversed) { m_b1->SetFalseEdge(m_b1->GetTrueEdge()); @@ -824,11 +820,14 @@ bool OptBoolsDsc::optOptimizeRangeTests() else { m_b1->SetFalseEdge(newEdge); - m_b1->SetTrueEdge(m_b1->GetFalseEdge()); assert(m_b1->TrueTargetIs(notInRangeBb)); assert(m_b1->FalseTargetIs(inRangeBb)); } + // Remove the 2nd condition block as we no longer need it + m_comp->fgRemoveRefPred(m_b2, m_b1); + m_comp->fgRemoveBlock(m_b2, true); + Statement* stmt = m_b1->lastStmt(); m_comp->gtSetStmtInfo(stmt); m_comp->fgSetStmtSeq(stmt); diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 6ce307a7f0413b..db1cc4c9a61107 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -597,7 +597,7 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo if (redirectMap->Lookup(blk->GetTarget(), &newTarget)) { // newBlk needs to be redirected to a new target - newEdge = fgAddRefPred(newTarget, newblk); + newEdge = fgAddRefPred(newTarget, newBlk); } else { From fa11d835aae2fb3601aeb8c81928b3c5480f478e Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Tue, 27 Feb 2024 09:15:37 -0500 Subject: [PATCH 13/16] Fix JitStress --- src/coreclr/jit/block.h | 14 +++++++++----- src/coreclr/jit/clrjit.natvis | 2 +- src/coreclr/jit/fgopt.cpp | 7 ++++--- src/coreclr/jit/lower.cpp | 8 +++++--- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 3ea13e2fe1438d..fd83319214ec6a 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -888,11 +888,15 @@ struct BasicBlock : private LIR::Range void SetCond(FlowEdge* trueEdge, FlowEdge* falseEdge) { - assert(trueEdge != nullptr); - assert(falseEdge != nullptr); - bbKind = BBJ_COND; - bbTrueEdge = trueEdge; - bbFalseEdge = falseEdge; + bbKind = BBJ_COND; + SetTrueEdge(trueEdge); + SetFalseEdge(falseEdge); + } + + void SetCond(FlowEdge* trueEdge) + { + bbKind = BBJ_COND; + SetTrueEdge(trueEdge); } // Set both the block kind and target edge. 
This can clear `bbTargetEdge` when setting diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis index f413fd316c3fa6..d87d36fcf1410b 100644 --- a/src/coreclr/jit/clrjit.natvis +++ b/src/coreclr/jit/clrjit.natvis @@ -21,7 +21,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u - BB{bbNum,d}->BB{bbTargetEdge->getDestinationBlock()->bbNum,d}; {bbKind,en} + BB{bbNum,d}->BB{bbTargetEdge->m_destBlock->bbNum,d}; {bbKind,en} BB{bbNum,d}; {bbKind,en}; {bbSwtTargets->bbsCount} cases BB{bbNum,d}; {bbKind,en}; {bbEhfTargets->bbeCount} succs BB{bbNum,d}; {bbKind,en} diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 5bf36cc55a157f..6112dd091a984f 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -2483,6 +2483,10 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // assert(!target->IsLast()); BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true); + + // The new block 'next' will inherit its weight from 'block' + // + next->inheritWeight(block); // Fix up block's flow // @@ -2492,9 +2496,6 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* FlowEdge* const falseEdge = fgAddRefPred(next, block); block->SetCond(trueEdge, falseEdge); - // The new block 'next' will inherit its weight from 'block' - // - next->inheritWeight(block); FlowEdge* const newEdge = fgAddRefPred(target->GetFalseTarget(), next); next->SetTargetEdge(newEdge); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index f35c4441bb2cc9..e5f50cadaeb100 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -952,6 +952,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // representing the fall-through flow from originalSwitchBB. assert(originalSwitchBB->KindIs(BBJ_ALWAYS)); assert(originalSwitchBB->TargetIs(afterDefaultCondBlock)); + assert(originalSwitchBB->JumpsToNext()); assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->GetSwitchTargets()->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. @@ -1096,8 +1097,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. - FlowEdge* const edgeToNext = comp->fgAddRefPred(currentBlock->Next(), currentBlock); - currentBlock->SetCond(newEdge, edgeToNext); + // We will set the false edge in a later iteration of the loop, or after. + currentBlock->SetCond(newEdge); // Now, build the conditional statement for the current case that is // being evaluated: @@ -1119,7 +1120,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // There is a fall-through to the following block. In the loop // above, we deleted all the predecessor edges from the switch. // In this case, we need to add one back. 
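The fall-through fix-up just below supplies the missing edge, completing the deferred pattern that the single-argument SetCond introduces above. In sketch form, with illustrative helper names, the two steps are:

    // Sketch: when lowering a switch into a chain of compares, the true edge is
    // known as each compare is emitted, but the false edge is whatever block
    // ends up lexically next, so it is wired up later.
    void EmitSwitchCompare(BasicBlock* condBlock, FlowEdge* trueEdge)
    {
        condBlock->SetCond(trueEdge); // kind and true edge now, false edge deferred
    }

    void CompleteSwitchCompare(Compiler* comp, BasicBlock* condBlock, BasicBlock* fallThrough)
    {
        FlowEdge* const falseEdge = comp->fgAddRefPred(fallThrough, condBlock);
        condBlock->SetFalseEdge(falseEdge);
    }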
- // comp->fgAddRefPred(currentBlock->Next(), currentBlock); + FlowEdge* const falseEdge = comp->fgAddRefPred(currentBlock->Next(), currentBlock); + currentBlock->SetFalseEdge(falseEdge); } if (!fUsedAfterDefaultCondBlock) From d97a303885f4deda2b36fabc971c07bb6b9431be Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Tue, 27 Feb 2024 09:51:12 -0500 Subject: [PATCH 14/16] Style --- src/coreclr/jit/block.cpp | 2 +- src/coreclr/jit/block.h | 18 ++++++---- src/coreclr/jit/fgbasic.cpp | 31 +++++++--------- src/coreclr/jit/fgehopt.cpp | 6 ++-- src/coreclr/jit/fginline.cpp | 2 +- src/coreclr/jit/fgopt.cpp | 14 ++++---- src/coreclr/jit/fgprofile.cpp | 6 ++-- src/coreclr/jit/flowgraph.cpp | 8 ++--- src/coreclr/jit/helperexpansion.cpp | 39 ++++++++++----------- src/coreclr/jit/importer.cpp | 10 +++--- src/coreclr/jit/indirectcalltransformer.cpp | 2 +- src/coreclr/jit/jiteh.cpp | 4 +-- src/coreclr/jit/loopcloning.cpp | 7 ++-- src/coreclr/jit/morph.cpp | 5 ++- src/coreclr/jit/optimizebools.cpp | 2 +- src/coreclr/jit/optimizer.cpp | 2 +- 16 files changed, 73 insertions(+), 85 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 35a73bfa950bd3..29c9718d9cd3f0 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1588,7 +1588,7 @@ BasicBlock* BasicBlock::New(Compiler* compiler) BasicBlock* BasicBlock::New(Compiler* compiler, BBKinds kind) { BasicBlock* block = BasicBlock::New(compiler); - block->bbKind = kind; + block->bbKind = kind; if (block->KindIs(BBJ_THROW)) { diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index fd83319214ec6a..12f97668967088 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -692,11 +692,11 @@ struct BasicBlock : private LIR::Range /* The following union describes the jump target(s) of this block */ union { - unsigned bbTargetOffs; // PC offset (temporary only) - FlowEdge* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc) - FlowEdge* bbTrueEdge; // BBJ_COND successor edge when its condition is true (alias for bbTargetEdge) - BBswtDesc* bbSwtTargets; // switch descriptor - BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor + unsigned bbTargetOffs; // PC offset (temporary only) + FlowEdge* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc) + FlowEdge* bbTrueEdge; // BBJ_COND successor edge when its condition is true (alias for bbTargetEdge) + BBswtDesc* bbSwtTargets; // switch descriptor + BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor }; // Successor edge of a BBJ_COND block if bbTrueEdge is not taken @@ -893,6 +893,10 @@ struct BasicBlock : private LIR::Range SetFalseEdge(falseEdge); } + // In most cases, a block's true and false targets are known by the time SetCond is called. + // To simplify the few cases where the false target isn't available until later, + // overload SetCond to initialize only the true target. + // This simplifies, for example, lowering switch blocks into jump sequences. void SetCond(FlowEdge* trueEdge) { bbKind = BBJ_COND; @@ -903,7 +907,7 @@ struct BasicBlock : private LIR::Range // block kinds that don't use `bbTargetEdge`. void SetKindAndTargetEdge(BBKinds kind, FlowEdge* targetEdge = nullptr) { - bbKind = kind; + bbKind = kind; bbTargetEdge = targetEdge; // If bbKind indicates this block has a jump, bbTargetEdge cannot be null. @@ -1768,7 +1772,7 @@ struct BasicBlock : private LIR::Range // need to call a function or execute another `switch` to get them. 
Also, pre-compute the begin and end // points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at // `m_succs` or at the switch table successor array. - FlowEdge* m_succs[2]; + FlowEdge* m_succs[2]; FlowEdge* const* m_begin; FlowEdge* const* m_end; diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 0d9659c9710382..4ef37c9188249f 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -218,13 +218,13 @@ bool Compiler::fgEnsureFirstBBisScratch() fgFirstBB->bbRefs--; block = BasicBlock::New(this); - + // If we have profile data the new block will inherit fgFirstBlock's weight if (fgFirstBB->hasProfileWeight()) { block->inheritWeight(fgFirstBB); } - + // The new scratch bb will fall through to the old first bb FlowEdge* const edge = fgAddRefPred(fgFirstBB, block); edge->setLikelihood(1.0); @@ -234,7 +234,7 @@ bool Compiler::fgEnsureFirstBBisScratch() else { noway_assert(fgLastBB == nullptr); - block = BasicBlock::New(this, BBJ_ALWAYS); + block = BasicBlock::New(this, BBJ_ALWAYS); fgFirstBB = block; fgLastBB = block; } @@ -2974,8 +2974,8 @@ void Compiler::fgLinkBasicBlocks() { BasicBlock* const trueTarget = fgLookupBB(curBBdesc->GetTargetOffs()); BasicBlock* const falseTarget = curBBdesc->Next(); - FlowEdge* const trueEdge = fgAddRefPred(trueTarget, curBBdesc); - FlowEdge* const falseEdge = fgAddRefPred(falseTarget, curBBdesc); + FlowEdge* const trueEdge = fgAddRefPred(trueTarget, curBBdesc); + FlowEdge* const falseEdge = fgAddRefPred(falseTarget, curBBdesc); curBBdesc->SetTrueEdge(trueEdge); curBBdesc->SetFalseEdge(falseEdge); @@ -4825,12 +4825,12 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) // Default to fallthrough, and add the arc for that. FlowEdge* const newEdge = fgAddRefPred(newBlock, curr); newEdge->setLikelihood(1.0); - + // Transfer the kind and target. Do this after the code above, to avoid null-ing out the old targets used by the // above code (and so newBlock->bbNext is valid, so SetCond() can initialize the false target if newBlock is a // BBJ_COND). newBlock->TransferTarget(curr); - + curr->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); curr->SetFlags(BBF_NONE_QUIRK); assert(curr->JumpsToNext()); @@ -5456,7 +5456,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->KindIs(BBJ_COND) && bSrc->FalseTargetIs(bDst) && !bSrc->NextIs(bDst)) { // Add a new block after bSrc which jumps to 'bDst' - jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); + jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); FlowEdge* oldEdge = bSrc->GetFalseEdge(); fgReplacePred(oldEdge, jmpBlk); jmpBlk->SetTargetEdge(oldEdge); @@ -6062,9 +6062,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r * Insert a BasicBlock before the given block. */ -BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, - BasicBlock* block, - bool extendRegion) +BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion) { // Create a new BasicBlock and chain it in @@ -6103,9 +6101,7 @@ BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, * Insert a BasicBlock after the given block. 
*/ -BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, - BasicBlock* block, - bool extendRegion) +BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion) { // Create a new BasicBlock and chain it in @@ -6157,11 +6153,8 @@ BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, // Notes: // The new block will have BBF_INTERNAL flag and EH region will be extended // -BasicBlock* Compiler::fgNewBBFromTreeAfter(BBKinds jumpKind, - BasicBlock* block, - GenTree* tree, - DebugInfo& debugInfo, - bool updateSideEffects /* = false */) +BasicBlock* Compiler::fgNewBBFromTreeAfter( + BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, bool updateSideEffects /* = false */) { BasicBlock* newBlock = fgNewBBafter(jumpKind, block, true); newBlock->SetFlags(BBF_INTERNAL); diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 18eb96c754dbc3..54a36e5c0d1697 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -170,7 +170,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() // Ref count updates. fgRemoveRefPred(currentBlock->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(postTryFinallyBlock, currentBlock); - + currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY @@ -1138,7 +1138,7 @@ PhaseStatus Compiler::fgCloneFinally() // Ref count updates. fgRemoveRefPred(currentBlock->GetTargetEdge()); FlowEdge* const newEdge = fgAddRefPred(firstCloneBlock, currentBlock); - + // This call returns to the expected spot, so retarget it to branch to the clone. currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); @@ -2151,7 +2151,7 @@ void Compiler::fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, // Wire up the new flow FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, predBlock, predEdge); - + if (predBlock->KindIs(BBJ_ALWAYS)) { assert(predBlock->TargetIs(nonCanonicalBlock)); diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index f1673e4c32eef1..8497e3c904164a 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1553,7 +1553,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) assert(topBlock->TargetIs(bottomBlock)); FlowEdge* const oldEdge = fgRemoveRefPred(bottomBlock, topBlock); FlowEdge* const newEdge = fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock, oldEdge); - + topBlock->SetNext(InlineeCompiler->fgFirstBB); topBlock->SetTargetEdge(newEdge); topBlock->SetFlags(BBF_NONE_QUIRK); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 6112dd091a984f..ec6e0c84b91000 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1292,7 +1292,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Update the predecessor list for bNext's target */ FlowEdge* const targetEdge = bNext->GetTargetEdge(); fgReplacePred(targetEdge, block); - + block->SetKindAndTargetEdge(bNext->GetKind(), targetEdge); break; } @@ -1309,7 +1309,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) { fgReplacePred(falseEdge, block); } - + block->SetCond(trueEdge, falseEdge); break; } @@ -1567,7 +1567,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc // Optimize the JUMP to empty unconditional JUMP to go to the new target FlowEdge* const newEdge = fgAddRefPred(bDest->GetTarget(), block, 
fgRemoveRefPred(bDest, block)); - + switch (block->GetKind()) { case BBJ_ALWAYS: @@ -2483,7 +2483,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // assert(!target->IsLast()); BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true); - + // The new block 'next' will inherit its weight from 'block' // next->inheritWeight(block); @@ -2491,7 +2491,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // Fix up block's flow // fgRemoveRefPred(target, block); - + FlowEdge* const trueEdge = fgAddRefPred(target->GetTrueTarget(), block); FlowEdge* const falseEdge = fgAddRefPred(next, block); block->SetCond(trueEdge, falseEdge); @@ -4994,7 +4994,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh if (bDest->KindIs(BBJ_COND) && !bDest->NextIs(bDest->GetFalseTarget())) { BasicBlock* const bDestFalseTarget = bDest->GetFalseTarget(); - BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); + BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); bFixup->inheritWeight(bDestFalseTarget); fgRemoveRefPred(bDestFalseTarget, bDest); @@ -5682,7 +5682,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) { fgRemoveRefPred(commSucc, predBlock); } - + FlowEdge* const newEdge = fgAddRefPred(crossJumpTarget, predBlock); predBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 29f0d77cd3090d..1499042cb8b42e 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -507,8 +507,7 @@ void BlockCountInstrumentor::RelocateProbes() // if (criticalPreds.Height() > 0) { - BasicBlock* const intermediary = - m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); + BasicBlock* const intermediary = m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); intermediary->SetFlags(BBF_IMPORTED | BBF_MARKED | BBF_NONE_QUIRK); intermediary->inheritWeight(block); FlowEdge* const newEdge = m_comp->fgAddRefPred(block, intermediary); @@ -1680,8 +1679,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // if (criticalPreds.Height() > 0) { - BasicBlock* intermediary = - m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); + BasicBlock* intermediary = m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); intermediary->SetFlags(BBF_IMPORTED | BBF_NONE_QUIRK); intermediary->inheritWeight(block); FlowEdge* const newEdge = m_comp->fgAddRefPred(block, intermediary); diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index b9cd454693fe36..b621739a41cddb 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -266,7 +266,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) // I want to create: // top -> poll -> bottom (lexically) // so that we jump over poll to get to bottom. - BasicBlock* top = block; + BasicBlock* top = block; BasicBlock* poll = fgNewBBafter(BBJ_ALWAYS, top, true); bottom = fgNewBBafter(top->GetKind(), poll, true); @@ -361,7 +361,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. 
FlowEdge* const trueEdge = fgAddRefPred(bottom, top); FlowEdge* const falseEdge = fgAddRefPred(poll, top); - + FlowEdge* const newEdge = fgAddRefPred(bottom, poll); poll->SetTargetEdge(newEdge); assert(poll->JumpsToNext()); @@ -2767,8 +2767,8 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) newHead->bbRefs = 0; fgInsertBBbefore(block, newHead); // insert the new block in the block list - fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH - // block. + fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH + // block. // Distribute the pred list between newHead and block. Incoming edges coming from outside // the handler go to the prolog. Edges coming from with the handler are back-edges, and diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index d9801afbdafc82..d6a9ac01df9694 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -319,8 +319,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // Fallback basic block GenTree* fallbackValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), call); - BasicBlock* fallbackBb = - fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, true); + BasicBlock* fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, true); // Fast-path basic block GenTree* fastpathValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), fastPathValueClone); @@ -373,7 +372,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // Update preds in all new blocks // fgRemoveRefPred(block, prevBb); - + { FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb); fastPathBb->SetTargetEdge(newEdge); @@ -396,12 +395,12 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // sizeCheckBb flows into nullcheckBb in case if the size check passes { - FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, sizeCheckBb); + FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, sizeCheckBb); FlowEdge* const falseEdge = fgAddRefPred(nullcheckBb, sizeCheckBb); sizeCheckBb->SetTrueEdge(trueEdge); sizeCheckBb->SetFalseEdge(falseEdge); } - + // fallbackBb is reachable from both nullcheckBb and sizeCheckBb // fastPathBb is only reachable from successful nullcheckBb } @@ -415,7 +414,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // fallbackBb is only reachable from nullcheckBb (jump destination) } - FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, nullcheckBb); + FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, nullcheckBb); FlowEdge* const falseEdge = fgAddRefPred(fastPathBb, nullcheckBb); nullcheckBb->SetTrueEdge(trueEdge); nullcheckBb->SetFalseEdge(falseEdge); @@ -708,11 +707,10 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St // fallbackBb GenTree* fallbackValueDef = gtNewStoreLclVarNode(finalLclNum, slowHelper); - BasicBlock* fallbackBb = - fgNewBBFromTreeAfter(BBJ_ALWAYS, tlsRootNullCondBB, fallbackValueDef, debugInfo, true); + BasicBlock* fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, tlsRootNullCondBB, fallbackValueDef, debugInfo, true); GenTree* fastPathValueDef = gtNewStoreLclVarNode(finalLclNum, gtCloneExpr(finalLcl)); - BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true); + BasicBlock* fastPathBb = 
fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true); *callUse = finalLcl; @@ -1093,21 +1091,21 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* // assert(prevBb->KindIs(BBJ_ALWAYS)); fgRemoveRefPred(prevBb->GetTargetEdge()); - + { FlowEdge* const newEdge = fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb); prevBb->SetTargetEdge(newEdge); } { - FlowEdge* const trueEdge = fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB); - FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB); + FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB); + FlowEdge* const falseEdge = fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB); maxThreadStaticBlocksCondBB->SetTrueEdge(trueEdge); maxThreadStaticBlocksCondBB->SetFalseEdge(falseEdge); } { - FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB); + FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB); FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, threadStaticBlockNullCondBB); threadStaticBlockNullCondBB->SetTrueEdge(trueEdge); threadStaticBlockNullCondBB->SetFalseEdge(falseEdge); @@ -1814,7 +1812,7 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, lengthCheckBb->SetTrueEdge(trueEdge); lengthCheckBb->SetFalseEdge(falseEdge); } - + { // fastpathBb flows into block FlowEdge* const newEdge = fgAddRefPred(block, fastpathBb); @@ -2389,8 +2387,8 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, // it's too late to rely on upstream phases to do this for us (unless we do optRepeat). GenTree* nullcheckOp = gtNewOperNode(GT_EQ, TYP_INT, tmpNode, gtNewNull()); nullcheckOp->gtFlags |= GTF_RELOP_JMP_USED; - BasicBlock* nullcheckBb = fgNewBBFromTreeAfter(BBJ_COND, firstBb, gtNewOperNode(GT_JTRUE, TYP_VOID, nullcheckOp), - debugInfo, true); + BasicBlock* nullcheckBb = + fgNewBBFromTreeAfter(BBJ_COND, firstBb, gtNewOperNode(GT_JTRUE, TYP_VOID, nullcheckOp), debugInfo, true); // The very first statement in the whole expansion is to assign obj to tmp. // We assume it's the value we're going to return in most cases. @@ -2458,7 +2456,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, { // if fallback call is not needed, we just assign null to tmp GenTree* fallbackTree = gtNewTempStore(tmpNum, gtNewNull()); - fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true); + fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true); } else { @@ -2469,7 +2467,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_CHKCASTCLASS_SPECIAL); } GenTree* fallbackTree = gtNewTempStore(tmpNum, call); - fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true); + fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true); } // Block 4: typeCheckSucceedBb @@ -2484,8 +2482,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, typeCheckSucceedTree = gtNewNothingNode(); } BasicBlock* typeCheckSucceedBb = - typeCheckNotNeeded ? nullptr - : fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, typeCheckSucceedTree, debugInfo); + typeCheckNotNeeded ? 
nullptr : fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, typeCheckSucceedTree, debugInfo); // // Wire up the blocks @@ -2516,7 +2513,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt, } fgRemoveRefPred(firstBb->GetTargetEdge()); - + { FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, firstBb); firstBb->SetTargetEdge(newEdge); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 3fa16a6abdd8c8..d6d5560e566021 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4691,7 +4691,7 @@ void Compiler::impImportLeave(BasicBlock* block) { fgRemoveRefPred(step->GetTarget(), step); } - + FlowEdge* const newEdge = fgAddRefPred(exitBlock, step); newEdge->setLikelihood(1.0); step->SetTargetEdge(newEdge); // the previous step (maybe a call to a nested finally, or a nested catch @@ -4730,8 +4730,7 @@ void Compiler::impImportLeave(BasicBlock* block) (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; - callBlock = - fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); + callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, @@ -4762,7 +4761,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(callBlock->HasInitializedTarget()); fgRemoveRefPred(callBlock->GetTargetEdge()); - // callBlock will call the finally handler. This will be set up later. +// callBlock will call the finally handler. This will be set up later. 
#ifdef DEBUG if (verbose) @@ -4843,8 +4842,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert((step == block) || !step->HasInitializedTarget()); // callBlock will call the finally handler - callBlock = - fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); + callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); if (step == block) { fgRemoveRefPred(step->GetTargetEdge()); diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index a6221664925082..0e98b7d44b472c 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -1036,7 +1036,7 @@ class IndirectCallTransformer checkBlock->SetTargetEdge(thenEdge); checkBlock->SetFlags(BBF_NONE_QUIRK); assert(checkBlock->JumpsToNext()); - + FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, thenBlock); elseEdge->setLikelihood(elseLikelihoodWt); thenBlock->SetTargetEdge(elseEdge); diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index d2223f46150a06..e7195b5c98cb2b 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -4341,8 +4341,8 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) #ifdef DEBUG if (verbose) { - printf("EH#%u: Updating target for filter ret block: " FMT_BB " => " FMT_BB "\n", - ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum); + printf("EH#%u: Updating target for filter ret block: " FMT_BB " => " FMT_BB "\n", ehGetIndex(HBtab), + bFilterLast->bbNum, bPrev->bbNum); } #endif // DEBUG // Change the target for bFilterLast from the old first 'block' to the new first 'bPrev' diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 35e1547e6ee636..c6d37dc507b95a 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1962,8 +1962,7 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // Make a new pre-header block for the fast loop. JITDUMP("Create new preheader block for fast loop\n"); - BasicBlock* fastPreheader = - fgNewBBafter(BBJ_ALWAYS, preheader, /*extendRegion*/ true); + BasicBlock* fastPreheader = fgNewBBafter(BBJ_ALWAYS, preheader, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", fastPreheader->bbNum, preheader->bbNum); fastPreheader->bbWeight = fastPreheader->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; @@ -2058,12 +2057,12 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // Now redirect the old preheader to jump to the first new condition that // was inserted by the above function. assert(preheader->KindIs(BBJ_ALWAYS)); - + { FlowEdge* const newEdge = fgAddRefPred(preheader->Next(), preheader); preheader->SetTargetEdge(newEdge); } - + preheader->SetFlags(BBF_NONE_QUIRK); // And make sure we insert a pred link for the final fallthrough into the fast preheader. 
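Across the hunks in this series the same caller-side idiom recurs for blocks with a single successor: instead of storing a raw target block, the caller builds (or reuses) a FlowEdge via fgAddRefPred and installs that edge with SetTargetEdge or SetKindAndTargetEdge. The sketch below restates that idiom in one place; it is not code from the patch, the helper name RetargetSingleSuccessor is hypothetical, and fgRemoveRefPred, fgAddRefPred, setLikelihood, and SetTargetEdge are the existing JIT helpers used in the hunks above.

// Minimal sketch (hypothetical helper), assuming `block` is a BBJ_ALWAYS block
// whose successor edge is already initialized, as in the loop-cloning preheader case.
void RetargetSingleSuccessor(Compiler* comp, BasicBlock* block, BasicBlock* newTarget)
{
    assert(block->KindIs(BBJ_ALWAYS));

    // Remove the edge to the old target (takes `block` out of that target's pred list).
    comp->fgRemoveRefPred(block->GetTargetEdge());

    // Create (or bump the dup count of) the edge to the new target.
    FlowEdge* const newEdge = comp->fgAddRefPred(newTarget, block);
    newEdge->setLikelihood(1.0); // the block's only successor, so the edge is always taken

    // Install the edge itself rather than a raw block pointer.
    block->SetTargetEdge(newEdge);
}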
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 54f61fe64fc55f..e224b5d58409a2 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -14647,11 +14647,10 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) FlowEdge* const newEdge = fgAddRefPred(remainderBlock, elseBlock); elseBlock->SetTargetEdge(newEdge); } - + assert(condBlock->JumpsToNext()); assert(elseBlock->JumpsToNext()); - condBlock->SetFlags(propagateFlagsToAll | BBF_NONE_QUIRK); elseBlock->SetFlags(propagateFlagsToAll | BBF_NONE_QUIRK); @@ -14678,7 +14677,7 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) FlowEdge* const newEdge = fgAddRefPred(remainderBlock, thenBlock); thenBlock->SetTargetEdge(newEdge); - + assert(condBlock->TargetIs(elseBlock)); FlowEdge* const falseEdge = fgAddRefPred(thenBlock, condBlock); condBlock->SetCond(condBlock->GetTargetEdge(), falseEdge); diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 283a1cee47295b..713d9f17c58344 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -806,7 +806,7 @@ bool OptBoolsDsc::optOptimizeRangeTests() { return false; } - + // Re-direct firstBlock to jump to inRangeBb FlowEdge* const newEdge = m_comp->fgAddRefPred(inRangeBb, m_b1); diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 4e42da1dbb2524..31744e078d3318 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -2201,7 +2201,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Update pred info // - FlowEdge* const trueEdge = fgAddRefPred(bJoin, bNewCond); + FlowEdge* const trueEdge = fgAddRefPred(bJoin, bNewCond); FlowEdge* const falseEdge = fgAddRefPred(bTop, bNewCond); bNewCond->SetTrueEdge(trueEdge); bNewCond->SetFalseEdge(falseEdge); From 54918f63d512a5f748a98495864c7e578613573d Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Tue, 27 Feb 2024 10:12:37 -0500 Subject: [PATCH 15/16] Comments --- src/coreclr/jit/block.cpp | 5 +++++ src/coreclr/jit/fgbasic.cpp | 3 +-- src/coreclr/jit/fgflow.cpp | 5 +++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 29c9718d9cd3f0..75def5b0e6a45e 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -857,6 +857,11 @@ void BasicBlock::TransferTarget(BasicBlock* from) SetEhf(from->GetEhfTargets()); from->bbEhfTargets = nullptr; // Make sure nobody uses the descriptor after this. break; + + // TransferTarget may be called after setting the source block of `from`'s + // successor edges to this block. + // This means calling GetTarget/GetTrueTarget/GetFalseTarget would trigger asserts. + // Avoid this by accessing the edges directly. case BBJ_COND: SetCond(from->bbTrueEdge, from->bbFalseEdge); break; diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 4ef37c9188249f..625b306e600237 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -4827,8 +4827,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) newEdge->setLikelihood(1.0); // Transfer the kind and target. Do this after the code above, to avoid null-ing out the old targets used by the - // above code (and so newBlock->bbNext is valid, so SetCond() can initialize the false target if newBlock is a - // BBJ_COND). + // above code. 
newBlock->TransferTarget(curr); curr->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge); diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index 14de412b225fe1..0f1192288152b7 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -214,6 +214,11 @@ FlowEdge* Compiler::fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, FlowE // When initializing preds, ensure edge likelihood is set, // such that this edge is as likely as any other successor edge + // Note: We probably shouldn't call NumSucc on a new BBJ_COND block. + // NumSucc compares bbTrueEdge and bbFalseEdge to determine if this BBJ_COND block has only one successor, + // but these members are uninitialized. Aside from the fact that this compares uninitialized memory, + // we don't know if the true and false targets are the same in NumSucc until both edges exist. + // TODO: Move this edge likelihood logic to fgLinkBasicBlocks. // const unsigned numSucc = blockPred->NumSucc(); assert(numSucc > 0); From 24997f919db1f446b249a56d26fc2a88e3461d94 Mon Sep 17 00:00:00 2001 From: "Aman Khalid (from Dev Box)" Date: Tue, 27 Feb 2024 10:41:04 -0500 Subject: [PATCH 16/16] Remove overzealous assert --- src/coreclr/jit/importer.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index d6d5560e566021..808d879e268e31 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -4486,7 +4486,6 @@ void Compiler::impImportLeave(BasicBlock* block) unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); - assert(!callBlock->HasInitializedTarget()); FlowEdge* const newEdge = fgAddRefPred(HBtab->ebdHndBeg, callBlock); newEdge->setLikelihood(1.0); callBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge);
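For blocks with two successors, these patches use the matching idiom: create both edges with fgAddRefPred, then install them with SetTrueEdge/SetFalseEdge (or SetCond), as the fgLinkBasicBlocks, helperexpansion, and optimizer hunks above do. The sketch below is illustrative only: WireCondSuccessors is a hypothetical name, and the even likelihood split is an assumption for the example, not something computed by this series.

// Minimal sketch (hypothetical helper): wire a BBJ_COND block to its two targets.
void WireCondSuccessors(Compiler* comp, BasicBlock* condBlock, BasicBlock* trueTarget, BasicBlock* falseTarget)
{
    assert(condBlock->KindIs(BBJ_COND));

    FlowEdge* const trueEdge  = comp->fgAddRefPred(trueTarget, condBlock);  // taken when the condition holds
    FlowEdge* const falseEdge = comp->fgAddRefPred(falseTarget, condBlock); // the fall-through side

    // Assumed 50/50 split, purely for illustration.
    trueEdge->setLikelihood(0.5);
    falseEdge->setLikelihood(0.5);

    condBlock->SetTrueEdge(trueEdge);
    condBlock->SetFalseEdge(falseEdge);
}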