diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 58b91b739fe424..273c6f045123df 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -722,12 +722,12 @@ void BasicBlock::dspKind() const { printf(" ->"); - const unsigned jumpCnt = bbSwtTargets->bbsCount; - BasicBlock** const jumpTab = bbSwtTargets->bbsDstTab; + const unsigned jumpCnt = bbSwtTargets->bbsCount; + FlowEdge** const jumpTab = bbSwtTargets->bbsDstTab; for (unsigned i = 0; i < jumpCnt; i++) { - printf("%c%s", (i == 0) ? ' ' : ',', dspBlockNum(jumpTab[i])); + printf("%c%s", (i == 0) ? ' ' : ',', dspBlockNum(jumpTab[i]->getDestinationBlock())); const bool isDefault = bbSwtTargets->bbsHasDefault && (i == jumpCnt - 1); if (isDefault) @@ -1217,7 +1217,7 @@ BasicBlock* BasicBlock::GetSucc(unsigned i) const return bbEhfTargets->bbeSuccs[i]->getDestinationBlock(); case BBJ_SWITCH: - return bbSwtTargets->bbsDstTab[i]; + return bbSwtTargets->bbsDstTab[i]->getDestinationBlock(); default: unreached(); @@ -1774,7 +1774,7 @@ BBswtDesc::BBswtDesc(Compiler* comp, const BBswtDesc* other) { // Allocate and fill in a new dst tab // - bbsDstTab = new (comp, CMK_BasicBlock) BasicBlock*[bbsCount]; + bbsDstTab = new (comp, CMK_FlowEdge) FlowEdge*[bbsCount]; for (unsigned i = 0; i < bbsCount; i++) { bbsDstTab[i] = other->bbsDstTab[i]; diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 09fc1c71c35638..208eb07e583fa0 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -1840,8 +1840,8 @@ class BasicBlockRangeList // struct BBswtDesc { - BasicBlock** bbsDstTab; // case label table address - unsigned bbsCount; // count of cases (includes 'default' if bbsHasDefault) + FlowEdge** bbsDstTab; // case label table address + unsigned bbsCount; // count of cases (includes 'default' if bbsHasDefault) // Case number and likelihood of most likely case // (only known with PGO, only valid if bbsHasDominantCase is true) @@ -1867,7 +1867,7 @@ struct BBswtDesc bbsCount--; 
} - BasicBlock* getDefault() + FlowEdge* getDefault() { assert(bbsHasDefault); assert(bbsCount > 0); @@ -1994,8 +1994,10 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) // We don't use the m_succs in-line data for switches; use the existing jump table in the block. assert(block->bbSwtTargets != nullptr); assert(block->bbSwtTargets->bbsDstTab != nullptr); - m_begin = block->bbSwtTargets->bbsDstTab; - m_end = block->bbSwtTargets->bbsDstTab + block->bbSwtTargets->bbsCount; + m_beginEdge = block->bbSwtTargets->bbsDstTab; + m_endEdge = block->bbSwtTargets->bbsDstTab + block->bbSwtTargets->bbsCount; + + iterateEdges = true; break; default: diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 4a8c08a89858e8..eefee0e6912cf9 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -650,9 +650,9 @@ void CodeGen::genJumpTable(GenTree* treeNode) noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); - unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; - BasicBlock** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; - unsigned jmpTabBase; + unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; + FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; + unsigned jmpTabBase; jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, false); @@ -660,7 +660,8 @@ void CodeGen::genJumpTable(GenTree* treeNode) for (unsigned i = 0; i < jumpCount; i++) { - BasicBlock* target = *jumpTable++; + BasicBlock* target = (*jumpTable)->getDestinationBlock(); + jumpTable++; noway_assert(target->HasFlag(BBF_HAS_LABEL)); JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum); diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 3883b20118ad8a..4587bace1697ab 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -3753,10 
+3753,10 @@ void CodeGen::genJumpTable(GenTree* treeNode) noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); - unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; - BasicBlock** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; - unsigned jmpTabOffs; - unsigned jmpTabBase; + unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; + FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; + unsigned jmpTabOffs; + unsigned jmpTabBase; jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true); @@ -3766,7 +3766,8 @@ void CodeGen::genJumpTable(GenTree* treeNode) for (unsigned i = 0; i < jumpCount; i++) { - BasicBlock* target = *jumpTable++; + BasicBlock* target = (*jumpTable)->getDestinationBlock(); + jumpTable++; noway_assert(target->HasFlag(BBF_HAS_LABEL)); JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum); diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index 41266917205a52..c850dad2ce765f 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -2930,10 +2930,10 @@ void CodeGen::genJumpTable(GenTree* treeNode) noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); - unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; - BasicBlock** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; - unsigned jmpTabOffs; - unsigned jmpTabBase; + unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; + FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; + unsigned jmpTabOffs; + unsigned jmpTabBase; jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true); @@ -2943,7 +2943,8 @@ void CodeGen::genJumpTable(GenTree* treeNode) for (unsigned i = 0; i < jumpCount; i++) { - BasicBlock* target = *jumpTable++; + BasicBlock* target = 
(*jumpTable)->getDestinationBlock(); + jumpTable++; noway_assert(target->HasFlag(BBF_HAS_LABEL)); JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index a468c026c22cbd..6d5a9a195024f3 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -2852,10 +2852,10 @@ void CodeGen::genJumpTable(GenTree* treeNode) noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); - unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; - BasicBlock** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; - unsigned jmpTabOffs; - unsigned jmpTabBase; + unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; + FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; + unsigned jmpTabOffs; + unsigned jmpTabBase; jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true); @@ -2865,7 +2865,7 @@ void CodeGen::genJumpTable(GenTree* treeNode) for (unsigned i = 0; i < jumpCount; i++) { - BasicBlock* target = *jumpTable++; + BasicBlock* target = (*jumpTable)->getDestinationBlock(); noway_assert(target->HasFlag(BBF_HAS_LABEL)); JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index b877262b1583d8..2aba181486435b 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -4356,10 +4356,10 @@ void CodeGen::genJumpTable(GenTree* treeNode) noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); - unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; - BasicBlock** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; - unsigned jmpTabOffs; - unsigned jmpTabBase; + unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount; + 
FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab; + unsigned jmpTabOffs; + unsigned jmpTabBase; jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true); @@ -4369,7 +4369,8 @@ void CodeGen::genJumpTable(GenTree* treeNode) for (unsigned i = 0; i < jumpCount; i++) { - BasicBlock* target = *jumpTable++; + BasicBlock* target = (*jumpTable)->getDestinationBlock(); + jumpTable++; noway_assert(target->HasFlag(BBF_HAS_LABEL)); JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum); diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index c850b8f76176a2..c20eb8188f9bbc 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -6862,7 +6862,7 @@ class Compiler bool optExtractInitTestIncr( BasicBlock** pInitBlock, BasicBlock* bottom, BasicBlock* top, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); - void optRedirectBlock(BasicBlock* blk, + void optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, BlockToBlockMap* redirectMap); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 3800e2ffe89d94..3e583d74500529 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -382,36 +382,26 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSw assert(fgPredsComputed); // Walk the switch's jump table, updating the predecessor for each branch. - for (BasicBlock* const bJump : oldSwitchBlock->SwitchTargets()) - { - noway_assert(bJump != nullptr); - - // Note that if there are duplicate branch targets in the switch jump table, - // fgRemoveRefPred()/fgAddRefPred() will do the right thing: the second and - // subsequent duplicates will simply subtract from and add to the duplicate - // count (respectively). 
- // - // However this does the "wrong" thing with respect to edge profile - // data; the old edge is not returned by fgRemoveRefPred until it has - // a dup count of 0, and the fgAddRefPred only uses the optional - // old edge arg when the new edge is first created. - // - // Remove the old edge [oldSwitchBlock => bJump] - // - assert(bJump->countOfInEdges() > 0); - FlowEdge* const oldEdge = fgRemoveRefPred(bJump, oldSwitchBlock); + BBswtDesc* swtDesc = oldSwitchBlock->GetSwitchTargets(); - // - // Create the new edge [newSwitchBlock => bJump] - // - FlowEdge* const newEdge = fgAddRefPred(bJump, newSwitchBlock); + for (unsigned i = 0; i < swtDesc->bbsCount; i++) + { + FlowEdge* succEdge = swtDesc->bbsDstTab[i]; + assert(succEdge != nullptr); - // Handle the profile update, once we get our hands on the old edge. - // - if (oldEdge != nullptr) + if (succEdge->getSourceBlock() != oldSwitchBlock) { - assert(!newEdge->hasLikelihood()); - newEdge->setLikelihood(oldEdge->getLikelihood()); + // swtDesc can have duplicate targets, so we may have updated this edge already + // + assert(succEdge->getSourceBlock() == newSwitchBlock); + assert(succEdge->getDupCount() > 1); + } + else + { + // Redirect edge's source block from oldSwitchBlock to newSwitchBlock, + // and keep successor block's pred list in order + // + fgReplacePred(succEdge, newSwitchBlock); } } @@ -709,29 +699,17 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas case BBJ_SWITCH: { - unsigned const jumpCnt = block->GetSwitchTargets()->bbsCount; - BasicBlock** const jumpTab = block->GetSwitchTargets()->bbsDstTab; - bool changed = false; + unsigned const jumpCnt = block->GetSwitchTargets()->bbsCount; + FlowEdge** const jumpTab = block->GetSwitchTargets()->bbsDstTab; + bool changed = false; for (unsigned i = 0; i < jumpCnt; i++) { - if (jumpTab[i] == oldTarget) + if (jumpTab[i]->getDestinationBlock() == oldTarget) { - jumpTab[i] = newTarget; - changed = true; - FlowEdge* const 
oldEdge = fgRemoveRefPred(oldTarget, block); - FlowEdge* const newEdge = fgAddRefPred(newTarget, block, oldEdge); - - // Handle the profile update, once we get our hands on the old edge. - // (see notes in fgChangeSwitchBlock for why this extra step is necessary) - // - // We do it slightly differently here so we don't lose the old - // edge weight propagation that would sometimes happen - // - if ((oldEdge != nullptr) && !newEdge->hasLikelihood()) - { - newEdge->setLikelihood(oldEdge->getLikelihood()); - } + fgRemoveRefPred(jumpTab[i]); + jumpTab[i] = fgAddRefPred(newTarget, block, jumpTab[i]); + changed = true; } } @@ -3041,23 +3019,23 @@ void Compiler::fgLinkBasicBlocks() case BBJ_SWITCH: { - unsigned jumpCnt = curBBdesc->GetSwitchTargets()->bbsCount; - BasicBlock** jumpPtr = curBBdesc->GetSwitchTargets()->bbsDstTab; + unsigned jumpCnt = curBBdesc->GetSwitchTargets()->bbsCount; + FlowEdge** jumpPtr = curBBdesc->GetSwitchTargets()->bbsDstTab; do { - BasicBlock* jumpDest = fgLookupBB((unsigned)*(size_t*)jumpPtr); - *jumpPtr = jumpDest; - fgAddRefPred(jumpDest, curBBdesc); - if ((*jumpPtr)->bbNum <= curBBdesc->bbNum) + BasicBlock* jumpDest = fgLookupBB((unsigned)*(size_t*)jumpPtr); + FlowEdge* const newEdge = fgAddRefPred(jumpDest, curBBdesc); + *jumpPtr = newEdge; + if (jumpDest->bbNum <= curBBdesc->bbNum) { - fgMarkBackwardJump(*jumpPtr, curBBdesc); + fgMarkBackwardJump(jumpDest, curBBdesc); } } while (++jumpPtr, --jumpCnt); /* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */ - noway_assert(curBBdesc->NextIs(*(jumpPtr - 1))); + noway_assert(curBBdesc->NextIs((*(jumpPtr - 1))->getDestinationBlock())); break; } @@ -3220,8 +3198,8 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F unsigned jmpBase; unsigned jmpCnt; // # of switch cases (excluding default) - BasicBlock** jmpTab; - BasicBlock** jmpPtr; + FlowEdge** jmpTab; + FlowEdge** jmpPtr; /* Allocate the switch descriptor */ @@ -3238,7 +3216,7 @@ unsigned 
Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F /* Allocate the jump table */ - jmpPtr = jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jmpCnt + 1]; + jmpPtr = jmpTab = new (this, CMK_FlowEdge) FlowEdge*[jmpCnt + 1]; /* Fill in the jump table */ @@ -3248,12 +3226,12 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F codeAddr += 4; // store the offset in the pointer. We change these in fgLinkBasicBlocks(). - *jmpPtr++ = (BasicBlock*)(size_t)(jmpBase + jmpDist); + *jmpPtr++ = (FlowEdge*)(size_t)(jmpBase + jmpDist); } /* Append the default label to the target table */ - *jmpPtr++ = (BasicBlock*)(size_t)jmpBase; + *jmpPtr++ = (FlowEdge*)(size_t)jmpBase; /* Make sure we found the right number of labels */ diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 900a58fb404033..6a2068169e1a54 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -1101,7 +1101,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { fprintf(fgxFile, "\n switchCases=\"%d\"", edge->getDupCount()); } - if (bSource->GetSwitchTargets()->getDefault() == bTarget) + if (bSource->GetSwitchTargets()->getDefault()->getDestinationBlock() == bTarget) { fprintf(fgxFile, "\n switchDefault=\"true\""); } @@ -2066,12 +2066,12 @@ void Compiler::fgTableDispBasicBlock(const BasicBlock* block, const BBswtDesc* const jumpSwt = block->GetSwitchTargets(); const unsigned jumpCnt = jumpSwt->bbsCount; - BasicBlock** const jumpTab = jumpSwt->bbsDstTab; + FlowEdge** const jumpTab = jumpSwt->bbsDstTab; for (unsigned i = 0; i < jumpCnt; i++) { printedBlockWidth += 1 /* space/comma */; - printf("%c%s", (i == 0) ? ' ' : ',', dspBlockNum(jumpTab[i])); + printf("%c%s", (i == 0) ? 
' ' : ',', dspBlockNum(jumpTab[i]->getDestinationBlock())); const bool isDefault = jumpSwt->bbsHasDefault && (i == jumpCnt - 1); if (isDefault) diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 4c9a6e3b1dab10..4c781cbc0c222c 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -1099,7 +1099,7 @@ PhaseStatus Compiler::fgCloneFinally() } else { - optRedirectBlock(block, newBlock, &blockMap); + optSetMappedBlockTargets(block, newBlock, &blockMap); } } diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index a7766b64cae6bb..4359449fde46fc 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -459,11 +459,14 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) break; case BBJ_SWITCH: - for (BasicBlock* const bTarget : block->SwitchTargets()) + { + BBswtDesc* const swtDesc = block->GetSwitchTargets(); + for (unsigned i = 0; i < swtDesc->bbsCount; i++) { - fgRemoveRefPred(bTarget, block); + fgRemoveRefPred(swtDesc->bbsDstTab[i]); } break; + } default: noway_assert(!"Block doesn't have a valid bbKind!!!!"); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index acce2402044762..f0030bc1c0ec67 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1807,16 +1807,16 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { assert(block->KindIs(BBJ_SWITCH)); - unsigned jmpCnt = block->GetSwitchTargets()->bbsCount; - BasicBlock** jmpTab = block->GetSwitchTargets()->bbsDstTab; - BasicBlock* bNewDest; // the new jump target for the current switch case - BasicBlock* bDest; - bool returnvalue = false; + unsigned jmpCnt = block->GetSwitchTargets()->bbsCount; + FlowEdge** jmpTab = block->GetSwitchTargets()->bbsDstTab; + BasicBlock* bNewDest; // the new jump target for the current switch case + BasicBlock* bDest; + bool returnvalue = false; do { REPEAT_SWITCH:; - bDest = *jmpTab; + bDest = (*jmpTab)->getDestinationBlock(); bNewDest = bDest; // Do 
we have a JUMP to an empty unconditional JUMP block? @@ -1871,14 +1871,13 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } } - // Update the switch jump table - *jmpTab = bNewDest; + // Update the switch jump table (this has to happen before calling UpdateSwitchTableTarget) + FlowEdge* const newEdge = fgAddRefPred(bNewDest, block, fgRemoveRefPred(bDest, block)); + *jmpTab = newEdge; // Maintain, if necessary, the set of unique targets of "block." UpdateSwitchTableTarget(block, bDest, bNewDest); - fgAddRefPred(bNewDest, block, fgRemoveRefPred(bDest, block)); - // we optimized a Switch label - goto REPEAT_SWITCH to follow this new jump returnvalue = true; @@ -1996,18 +1995,16 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } // Change the switch jump into a BBJ_ALWAYS - block->SetKindAndTarget(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]); - if (jmpCnt > 1) + block->SetKindAndTarget(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]->getDestinationBlock()); + for (unsigned i = 1; i < jmpCnt; ++i) { - for (unsigned i = 1; i < jmpCnt; ++i) - { - (void)fgRemoveRefPred(jmpTab[i], block); - } + fgRemoveRefPred(jmpTab[i]->getDestinationBlock(), block); } return true; } - else if ((block->GetSwitchTargets()->bbsCount == 2) && block->NextIs(block->GetSwitchTargets()->bbsDstTab[1])) + else if ((block->GetSwitchTargets()->bbsCount == 2) && + block->NextIs(block->GetSwitchTargets()->bbsDstTab[1]->getDestinationBlock())) { /* Use a BBJ_COND(switchVal==0) for a switch with only one significant clause besides the default clause, if the @@ -2060,7 +2057,9 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) fgSetStmtSeq(switchStmt); } - block->SetCond(block->GetSwitchTargets()->bbsDstTab[0], block->GetSwitchTargets()->bbsDstTab[1]); + BasicBlock* const trueTarget = block->GetSwitchTargets()->bbsDstTab[0]->getDestinationBlock(); + BasicBlock* const falseTarget = block->GetSwitchTargets()->bbsDstTab[1]->getDestinationBlock(); + 
block->SetCond(trueTarget, falseTarget); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -2999,7 +2998,7 @@ bool Compiler::fgOptimizeSwitchJumps() // The dominant case should not be the default case, as we already peel that one. // assert(dominantCase < (block->GetSwitchTargets()->bbsCount - 1)); - BasicBlock* const dominantTarget = block->GetSwitchTargets()->bbsDstTab[dominantCase]; + BasicBlock* const dominantTarget = block->GetSwitchTargets()->bbsDstTab[dominantCase]->getDestinationBlock(); Statement* const switchStmt = block->lastStmt(); GenTree* const switchTree = switchStmt->GetRootNode(); assert(switchTree->OperIs(GT_SWITCH)); diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 9e29cc25792734..02a4c22d0aade1 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -4231,18 +4231,19 @@ void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, // If it turns out often we fail at this stage, we might consider building a histogram of switch case // values at runtime, similar to what we do for classes at virtual call sites. 
// - const unsigned caseCount = block->GetSwitchTargets()->bbsCount; - BasicBlock** const jumpTab = block->GetSwitchTargets()->bbsDstTab; - unsigned dominantCase = caseCount; + const unsigned caseCount = block->GetSwitchTargets()->bbsCount; + FlowEdge** const jumpTab = block->GetSwitchTargets()->bbsDstTab; + unsigned dominantCase = caseCount; for (unsigned i = 0; i < caseCount; i++) { - if (jumpTab[i] == dominantEdge->m_targetBlock) + BasicBlock* jumpTarget = jumpTab[i]->getDestinationBlock(); + if (jumpTarget == dominantEdge->m_targetBlock) { if (dominantCase != caseCount) { JITDUMP("Both case %u and %u lead to " FMT_BB "-- can't optimize\n", i, dominantCase, - jumpTab[i]->bbNum); + jumpTarget->bbNum); dominantCase = caseCount; break; } diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 1c40239b35beeb..c393648c843ef4 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -5603,8 +5603,8 @@ void FlowGraphNaturalLoop::Duplicate(BasicBlock** insertAfter, BlockToBlockMap* assert(!newBlk->HasInitializedTarget()); // Redirect the new block according to "blockMap". - // opRedirectBlock will set newBlk's successors, and add pred edges for the successors. - comp->optRedirectBlock(blk, newBlk, map); + // optSetMappedBlockTargets will set newBlk's successors, and add pred edges for the successors. 
+ comp->optSetMappedBlockTargets(blk, newBlk, map); return BasicBlockVisit::Continue; }); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 196054a044a50b..9ac78b22bb5c3d 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -7667,16 +7667,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (opts.OptimizationEnabled() && (op1->gtOper == GT_CNS_INT)) { // Find the jump target - size_t switchVal = (size_t)op1->AsIntCon()->gtIconVal; - unsigned jumpCnt = block->GetSwitchTargets()->bbsCount; - BasicBlock** jumpTab = block->GetSwitchTargets()->bbsDstTab; - bool foundVal = false; + size_t switchVal = (size_t)op1->AsIntCon()->gtIconVal; + unsigned jumpCnt = block->GetSwitchTargets()->bbsCount; + FlowEdge** jumpTab = block->GetSwitchTargets()->bbsDstTab; + bool foundVal = false; for (unsigned val = 0; val < jumpCnt; val++, jumpTab++) { - BasicBlock* curJump = *jumpTab; + FlowEdge* curEdge = *jumpTab; - assert(curJump->countOfInEdges() > 0); + assert(curEdge->getDestinationBlock()->countOfInEdges() > 0); // If val matches switchVal or we are at the last entry and // we never found the switch value then set the new jump dest @@ -7684,13 +7684,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { // transform the basic block into a BBJ_ALWAYS - block->SetKindAndTarget(BBJ_ALWAYS, curJump); + block->SetKindAndTarget(BBJ_ALWAYS, curEdge->getDestinationBlock()); foundVal = true; } else { - // Remove 'block' from the predecessor list of 'curJump' - fgRemoveRefPred(curJump, block); + // Remove 'curEdge' + fgRemoveRefPred(curEdge); } } diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 591db3a78a22c0..2d699eb6bf81ce 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -827,10 +827,6 @@ GenTree* Lowering::LowerArrLength(GenTreeArrCommon* node) GenTree* Lowering::LowerSwitch(GenTree* node) { - unsigned 
jumpCnt; - unsigned targetCnt; - BasicBlock** jumpTab; - assert(node->gtOper == GT_SWITCH); // The first step is to build the default case conditional construct that is @@ -844,9 +840,9 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // jumpCnt is the number of elements in the jump table array. // jumpTab is the actual pointer to the jump table array. // targetCnt is the number of unique targets in the jump table array. - jumpCnt = originalSwitchBB->GetSwitchTargets()->bbsCount; - jumpTab = originalSwitchBB->GetSwitchTargets()->bbsDstTab; - targetCnt = originalSwitchBB->NumSucc(comp); + const unsigned jumpCnt = originalSwitchBB->GetSwitchTargets()->bbsCount; + FlowEdge** const jumpTab = originalSwitchBB->GetSwitchTargets()->bbsDstTab; + const unsigned targetCnt = originalSwitchBB->NumSucc(comp); // GT_SWITCH must be a top-level node with no use. #ifdef DEBUG @@ -865,7 +861,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) { JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum); noway_assert(comp->opts.OptimizationDisabled()); - originalSwitchBB->SetKindAndTarget(BBJ_ALWAYS, jumpTab[0]); + originalSwitchBB->SetKindAndTarget(BBJ_ALWAYS, jumpTab[0]->getDestinationBlock()); if (originalSwitchBB->JumpsToNext()) { @@ -875,7 +871,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // Remove extra predecessor links if there was more than one case. 
for (unsigned i = 1; i < jumpCnt; ++i) { - (void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB); + comp->fgRemoveRefPred(jumpTab[i]); } // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign @@ -909,11 +905,11 @@ GenTree* Lowering::LowerSwitch(GenTree* node) unsigned tempLclNum = temp->AsLclVarCommon()->GetLclNum(); var_types tempLclType = temp->TypeGet(); - BasicBlock* defaultBB = jumpTab[jumpCnt - 1]; + BasicBlock* defaultBB = jumpTab[jumpCnt - 1]->getDestinationBlock(); BasicBlock* followingBB = originalSwitchBB->Next(); /* Is the number of cases right for a test and jump switch? */ - const bool fFirstCaseFollows = (followingBB == jumpTab[0]); + const bool fFirstCaseFollows = (followingBB == jumpTab[0]->getDestinationBlock()); const bool fDefaultFollows = (followingBB == defaultBB); unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc @@ -962,14 +958,14 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // The GT_SWITCH code is still in originalSwitchBB (it will be removed later). - // Turn originalSwitchBB into a BBJ_COND. - originalSwitchBB->SetCond(jumpTab[jumpCnt - 1], afterDefaultCondBlock); - // Fix the pred for the default case: the default block target still has originalSwitchBB // as a predecessor, but the fgSplitBlockAfterStatement() moved all predecessors to point // to afterDefaultCondBlock. - FlowEdge* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock); - comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge); + comp->fgRemoveRefPred(jumpTab[jumpCnt - 1]); + comp->fgAddRefPred(defaultBB, originalSwitchBB, jumpTab[jumpCnt - 1]); + + // Turn originalSwitchBB into a BBJ_COND. 
+ originalSwitchBB->SetCond(defaultBB, afterDefaultCondBlock); bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt; @@ -989,7 +985,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // If we originally had 2 unique successors, check to see whether there is a unique // non-default case, in which case we can eliminate the switch altogether. // Note that the single unique successor case is handled above. - BasicBlock* uniqueSucc = nullptr; + FlowEdge* uniqueSucc = nullptr; if (targetCnt == 2) { uniqueSucc = jumpTab[0]; @@ -1008,17 +1004,17 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // If the unique successor immediately follows this block, we have nothing to do - // it will simply fall-through after we remove the switch, below. // Otherwise, make this a BBJ_ALWAYS. - // Now, fixup the predecessor links to uniqueSucc. In the original jumpTab: + // Now, fixup the predecessor links to uniqueSucc's target block. In the original jumpTab: // jumpTab[i-1] was the default target, which we handled above, // jumpTab[0] is the first target, and we'll leave that predecessor link. - // Remove any additional predecessor links to uniqueSucc. + // Remove any additional predecessor links to uniqueSucc's target block. for (unsigned i = 1; i < jumpCnt - 1; ++i) { assert(jumpTab[i] == uniqueSucc); - (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock); + comp->fgRemoveRefPred(uniqueSucc); } - afterDefaultCondBlock->SetKindAndTarget(BBJ_ALWAYS, uniqueSucc); + afterDefaultCondBlock->SetKindAndTarget(BBJ_ALWAYS, uniqueSucc->getDestinationBlock()); if (afterDefaultCondBlock->JumpsToNext()) { @@ -1054,12 +1050,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) for (unsigned i = 0; i < jumpCnt - 1; ++i) { assert(currentBlock != nullptr); + BasicBlock* targetBlock = jumpTab[i]->getDestinationBlock(); // Remove the switch from the predecessor list of this case target's block. // We'll add the proper new predecessor edge later. 
- FlowEdge* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock); + comp->fgRemoveRefPred(jumpTab[i]); - if (jumpTab[i] == followingBB) + if (targetBlock == followingBB) { // This case label follows the switch; let it fall through. fAnyTargetFollows = true; @@ -1084,7 +1081,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } // Wire up the predecessor list for the "branch" case. - comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge); + comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]); if (!fAnyTargetFollows && (i == jumpCnt - 2)) { @@ -1093,13 +1090,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). - currentBlock->SetKindAndTarget(BBJ_ALWAYS, jumpTab[i]); + currentBlock->SetKindAndTarget(BBJ_ALWAYS, targetBlock); } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. - currentBlock->SetCond(jumpTab[i], currentBlock->Next()); + currentBlock->SetCond(targetBlock, currentBlock->Next()); // Now, build the conditional statement for the current case that is // being evaluated: @@ -1212,7 +1209,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // to emit the jump table itself that can reach up to 256 bytes (for 64 entries). // bool Lowering::TryLowerSwitchToBitTest( - BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue) + FlowEdge* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue) { assert(jumpCount >= 2); assert(targetCount >= 2); @@ -1249,29 +1246,32 @@ bool Lowering::TryLowerSwitchToBitTest( // table and/or swap the blocks if it's beneficial. 
// - BasicBlock* bbCase0 = nullptr; - BasicBlock* bbCase1 = jumpTable[0]; - size_t bitTable = 1; + FlowEdge* case0Edge = nullptr; + FlowEdge* case1Edge = jumpTable[0]; + size_t bitTable = 1; for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++) { - if (jumpTable[bitIndex] == bbCase1) + if (jumpTable[bitIndex] == case1Edge) { bitTable |= (size_t(1) << bitIndex); } - else if (bbCase0 == nullptr) + else if (case0Edge == nullptr) { - bbCase0 = jumpTable[bitIndex]; + case0Edge = jumpTable[bitIndex]; } - else if (jumpTable[bitIndex] != bbCase0) + else if (jumpTable[bitIndex] != case0Edge) { - // If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more + // If it's neither case0Edge nor case1Edge then it means we have 3 targets. There can't be more // than 3 because of the check at the start of the function. assert(targetCount == 3); return false; } } + BasicBlock* bbCase0 = case0Edge->getDestinationBlock(); + BasicBlock* bbCase1 = case1Edge->getDestinationBlock(); + // // One of the case blocks has to follow the switch block. This requirement could be avoided // by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively @@ -1322,6 +1322,7 @@ bool Lowering::TryLowerSwitchToBitTest( bbSwitch->SetCond(bbCase0, bbCase1); } + // TODO: Use old edges to influence new edge likelihoods?
comp->fgAddRefPred(bbCase0, bbSwitch); comp->fgAddRefPred(bbCase1, bbSwitch); diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h index d35738c944dcf2..d44f16ceca527b 100644 --- a/src/coreclr/jit/lower.h +++ b/src/coreclr/jit/lower.h @@ -349,7 +349,7 @@ class Lowering final : public Phase GenTree* LowerSwitch(GenTree* node); bool TryLowerSwitchToBitTest( - BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue); + FlowEdge* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue); void LowerCast(GenTree* node); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 0c44bbad973ea6..abb6362d6acff8 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -13357,29 +13357,29 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) // modify the flow graph // Find the actual jump target - size_t switchVal = (size_t)cond->AsIntCon()->gtIconVal; - unsigned jumpCnt = block->GetSwitchTargets()->bbsCount; - BasicBlock** jumpTab = block->GetSwitchTargets()->bbsDstTab; - bool foundVal = false; + size_t switchVal = (size_t)cond->AsIntCon()->gtIconVal; + unsigned jumpCnt = block->GetSwitchTargets()->bbsCount; + FlowEdge** jumpTab = block->GetSwitchTargets()->bbsDstTab; + bool foundVal = false; for (unsigned val = 0; val < jumpCnt; val++, jumpTab++) { - BasicBlock* curJump = *jumpTab; + FlowEdge* curEdge = *jumpTab; - assert(curJump->countOfInEdges() > 0); + assert(curEdge->getDestinationBlock()->countOfInEdges() > 0); // If val matches switchVal or we are at the last entry and // we never found the switch value then set the new jump dest if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { - block->SetKindAndTarget(BBJ_ALWAYS, curJump); + block->SetKindAndTarget(BBJ_ALWAYS, curEdge->getDestinationBlock()); foundVal = true; } else { - // Remove 'block' from the predecessor list of 'curJump' - 
fgRemoveRefPred(curJump, block); + // Remove 'curEdge' + fgRemoveRefPred(curEdge->getDestinationBlock(), block); } } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index e2f1d335e21277..c5d168bedf9cff 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -553,7 +553,7 @@ void Compiler::optCheckPreds() #endif // DEBUG //------------------------------------------------------------------------ -// optRedirectBlock: Initialize the branch successors of a block based on a block map. +// optSetMappedBlockTargets: Initialize the branch successors of a block based on a block map. // // Updates the successors of `newBlk`, a copy of `blk`: // If `blk2` is a branch successor of `blk`, and there is a mapping @@ -570,7 +570,7 @@ void Compiler::optCheckPreds() // Upon returning, `newBlk` should have all of its successors initialized. // `blk` must have its successors set upon entry; these won't be changed. // -void Compiler::optRedirectBlock(BasicBlock* blk, BasicBlock* newBlk, BlockToBlockMap* redirectMap) +void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, BlockToBlockMap* redirectMap) { // Caller should not have initialized newBlk's target yet assert(newBlk->KindIs(BBJ_ALWAYS)); @@ -672,23 +672,27 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BasicBlock* newBlk, BlockToBloc { BBswtDesc* currSwtDesc = blk->GetSwitchTargets(); BBswtDesc* newSwtDesc = new (this, CMK_BasicBlock) BBswtDesc(currSwtDesc); - newSwtDesc->bbsDstTab = new (this, CMK_BasicBlock) BasicBlock*[newSwtDesc->bbsCount]; - BasicBlock** jumpPtr = newSwtDesc->bbsDstTab; + newSwtDesc->bbsDstTab = new (this, CMK_FlowEdge) FlowEdge*[newSwtDesc->bbsCount]; - for (BasicBlock* const switchTarget : blk->SwitchTargets()) + for (unsigned i = 0; i < newSwtDesc->bbsCount; i++) { + FlowEdge* const inspiringEdge = currSwtDesc->bbsDstTab[i]; + BasicBlock* const switchTarget = inspiringEdge->getDestinationBlock(); + FlowEdge* newEdge; + // 
Determine if newBlk should target switchTarget, or be redirected if (redirectMap->Lookup(switchTarget, &newTarget)) { - *jumpPtr = newTarget; + // TODO: Set likelihood using inspiringEdge + newEdge = fgAddRefPred(newTarget, newBlk); } else { - *jumpPtr = switchTarget; + // TODO: Set likelihood using inspiringEdge + newEdge = fgAddRefPred(switchTarget, newBlk); } - fgAddRefPred(*jumpPtr, newBlk); - jumpPtr++; + newSwtDesc->bbsDstTab[i] = newEdge; } newBlk->SetSwitch(newSwtDesc); diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index fa6abd0f23e8bb..1008b81194f8d5 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -51,8 +51,8 @@ PhaseStatus Compiler::optSwitchRecognition() // // Arguments: // block - The block to check -// blockIfTrue - [out] The block that will be jumped to if X == CNS -// blockIfFalse - [out] The block that will be jumped to if X != CNS +// trueEdge - [out] The successor edge taken if X == CNS +// falseEdge - [out] The successor edge taken if X != CNS // isReversed - [out] True if the condition is reversed (GT_NE) // variableNode - [out] The variable node (X in the example above) // cns - [out] The constant value (CNS in the example above) @@ -61,8 +61,8 @@ PhaseStatus Compiler::optSwitchRecognition() // True if the block represents a constant test, false otherwise // bool IsConstantTestCondBlock(const BasicBlock* block, - BasicBlock** blockIfTrue, - BasicBlock** blockIfFalse, + BasicBlock** trueTarget, + BasicBlock** falseTarget, bool* isReversed, GenTree** variableNode = nullptr, ssize_t* cns = nullptr) @@ -94,9 +94,9 @@ bool IsConstantTestCondBlock(const BasicBlock* block, return false; } - *isReversed = rootNode->gtGetOp1()->OperIs(GT_NE); - *blockIfTrue = *isReversed ? block->GetFalseTarget() : block->GetTrueTarget(); - *blockIfFalse = *isReversed ? 
block->GetTrueTarget() : block->GetFalseTarget(); + *isReversed = rootNode->gtGetOp1()->OperIs(GT_NE); + *trueTarget = *isReversed ? block->GetFalseTarget() : block->GetTrueTarget(); + *falseTarget = *isReversed ? block->GetTrueTarget() : block->GetFalseTarget(); if (block->FalseTargetIs(block) || block->TrueTargetIs(block)) { @@ -141,14 +141,14 @@ bool Compiler::optSwitchDetectAndConvert(BasicBlock* firstBlock) GenTree* variableNode = nullptr; ssize_t cns = 0; - BasicBlock* blockIfTrue = nullptr; - BasicBlock* blockIfFalse = nullptr; + BasicBlock* trueTarget = nullptr; + BasicBlock* falseTarget = nullptr; // The algorithm is simple - we check that the given block is a constant test block // and then try to accumulate as many constant test blocks as possible. Once we hit // a block that doesn't match the pattern, we start processing the accumulated blocks. bool isReversed = false; - if (IsConstantTestCondBlock(firstBlock, &blockIfTrue, &blockIfFalse, &isReversed, &variableNode, &cns)) + if (IsConstantTestCondBlock(firstBlock, &trueTarget, &falseTarget, &isReversed, &variableNode, &cns)) { if (isReversed) { @@ -170,8 +170,8 @@ bool Compiler::optSwitchDetectAndConvert(BasicBlock* firstBlock) { GenTree* currVariableNode = nullptr; ssize_t currCns = 0; - BasicBlock* currBlockIfTrue = nullptr; - BasicBlock* currBlockIfFalse = nullptr; + BasicBlock* currTrueTarget = nullptr; + BasicBlock* currFalseTarget = nullptr; if (!currBb->hasSingleStmt()) { @@ -181,10 +181,10 @@ bool Compiler::optSwitchDetectAndConvert(BasicBlock* firstBlock) } // Inspect secondary blocks - if (IsConstantTestCondBlock(currBb, &currBlockIfTrue, &currBlockIfFalse, &isReversed, &currVariableNode, + if (IsConstantTestCondBlock(currBb, &currTrueTarget, &currFalseTarget, &isReversed, &currVariableNode, &currCns)) { - if (currBlockIfTrue != blockIfTrue) + if (currTrueTarget != trueTarget) { // This blocks jumps to a different target, stop searching and process what we already have. 
return optSwitchConvert(firstBlock, testValueIndex, testValues, variableNode); @@ -345,9 +345,9 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* blockToRemove = fgRemoveBlock(blockToRemove, true); } - const auto jumpCount = static_cast<unsigned>(maxValue - minValue + 1); + const unsigned jumpCount = static_cast<unsigned>(maxValue - minValue + 1); assert((jumpCount > 0) && (jumpCount <= SWITCH_MAX_DISTANCE + 1)); - const auto jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jumpCount + 1 /*default case*/]; + FlowEdge** jmpTab = new (this, CMK_FlowEdge) FlowEdge*[jumpCount + 1 /*default case*/]; // Quirk: lastBlock's false target may have diverged from bbNext. If the false target is behind firstBlock, // we may create a cycle in the BasicBlock list by setting firstBlock->bbNext to it. @@ -362,15 +362,17 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* { assert(lastBlock->FalseTargetIs(blockIfTrue)); fgRemoveRefPred(blockIfTrue, firstBlock); - blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, blockIfTrue); - fgAddRefPred(blockIfTrue->GetTarget(), blockIfTrue); - skipPredRemoval = true; + BasicBlock* targetBlock = blockIfTrue; + blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, targetBlock); + FlowEdge* const newEdge = fgAddRefPred(targetBlock, blockIfTrue); + skipPredRemoval = true; } else { assert(lastBlock->FalseTargetIs(blockIfFalse)); - blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, blockIfFalse); - fgAddRefPred(blockIfFalse->GetTarget(), blockIfFalse); + BasicBlock* targetBlock = blockIfFalse; + blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, targetBlock); + FlowEdge* const newEdge = fgAddRefPred(targetBlock, blockIfFalse); } } @@ -402,14 +404,14 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* { // value exists in the testValues array (via bitVector) - 'true' case.
const bool isTrue = (bitVector & static_cast(1ULL << i)) != 0; - jmpTab[i] = isTrue ? blockIfTrue : blockIfFalse; - fgAddRefPred(jmpTab[i], firstBlock); + FlowEdge* const newEdge = fgAddRefPred((isTrue ? blockIfTrue : blockIfFalse), firstBlock); + jmpTab[i] = newEdge; } // Link the 'default' case - jmpTab[jumpCount] = blockIfFalse; - fgAddRefPred(blockIfFalse, firstBlock); + FlowEdge* const defaultEdge = fgAddRefPred(blockIfFalse, firstBlock); + jmpTab[jumpCount] = defaultEdge; return true; }